├── .github ├── release-drafter.yml └── workflows │ ├── checks.yml │ ├── create-tag.yml │ ├── main-checks.yml │ ├── pr-checks.yml │ ├── publish-pypi.yml │ └── release-drafter.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .python-version ├── .vscode ├── extensions.json ├── fastagent.config.schema.json └── settings.json ├── LICENSE ├── README.md ├── examples ├── azure-openai │ └── fastagent.config.yaml ├── custom-agents │ ├── agent.py │ └── fastagent.config.yaml ├── data-analysis │ ├── analysis-campaign.py │ ├── analysis.py │ ├── fastagent.config.yaml │ └── mount-point │ │ └── WA_Fn-UseC_-HR-Employee-Attrition.csv ├── mcp │ ├── state-transfer │ │ ├── agent_one.py │ │ ├── agent_two.py │ │ └── fastagent.config.yaml │ └── vision-examples │ │ ├── cat.png │ │ ├── example1.py │ │ ├── example2.py │ │ ├── example3.py │ │ └── fastagent.config.yaml ├── otel │ ├── agent.py │ ├── agent2.py │ ├── docker-compose.yaml │ └── fastagent.config.yaml ├── researcher │ ├── fastagent.config.yaml │ ├── researcher-eval.py │ ├── researcher-imp.py │ └── researcher.py ├── tensorzero │ ├── .env.sample │ ├── Makefile │ ├── README.md │ ├── agent.py │ ├── demo_images │ │ ├── clam.jpg │ │ ├── crab.png │ │ └── shrimp.png │ ├── docker-compose.yml │ ├── fastagent.config.yaml │ ├── image_demo.py │ ├── mcp_server │ │ ├── Dockerfile │ │ ├── entrypoint.sh │ │ └── mcp_server.py │ ├── simple_agent.py │ └── tensorzero_config │ │ ├── system_schema.json │ │ ├── system_template.minijinja │ │ └── tensorzero.toml └── workflows │ ├── chaining.py │ ├── evaluator.py │ ├── fastagent.config.yaml │ ├── graded_report.md │ ├── human_input.py │ ├── orchestrator.py │ ├── parallel.py │ ├── router.py │ ├── short_story.md │ └── short_story.txt ├── pyproject.toml ├── scripts ├── event_replay.py ├── event_summary.py ├── event_viewer.py ├── example.py ├── format.py ├── gen_schema.py ├── lint.py ├── promptify.py ├── rich_progress_test.py └── test_package_install.sh ├── src └── mcp_agent │ ├── __init__.py │ ├── agents │ ├── __init__.py │ ├── agent.py │ ├── base_agent.py │ └── workflow │ │ ├── __init__.py │ │ ├── chain_agent.py │ │ ├── evaluator_optimizer.py │ │ ├── orchestrator_agent.py │ │ ├── orchestrator_models.py │ │ ├── orchestrator_prompts.py │ │ ├── parallel_agent.py │ │ └── router_agent.py │ ├── app.py │ ├── cli │ ├── __init__.py │ ├── __main__.py │ ├── commands │ │ ├── README.md │ │ ├── check_config.py │ │ ├── go.py │ │ ├── quickstart.py │ │ ├── setup.py │ │ └── url_parser.py │ ├── main.py │ └── terminal.py │ ├── config.py │ ├── console.py │ ├── context.py │ ├── context_dependent.py │ ├── core │ ├── __init__.py │ ├── agent_app.py │ ├── agent_types.py │ ├── direct_decorators.py │ ├── direct_factory.py │ ├── enhanced_prompt.py │ ├── error_handling.py │ ├── exceptions.py │ ├── fastagent.py │ ├── interactive_prompt.py │ ├── mcp_content.py │ ├── prompt.py │ ├── request_params.py │ └── validation.py │ ├── event_progress.py │ ├── executor │ ├── __init__.py │ ├── executor.py │ ├── task_registry.py │ └── workflow_signal.py │ ├── human_input │ ├── __init__.py │ ├── handler.py │ └── types.py │ ├── llm │ ├── __init__.py │ ├── augmented_llm.py │ ├── augmented_llm_passthrough.py │ ├── augmented_llm_playback.py │ ├── augmented_llm_slow.py │ ├── memory.py │ ├── model_factory.py │ ├── prompt_utils.py │ ├── provider_key_manager.py │ ├── provider_types.py │ ├── providers │ │ ├── README_anth_multipart.md │ │ ├── __init__.py │ │ ├── anthropic_utils.py │ │ ├── augmented_llm_aliyun.py │ │ ├── augmented_llm_anthropic.py │ │ ├── augmented_llm_azure.py │ │ ├── 
augmented_llm_deepseek.py │ │ ├── augmented_llm_generic.py │ │ ├── augmented_llm_google_native.py │ │ ├── augmented_llm_google_oai.py │ │ ├── augmented_llm_openai.py │ │ ├── augmented_llm_openrouter.py │ │ ├── augmented_llm_tensorzero.py │ │ ├── google_converter.py │ │ ├── multipart_converter_anthropic.py │ │ ├── multipart_converter_openai.py │ │ ├── multipart_converter_tensorzero.py │ │ ├── openai_multipart.py │ │ ├── openai_utils.py │ │ ├── sampling_converter_anthropic.py │ │ └── sampling_converter_openai.py │ ├── sampling_converter.py │ └── sampling_format_converter.py │ ├── logging │ ├── __init__.py │ ├── events.py │ ├── json_serializer.py │ ├── listeners.py │ ├── logger.py │ ├── rich_progress.py │ └── transport.py │ ├── mcp │ ├── __init__.py │ ├── common.py │ ├── gen_client.py │ ├── helpers │ │ ├── __init__.py │ │ ├── content_helpers.py │ │ └── server_config_helpers.py │ ├── hf_auth.py │ ├── interfaces.py │ ├── logger_textio.py │ ├── mcp_agent_client_session.py │ ├── mcp_aggregator.py │ ├── mcp_connection_manager.py │ ├── mime_utils.py │ ├── prompt_message_multipart.py │ ├── prompt_render.py │ ├── prompt_serialization.py │ ├── prompts │ │ ├── __init__.py │ │ ├── __main__.py │ │ ├── prompt_constants.py │ │ ├── prompt_helpers.py │ │ ├── prompt_load.py │ │ ├── prompt_server.py │ │ └── prompt_template.py │ ├── resource_utils.py │ └── sampling.py │ ├── mcp_server │ ├── __init__.py │ └── agent_server.py │ ├── mcp_server_registry.py │ ├── progress_display.py │ ├── py.typed │ ├── resources │ └── examples │ │ ├── data-analysis │ │ ├── analysis-campaign.py │ │ ├── analysis.py │ │ ├── fastagent.config.yaml │ │ └── mount-point │ │ │ └── WA_Fn-UseC_-HR-Employee-Attrition.csv │ │ ├── in_dev │ │ ├── agent_build.py │ │ ├── css-LICENSE.txt │ │ ├── freud.css │ │ ├── schema.css │ │ ├── slides.md │ │ ├── slides.py │ │ └── structure.css │ │ ├── internal │ │ ├── agent.py │ │ ├── fastagent.config.yaml │ │ ├── fastagent.jsonl │ │ ├── history_transfer.py │ │ ├── job.py │ │ ├── prompt_category.py │ │ ├── prompt_sizing.py │ │ ├── simple.txt │ │ ├── sizer.py │ │ └── social.py │ │ ├── mcp │ │ └── state-transfer │ │ │ ├── agent_one.py │ │ │ ├── agent_two.py │ │ │ ├── fastagent.config.yaml │ │ │ └── fastagent.secrets.yaml.example │ │ ├── prompting │ │ ├── 2025-03-19_FLUX_1-schnell-infer_Image_dff80.webp │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── delimited_prompt.txt │ │ ├── fastagent.config.yaml │ │ ├── fastagent.jsonl │ │ ├── foo.md │ │ ├── image.jpg │ │ ├── image.png │ │ ├── image_server.py │ │ ├── long_chat.md │ │ ├── pdf_prompt.md │ │ ├── persist.md │ │ ├── prompt1.txt │ │ ├── prompt2.md │ │ ├── resource-exe.md │ │ ├── resource.md │ │ ├── sample.css │ │ ├── sample.pdf │ │ ├── sizing.md │ │ ├── test.exe │ │ ├── testingagain.md │ │ └── work_with_image.py │ │ ├── researcher │ │ ├── fastagent.config.yaml │ │ ├── researcher-eval.py │ │ ├── researcher-imp.py │ │ └── researcher.py │ │ └── workflows │ │ ├── chaining.py │ │ ├── evaluator.py │ │ ├── fastagent.config.yaml │ │ ├── graded_report.md │ │ ├── human_input.py │ │ ├── orchestrator.py │ │ ├── parallel.py │ │ ├── router.py │ │ ├── short_story.md │ │ └── short_story.txt │ ├── tools │ └── tool_definition.py │ └── ui │ └── console_display.py ├── tests ├── e2e │ ├── conftest.py │ ├── multimodal │ │ ├── fastagent.config.yaml │ │ ├── image.png │ │ ├── image_server.py │ │ ├── sample.pdf │ │ └── test_multimodal_images.py │ ├── prompts-resources │ │ ├── fastagent.config.yaml │ │ ├── fastagent.jsonl │ │ ├── multiturn.md │ │ ├── sample.pdf │ │ ├── simple.txt │ │ ├── 
style.css │ │ ├── test_prompts.py │ │ ├── test_resources.py │ │ ├── with_attachment.md │ │ └── with_attachment_css.md │ ├── sampling │ │ ├── fastagent.config.yaml │ │ ├── fastagent.jsonl │ │ ├── image.png │ │ ├── sampling_resource_server.py │ │ └── test_sampling_e2e.py │ ├── smoke │ │ ├── base │ │ │ ├── fastagent.config.yaml │ │ │ ├── index.js.TEST_ONLY │ │ │ ├── test_e2e_smoke.py │ │ │ └── test_server.py │ │ └── tensorzero │ │ │ ├── test_agent_interaction.py │ │ │ ├── test_image_demo.py │ │ │ └── test_simple_agent_interaction.py │ ├── structured │ │ ├── fastagent.config.yaml │ │ └── test_structured_outputs.py │ └── workflow │ │ ├── fastagent.config.yaml │ │ ├── sunny.png │ │ ├── test_router_agent_e2e.py │ │ ├── test_routing_server.py │ │ └── umbrella.png ├── integration │ ├── api │ │ ├── fastagent.config.markup.yaml │ │ ├── fastagent.config.yaml │ │ ├── fastagent.secrets.yaml │ │ ├── integration_agent.py │ │ ├── mcp_dynamic_tools.py │ │ ├── mcp_tools_server.py │ │ ├── playback.md │ │ ├── prompt.txt │ │ ├── stderr_test_script.py │ │ ├── test_api.py │ │ ├── test_cli_and_mcp_server.py │ │ ├── test_describe_a2a.py │ │ ├── test_hyphens_in_name.py │ │ ├── test_logger_textio.py │ │ ├── test_markup_config.py │ │ ├── test_prompt_commands.py │ │ ├── test_prompt_listing.py │ │ ├── test_provider_keys.py │ │ └── test_tool_list_change.py │ ├── conftest.py │ ├── prompt-server │ │ ├── fastagent.config.yaml │ │ ├── multi.txt │ │ ├── multi_sub.txt │ │ ├── multipart.json │ │ ├── simple.txt │ │ ├── simple_sub.txt │ │ └── test_prompt_server_integration.py │ ├── prompt-state │ │ ├── conv1_simple.md │ │ ├── conv2_attach.md │ │ ├── conv2_css.css │ │ ├── conv2_img.png │ │ ├── conv2_text.txt │ │ ├── fastagent.config.yaml │ │ └── test_load_prompt_templates.py │ ├── resources │ │ ├── fastagent.config.yaml │ │ ├── prompt1.txt │ │ ├── prompt2.txt │ │ ├── r1file1.txt │ │ ├── r1file2.txt │ │ ├── r2file1.txt │ │ ├── r2file2.txt │ │ └── test_resource_api.py │ ├── roots │ │ ├── fastagent.config.yaml │ │ ├── fastagent.jsonl │ │ ├── live.py │ │ ├── root_client.py │ │ ├── root_test_server.py │ │ └── test_roots.py │ ├── sampling │ │ ├── fastagent.config.auto_sampling_off.yaml │ │ ├── fastagent.config.yaml │ │ ├── live.py │ │ ├── sampling_test_server.py │ │ └── test_sampling_integration.py │ └── workflow │ │ ├── chain │ │ ├── fastagent.config.yaml │ │ ├── test_chain.py │ │ └── test_chain_passthrough.py │ │ ├── evaluator_optimizer │ │ ├── fastagent.config.yaml │ │ └── test_evaluator_optimizer.py │ │ ├── mixed │ │ ├── fastagent.config.yaml │ │ └── test_mixed_workflow.py │ │ ├── orchestrator │ │ ├── fastagent.config.yaml │ │ └── test_orchestrator.py │ │ ├── parallel │ │ ├── fastagent.config.yaml │ │ └── test_parallel_agent.py │ │ └── router │ │ ├── fastagent.config.yaml │ │ ├── router_script.txt │ │ └── test_router_agent.py └── unit │ └── mcp_agent │ ├── agents │ ├── test_agent_types.py │ └── workflow │ │ ├── test_orchestrator_agent.py │ │ └── test_router_unit.py │ ├── cli │ └── commands │ │ ├── test_check_config.py │ │ ├── test_check_config_hf.py │ │ ├── test_url_parser.py │ │ └── test_url_parser_hf_auth.py │ ├── core │ ├── test_mcp_content.py │ └── test_prompt.py │ ├── llm │ ├── providers │ │ ├── test_augmented_llm_azure.py │ │ ├── test_augmented_llm_tensorzero_unit.py │ │ ├── test_multipart_converter_anthropic.py │ │ ├── test_multipart_converter_google.py │ │ ├── test_multipart_converter_openai.py │ │ ├── test_multipart_converter_tensorzero.py │ │ ├── test_sampling_converter_anthropic.py │ │ └── 
test_sampling_converter_openai.py │ ├── test_model_factory.py │ ├── test_passthrough.py │ ├── test_playback.py │ ├── test_prepare_arguments.py │ ├── test_provider_key_manager_hf.py │ ├── test_sampling_converter.py │ └── test_structured.py │ ├── mcp │ ├── prompts │ │ ├── test_prompt_helpers.py │ │ ├── test_prompt_template.py │ │ └── test_template_multipart_integration.py │ ├── test_hf_auth.py │ ├── test_mime_utils.py │ ├── test_prompt_format_utils.py │ ├── test_prompt_message_multipart.py │ ├── test_prompt_multipart.py │ ├── test_prompt_multipart_conversion.py │ ├── test_prompt_render.py │ ├── test_prompt_serialization.py │ ├── test_resource_utils.py │ └── test_sampling.py │ └── mcp_agent │ ├── fixture │ ├── README.md │ ├── expected_output.txt │ └── mcp-basic-agent-2025-02-17.jsonl │ └── test_event_progress.py └── uv.lock /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: "v$NEXT_PATCH_VERSION" 2 | tag-template: "v$NEXT_PATCH_VERSION" 3 | categories: 4 | - title: "🚀 Features" 5 | labels: 6 | - "feature" 7 | - "enhancement" 8 | - title: "🐛 Bug Fixes" 9 | labels: 10 | - "fix" 11 | - "bugfix" 12 | - "bug" 13 | - title: "🧰 Maintenance" 14 | label: "chore" 15 | change-template: "- $TITLE @$AUTHOR (#$NUMBER)" 16 | template: | 17 | ## Changes 18 | $CHANGES 19 | -------------------------------------------------------------------------------- /.github/workflows/checks.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | 3 | on: 4 | workflow_call: 5 | 6 | jobs: 7 | format: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | 12 | - name: Install uv 13 | uses: astral-sh/setup-uv@v3 14 | with: 15 | enable-cache: true 16 | 17 | - name: "Set up Python" 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version-file: ".python-version" 21 | 22 | - name: Install the project 23 | run: | 24 | uv venv .venv 25 | source .venv/bin/activate 26 | uv pip install -e ".[dev]" 27 | 28 | - name: Run ruff format check 29 | run: uv run scripts/format.py 30 | 31 | lint: 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | 36 | - name: Install uv 37 | uses: astral-sh/setup-uv@v3 38 | with: 39 | enable-cache: true 40 | 41 | - name: "Set up Python" 42 | uses: actions/setup-python@v5 43 | with: 44 | python-version-file: ".python-version" 45 | 46 | - name: Install the project 47 | run: | 48 | uv venv .venv 49 | source .venv/bin/activate 50 | uv pip install -e ".[dev]" 51 | 52 | - name: Run pyright 53 | run: uv run scripts/lint.py 54 | 55 | test: 56 | runs-on: ubuntu-latest 57 | steps: 58 | - uses: actions/checkout@v4 59 | 60 | - name: Install uv 61 | uses: astral-sh/setup-uv@v3 62 | with: 63 | enable-cache: true 64 | 65 | - name: "Set up Python" 66 | uses: actions/setup-python@v5 67 | with: 68 | python-version-file: ".python-version" 69 | 70 | - name: Install the project 71 | run: | 72 | uv venv .venv 73 | source .venv/bin/activate 74 | uv pip install -e ".[dev]" 75 | 76 | - name: Run pytest 77 | run: | 78 | source .venv/bin/activate 79 | python -m pytest tests/unit -v 80 | python -m pytest tests/integration -v 81 | -------------------------------------------------------------------------------- /.github/workflows/create-tag.yml: -------------------------------------------------------------------------------- 1 | name: Create Version Tag from pyproject.toml 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 
workflow_dispatch: # Enables manual runs 10 | 11 | permissions: 12 | contents: write 13 | 14 | jobs: 15 | create-tag: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Check out code 19 | uses: actions/checkout@v4 20 | 21 | - name: Install uv 22 | uses: astral-sh/setup-uv@v3 23 | with: 24 | enable-cache: true 25 | 26 | - name: "Set up Python" 27 | uses: actions/setup-python@v5 28 | with: 29 | python-version-file: ".python-version" 30 | 31 | - name: Install dependencies 32 | run: pip install toml 33 | 34 | - name: Extract version from pyproject.toml 35 | id: get_version 36 | run: | 37 | version=$(python -c "import toml; print(toml.load('pyproject.toml')['project']['version'])") 38 | echo "version=$version" >> $GITHUB_OUTPUT 39 | 40 | - name: Create Git tag if not exists 41 | run: | 42 | git fetch --tags 43 | tag="v${{ steps.get_version.outputs.version }}" 44 | if ! git rev-parse "$tag" >/dev/null 2>&1; then 45 | git tag "$tag" 46 | git push origin "$tag" 47 | else 48 | echo "Tag $tag already exists." 49 | fi 50 | -------------------------------------------------------------------------------- /.github/workflows/main-checks.yml: -------------------------------------------------------------------------------- 1 | name: Main Checks 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - "v*.*.*" 8 | tags: 9 | - "v*.*.*" 10 | 11 | jobs: 12 | checks: 13 | uses: ./.github/workflows/checks.yml 14 | -------------------------------------------------------------------------------- /.github/workflows/pr-checks.yml: -------------------------------------------------------------------------------- 1 | name: Pull Request Checks 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | checks: 8 | uses: ./.github/workflows/checks.yml 9 | -------------------------------------------------------------------------------- /.github/workflows/publish-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish Package to PyPI 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" # Triggers on tags like v1.2.3 7 | 8 | workflow_dispatch: # Enables manual runs 9 | 10 | jobs: 11 | checks: 12 | uses: ./.github/workflows/checks.yml 13 | 14 | publish: 15 | name: Build and publish package to PyPI 16 | runs-on: ubuntu-latest 17 | needs: [checks] # Run checks before publishing 18 | 19 | # This ties the job to a protected environment. 
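# (With required reviewers configured on that environment, this publish job waits for manual approval before running.)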
20 | environment: 21 | name: production # Ensure this environment is configured in your repo settings with required reviewers 22 | 23 | steps: 24 | - name: Check out code 25 | uses: actions/checkout@v4 26 | 27 | - name: Install uv 28 | uses: astral-sh/setup-uv@v3 29 | 30 | - name: "Set up Python" 31 | uses: actions/setup-python@v5 32 | with: 33 | python-version-file: ".python-version" 34 | 35 | - name: Install the project 36 | run: | 37 | uv venv .venv 38 | source .venv/bin/activate 39 | uv pip install -e ".[dev]" 40 | 41 | - name: Build 42 | run: uv build 43 | 44 | - name: Upload artifacts 45 | uses: actions/upload-artifact@v4 46 | with: 47 | name: release-dists 48 | path: dist/ 49 | 50 | - name: Publish package to PyPI using uv 51 | env: 52 | UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} 53 | run: uv publish 54 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Update Release Draft 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | # pull_request event is required only for autolabeler 9 | pull_request: 10 | # Only following types are handled by the action, but one can default to all as well 11 | types: [opened, reopened, synchronize] 12 | 13 | # pull_request_target event is required for autolabeler to support PRs from forks 14 | pull_request_target: 15 | types: [opened, reopened, synchronize] 16 | 17 | workflow_dispatch: # Enables manual runs 18 | 19 | permissions: 20 | contents: read 21 | 22 | jobs: 23 | update_release_draft: 24 | permissions: 25 | # write permission is required to create a github release 26 | contents: write 27 | # write permission is required for autolabeler 28 | pull-requests: write 29 | runs-on: ubuntu-latest 30 | steps: 31 | - uses: actions/checkout@v3 32 | - uses: release-drafter/release-drafter@v6 33 | env: 34 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 35 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | # Ruff version. 4 | rev: v0.8.4 5 | hooks: 6 | # Run the linter. 7 | - id: ruff 8 | args: [--fix] 9 | # Run the formatter. 
10 | - id: ruff-format 11 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.12.7 2 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": ["esbenp.prettier-vscode", "charliermarsh.ruff"] 3 | } 4 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.formatOnSave": true, 3 | "editor.defaultFormatter": "esbenp.prettier-vscode", 4 | "[python]": { 5 | "editor.defaultFormatter": "charliermarsh.ruff", 6 | "editor.formatOnSave": true, 7 | "editor.rulers": [] 8 | }, 9 | "yaml.schemas": { 10 | "./.vscode/fastagent.config.schema.json": [ 11 | "fastagent.config.yaml", 12 | "fastagent.secrets.yaml" 13 | ] 14 | }, 15 | "editor.fontFamily": "BlexMono Nerd Font", 16 | "python.testing.pytestArgs": ["tests"], 17 | "python.testing.unittestEnabled": false, 18 | "python.testing.pytestEnabled": true, 19 | "python.analysis.typeCheckingMode": "standard" 20 | } 21 | -------------------------------------------------------------------------------- /examples/custom-agents/agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.agents.base_agent import BaseAgent 4 | from mcp_agent.core.fastagent import FastAgent 5 | 6 | # Create the application 7 | fast = FastAgent("fast-agent example") 8 | 9 | 10 | class MyAgent(BaseAgent): 11 | async def initialize(self): 12 | await super().initialize() 13 | print("it's a-me!...Mario!") 14 | 15 | 16 | # Define the agent 17 | @fast.custom(MyAgent, instruction="You are a helpful AI Agent") 18 | async def main(): 19 | # use the --model command line switch or agent arguments to change model 20 | async with fast.run() as agent: 21 | await agent.interactive() 22 | 23 | 24 | if __name__ == "__main__": 25 | asyncio.run(main()) 26 | -------------------------------------------------------------------------------- /examples/custom-agents/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # Please edit this configuration file to match your environment (on Windows). 2 | # Examples in comments below - check/change the paths. 3 | # 4 | # 5 | 6 | execution_engine: asyncio 7 | logger: 8 | type: file 9 | level: error 10 | truncate_tools: true 11 | 12 | mcp: 13 | servers: 14 | filesystem: 15 | # On windows update the command and arguments to use `node` and the absolute path to the server. 16 | # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. 
17 | # Use `npm -g root` to find the global node_modules path. 18 | # command: "node" 19 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."] 20 | command: "npx" 21 | args: ["-y", "@modelcontextprotocol/server-filesystem", "."] 22 | fetch: 23 | command: "uvx" 24 | args: ["mcp-server-fetch"] 25 | -------------------------------------------------------------------------------- /examples/data-analysis/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: sonnet 2 | 3 | # on windows, adjust the mount point to be the full path e.g. x:/temp/data-analysis/mount-point:/mnt/data/ 4 | 5 | mcp: 6 | servers: 7 | interpreter: 8 | command: "docker" 9 | args: 10 | [ 11 | "run", 12 | "-i", 13 | "--rm", 14 | "--pull=always", 15 | "-v", 16 | "./mount-point:/mnt/data/", 17 | "ghcr.io/evalstate/mcp-py-repl:latest", 18 | ] 19 | roots: 20 | - uri: "file://./mount-point/" 21 | name: "test_data" 22 | server_uri_alias: "file:///mnt/data/" 23 | filesystem: 24 | # On windows update the command and arguments to use `node` and the absolute path to the server. 25 | # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. 26 | # Use `npm -g root` to find the global node_modules path. 27 | # command: "node" 28 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."] 29 | command: "npx" 30 | args: ["-y", "@modelcontextprotocol/server-filesystem", "./mount-point/"] 31 | fetch: 32 | command: "uvx" 33 | args: ["mcp-server-fetch"] 34 | brave: 35 | # On windows replace the command and args line to use `node` and the absolute path to the server. 36 | # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
37 | # Use `npm -g root` to find the global node_modules path. 38 | # command: "node" 39 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"] 40 | command: "npx" 41 | args: ["-y", "@modelcontextprotocol/server-brave-search"] 42 | -------------------------------------------------------------------------------- /examples/mcp/state-transfer/agent_one.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("fast-agent agent_one (mcp server)") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(name="agent_one", instruction="You are a helpful AI Agent.") 11 | async def main(): 12 | # use the --model command line switch or agent arguments to change model 13 | async with fast.run() as agent: 14 | await agent.interactive() 15 | 16 | 17 | if __name__ == "__main__": 18 | asyncio.run(main()) 19 | -------------------------------------------------------------------------------- /examples/mcp/state-transfer/agent_two.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("fast-agent agent_two (mcp client)") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(name="agent_two", instruction="You are a helpful AI Agent.", servers=["agent_one"]) 11 | async def main(): 12 | # use the --model command line switch or agent arguments to change model 13 | async with fast.run() as agent: 14 | await agent.interactive() 15 | 16 | 17 | if __name__ == "__main__": 18 | asyncio.run(main()) 19 | -------------------------------------------------------------------------------- /examples/mcp/state-transfer/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # Model string takes format: 2 | # <provider>.<model>.<reasoning_effort> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 3 | # 4 | # Can be overridden with a command line switch --model=<model>, or within the Agent decorator.
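# (Hypothetical invocation for illustration: `uv run agent_two.py --model=sonnet` — any supported provider/model string works.)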
5 | # Check here for current details: https://fast-agent.ai/models/ 6 | 7 | # set the default model for fast-agent below: 8 | default_model: gpt-4.1 9 | 10 | # Logging and Console Configuration: 11 | logger: 12 | # Switched off to avoid polluting the console 13 | progress_display: false 14 | 15 | # Show chat User/Assistant messages on the console 16 | show_chat: true 17 | # Show tool calls on the console 18 | show_tools: true 19 | # Truncate long tool responses on the console 20 | truncate_tools: true 21 | 22 | # MCP Servers 23 | mcp: 24 | servers: 25 | agent_one: 26 | transport: sse 27 | url: http://localhost:8001/sse 28 | -------------------------------------------------------------------------------- /examples/mcp/vision-examples/cat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/examples/mcp/vision-examples/cat.png -------------------------------------------------------------------------------- /examples/mcp/vision-examples/example1.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from pathlib import Path 3 | 4 | from mcp_agent.core.fastagent import FastAgent 5 | from mcp_agent.core.prompt import Prompt 6 | 7 | # Create the application 8 | fast = FastAgent("fast-agent example") 9 | 10 | 11 | # Define the agent 12 | @fast.agent(instruction="You are a helpful AI Agent", servers=["filesystem"]) 13 | async def main(): 14 | # use the --model command line switch or agent arguments to change model 15 | async with fast.run() as agent: 16 | await agent.default.generate( 17 | [ 18 | Prompt.user( 19 | Path("cat.png"), "Write a report on the content of the image to 'report.md'" 20 | ) 21 | ] 22 | ) 23 | await agent.interactive() 24 | 25 | 26 | if __name__ == "__main__": 27 | asyncio.run(main()) 28 | -------------------------------------------------------------------------------- /examples/mcp/vision-examples/example2.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("fast-agent example") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(instruction="You are a helpful AI Agent", servers=["filesystem"]) 11 | async def main(): 12 | # use the --model command line switch or agent arguments to change model 13 | async with fast.run() as agent: 14 | await agent.interactive() 15 | 16 | 17 | if __name__ == "__main__": 18 | asyncio.run(main()) 19 | -------------------------------------------------------------------------------- /examples/mcp/vision-examples/example3.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("fast-agent example") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(instruction="You are a helpful AI Agent", servers=["webcam", "hfspace"]) 11 | async def main(): 12 | async with fast.run() as agent: 13 | await agent.interactive( 14 | default_prompt="take an image with the webcam, describe it to flux to " 15 | "reproduce it and then judge the quality of the result" 16 | ) 17 | 18 | 19 | if __name__ == "__main__": 20 | asyncio.run(main()) 21 | -------------------------------------------------------------------------------- /examples/mcp/vision-examples/fastagent.config.yaml: 
-------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # <provider>.<model>.<reasoning_effort> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4.1, gpt-4.1-mini, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=<model>, or within the Agent constructor. 12 | 13 | default_model: haiku 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | 22 | # Switch the progress display on or off 23 | progress_display: true 24 | 25 | # Show chat User/Assistant messages on the console 26 | show_chat: true 27 | # Show tool calls on the console 28 | show_tools: true 29 | # Truncate long tool responses on the console 30 | truncate_tools: true 31 | 32 | # MCP Servers 33 | mcp: 34 | servers: 35 | fetch: 36 | command: "uvx" 37 | args: ["mcp-server-fetch"] 38 | filesystem: 39 | command: "npx" 40 | args: ["-y", "@modelcontextprotocol/server-filesystem", "."] 41 | webcam: 42 | command: "npx" 43 | args: ["-y","@llmindset/mcp-webcam"] 44 | hfspace: 45 | command: "npx" 46 | args: ["-y","@llmindset/mcp-hfspace"] 47 | 48 | -------------------------------------------------------------------------------- /examples/otel/agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Annotated 3 | 4 | from pydantic import BaseModel, Field 5 | 6 | from mcp_agent.core.fastagent import FastAgent 7 | from mcp_agent.core.prompt import Prompt 8 | from mcp_agent.core.request_params import RequestParams 9 | 10 | # Create the application 11 | fast = FastAgent("fast-agent example") 12 | 13 | 14 | class FormattedResponse(BaseModel): 15 | thinking: Annotated[ 16 | str, Field(description="Your reflection on the conversation that is not seen by the user.") 17 | ] 18 | message: str 19 | 20 | 21 | # Define the agent 22 | @fast.agent( 23 | name="chat",
24 | instruction="You are a helpful AI Agent", 25 | servers=["fetch"], 26 | request_params=RequestParams(maxTokens=8192), 27 | ) 28 | async def main(): 29 | # use the --model command line switch or agent arguments to change model 30 | async with fast.run() as agent: 31 | response = await agent.chat.generate( 32 | multipart_messages=[Prompt.user("Let's talk about guitars. Fetch from wikipedia")], 33 | ) 34 | 35 | 36 | if __name__ == "__main__": 37 | asyncio.run(main()) 38 | -------------------------------------------------------------------------------- /examples/otel/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | jaeger: 3 | image: jaegertracing/jaeger:2.5.0 4 | container_name: jaeger 5 | ports: 6 | - "16686:16686" # Web UI 7 | - "4317:4317" # OTLP gRPC 8 | - "4318:4318" # OTLP HTTP 9 | - "5778:5778" # Config server 10 | - "9411:9411" # Zipkin compatible 11 | restart: unless-stopped 12 | 13 | -------------------------------------------------------------------------------- /examples/otel/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # <provider>.<model>.<reasoning_effort> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4.1, gpt-4.1-mini, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=<model>, or within the Agent constructor. 12 | 13 | default_model: haiku 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | 31 | otel: 32 | enabled: true # Enable or disable OpenTelemetry 33 | 34 | # MCP Servers 35 | mcp: 36 | servers: 37 | fetch: 38 | command: "uvx" 39 | args: ["mcp-server-fetch"] 40 | filesystem: 41 | command: "npx" 42 | args: ["-y", "@modelcontextprotocol/server-filesystem", "."] 43 | think: 44 | command: "mcp-think-tool" 45 | -------------------------------------------------------------------------------- /examples/researcher/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Please edit this configuration file to match your environment (on Windows). 3 | # Examples in comments below - check/change the paths. 4 | # 5 | # 6 | 7 | logger: 8 | type: console 9 | level: error 10 | truncate_tools: true 11 | 12 | mcp: 13 | servers: 14 | brave: 15 | # On windows replace the command and args line to use `node` and the absolute path to the server. 16 | # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
17 | # Use `npm -g root` to find the global node_modules path. 18 | # command: "node" 19 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"] 20 | command: "npx" 21 | args: ["-y", "@modelcontextprotocol/server-brave-search"] 22 | env: 23 | # You can also place your BRAVE_API_KEY in the fastagent.secrets.yaml file. 24 | BRAVE_API_KEY: 25 | filesystem: 26 | # On windows update the command and arguments to use `node` and the absolute path to the server. 27 | # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. 28 | # Use `npm -g root` to find the global node_modules path. 29 | # command: "node" 30 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","./agent_folder"] 31 | command: "npx" 32 | args: ["-y", "@modelcontextprotocol/server-filesystem", "./agent_folder/"] 33 | interpreter: 34 | command: "docker" 35 | args: [ 36 | "run", 37 | "-i", 38 | "--rm", 39 | "--pull=always", 40 | "-v", 41 | "./agent_folder:/mnt/data/", 42 | # Docker needs the absolute path on Windows (e.g. "x:/fastagent/agent_folder:/mnt/data/") 43 | # "./agent_folder:/mnt/data/", 44 | "ghcr.io/evalstate/mcp-py-repl:latest", 45 | ] 46 | roots: 47 | - uri: "file://./agent_folder/" 48 | name: "agent_folder" 49 | server_uri_alias: "file:///mnt/data/" 50 | fetch: 51 | command: "uvx" 52 | args: ["mcp-server-fetch"] 53 | sequential: 54 | command: "npx" 55 | args: ["-y", "@modelcontextprotocol/server-sequential-thinking"] 56 | # webmcp: 57 | # command: "node" 58 | # args: ["/home/ssmith/.webmcp/server.cjs"] 59 | # env: 60 | # WEBMCP_SERVER_TOKEN: 96e22896d8143fc1d61fec09208fc5ed 61 | 62 | -------------------------------------------------------------------------------- /examples/researcher/researcher-eval.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | agents = FastAgent(name="Researcher Agent (EO)") 6 | 7 | 8 | @agents.agent( 9 | name="Researcher", 10 | instruction=""" 11 | You are a research assistant, with access to internet search (via Brave), 12 | website fetch, a python interpreter (you can install packages with uv) and a filesystem. 13 | Use the current working directory to save and create files with both the Interpreter and Filesystem tools. 14 | The interpreter has numpy, pandas, matplotlib and seaborn already installed. 15 | 16 | You must always provide a summary of the specific sources you have used in your research. 17 | """, 18 | servers=["brave", "interpreter", "filesystem", "fetch"], 19 | ) 20 | @agents.agent( 21 | name="Evaluator", 22 | model="sonnet", 23 | instruction=""" 24 | Evaluate the response from the researcher based on the criteria: 25 | - Sources cited. Has the researcher provided a summary of the specific sources used in the research? 26 | - Validity. Has the researcher cross-checked and validated data and assumptions? 27 | - Alignment. Has the researcher acted on and addressed feedback from any previous assessments? 28 | 29 | For each criterion: 30 | - Provide a rating (EXCELLENT, GOOD, FAIR, or POOR). 31 | - Offer specific feedback or suggestions for improvement. 32 | 33 | Summarize your evaluation as a structured response with: 34 | - Overall quality rating.
35 | - Specific feedback and areas for improvement.""", 36 | ) 37 | @agents.evaluator_optimizer( 38 | generator="Researcher", 39 | evaluator="Evaluator", 40 | max_refinements=5, 41 | min_rating="EXCELLENT", 42 | name="Researcher_Evaluator", 43 | ) 44 | async def main() -> None: 45 | async with agents.run() as agent: 46 | await agent.prompt("Researcher_Evaluator") 47 | 48 | print("Ask follow-up questions to the Researcher?") 49 | await agent.prompt("Researcher", default_prompt="STOP") 50 | 51 | 52 | if __name__ == "__main__": 53 | asyncio.run(main()) 54 | -------------------------------------------------------------------------------- /examples/researcher/researcher.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # from rich import print 6 | 7 | agents = FastAgent(name="Researcher Agent") 8 | 9 | 10 | @agents.agent( 11 | "Researcher", 12 | instruction=""" 13 | You are a research assistant, with access to internet search (via Brave), 14 | website fetch, a python interpreter (you can install packages with uv) and a filesystem. 15 | Use the current working directory to save and create files with both the Interpreter and Filesystem tools. 16 | The interpreter has numpy, pandas, matplotlib and seaborn already installed. 17 | """, 18 | servers=["brave", "interpreter", "filesystem", "fetch"], 19 | ) 20 | async def main() -> None: 21 | research_prompt = """ 22 | Produce an investment report for the company Eutelsat. The final report should be saved in the filesystem in markdown format, and 23 | contain at least the following: 24 | 1 - A brief description of the company 25 | 2 - Current financial position (find data, create and incorporate charts) 26 | 3 - A PESTLE analysis 27 | 4 - An investment thesis for the next 3 years. Include both 'buy side' and 'sell side' arguments, and a final 28 | summary and recommendation. 29 | Today's date is 15 February 2025.
Include the main data sources consulted in presenting the report.""" # noqa: F841 30 | 31 | async with agents.run() as agent: 32 | await agent.prompt() 33 | 34 | 35 | if __name__ == "__main__": 36 | asyncio.run(main()) 37 | -------------------------------------------------------------------------------- /examples/tensorzero/.env.sample: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | ANTHROPIC_API_KEY= 3 | -------------------------------------------------------------------------------- /examples/tensorzero/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all 2 | 3 | build: 4 | docker compose build 5 | 6 | up: 7 | docker compose up -d 8 | 9 | logs: 10 | docker compose logs -f 11 | 12 | tensorzero-logs: 13 | docker compose logs -f gateway 14 | 15 | mcp-logs: 16 | docker compose logs -f mcp-server 17 | 18 | minio-logs: 19 | docker compose logs -f minio 20 | 21 | stop: 22 | docker compose stop 23 | 24 | agent: 25 | uv run agent.py --model=tensorzero.test_chat 26 | 27 | simple-agent: 28 | uv run simple_agent.py --model=tensorzero.simple_chat 29 | 30 | image-test: 31 | uv run image_demo.py 32 | -------------------------------------------------------------------------------- /examples/tensorzero/README.md: -------------------------------------------------------------------------------- 1 | # About the tensorzero / fast-agent integration 2 | 3 | [TensorZero](https://www.tensorzero.com/) is an open source project designed to help LLM application developers rapidly improve their inference calls. Its core features include: 4 | 5 | - A uniform inference interface to all leading LLM platforms. 6 | - The ability to dynamically route to different platforms and to program failovers. 7 | - Automated parameter tuning and training. 8 | - Advanced templating features for your system prompts. 9 | - Organization of LLM inference data into a ClickHouse DB, allowing for sophisticated downstream analytics. 10 | - A bunch of other good stuff is always in development. 11 | 12 | `tensorzero` is powerful but heavy, so we provide here a quickstart example that combines the basic components of `fast-agent`, an MCP server, `tensorzero`, and other supporting services into a cohesive whole. 13 | 14 | ## Quickstart guide 15 | 16 | - Build and activate the `uv` `fast-agent` environment 17 | - Ensure that ports `3000`, `4000`, `8000`, `9000`, and `9001` are unallocated before running this demo. 18 | - Run `cp .env.sample .env` and then drop in at least one of `OPENAI_API_KEY` or `ANTHROPIC_API_KEY`. Make sure the accounts are funded. 19 | - `make up` 20 | - `make agent` 21 | 22 | The demo tests our implementation's ability to: 23 | 24 | - Implement the T0 model gateway as an inference backend 25 | - Implement T0's dynamic templating feature 26 | - Have in-conversation memory 27 | - Describe and execute tool calls 28 | - Remember previous tool calls 29 | 30 | A version of a conversation to test all of this could be: 31 | 32 | ``` 33 | Hi. 34 | 35 | Tell me a poem. 36 | 37 | Do you have any tools that you can use? 38 | 39 | Please demonstrate the use of that tool on your last response. 40 | 41 | Please summarize the conversation so far. 42 | 43 | What tool calls have you executed in this session, and what were their results?
44 | ``` 45 | 46 | ## Multimodal support 47 | 48 | Run `make image-test` to test the gateway's ability to handle base64-encoded image data. 49 | 50 | ## Development notes 51 | 52 | - `make stop` will stop the MCP server and the tensorzero server 53 | - `make tensorzero-logs` will tail the tensorzero server logs 54 | - `make mcp-logs` will tail the MCP server logs 55 | - Generic `make logs` dumps all log output from all services to the terminal 56 | -------------------------------------------------------------------------------- /examples/tensorzero/agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | from mcp_agent.core.request_params import RequestParams 5 | 6 | # Explicitly provide the path to the config file in the current directory 7 | CONFIG_FILE = "fastagent.config.yaml" 8 | fast = FastAgent("fast-agent example", config_path=CONFIG_FILE, ignore_unknown_args=True) 9 | 10 | # Define T0 system variables here 11 | my_t0_system_vars = { 12 | "TEST_VARIABLE_1": "Roses are red", 13 | "TEST_VARIABLE_2": "Violets are blue", 14 | "TEST_VARIABLE_3": "Sugar is sweet", 15 | "TEST_VARIABLE_4": "Vibe code responsibly 👍", 16 | } 17 | 18 | 19 | @fast.agent( 20 | name="default", 21 | instruction=""" 22 | You are an agent dedicated to helping developers understand the relationship between TensorZero and fast-agent. If the user makes a request 23 | that requires you to invoke the test tools, please do so. When you use the tool, describe your rationale for doing so. 24 | """, 25 | servers=["tester"], 26 | request_params=RequestParams(template_vars=my_t0_system_vars), 27 | ) 28 | async def main(): 29 | async with fast.run() as agent_app: # Get the AgentApp wrapper 30 | agent_name = "default" 31 | print("\nStarting interactive session with template_vars set via decorator...") 32 | await agent_app.interactive(agent=agent_name) 33 | 34 | 35 | if __name__ == "__main__": 36 | asyncio.run(main()) # type: ignore 37 | -------------------------------------------------------------------------------- /examples/tensorzero/demo_images/clam.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/examples/tensorzero/demo_images/clam.jpg -------------------------------------------------------------------------------- /examples/tensorzero/demo_images/crab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/examples/tensorzero/demo_images/crab.png -------------------------------------------------------------------------------- /examples/tensorzero/demo_images/shrimp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/examples/tensorzero/demo_images/shrimp.png -------------------------------------------------------------------------------- /examples/tensorzero/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: haiku 2 | 3 | tensorzero: 4 | base_url: http://localhost:3000 5 | 6 | logger: 7 | level: "info" 8 | progress_display: true 9 | show_chat: true 10 | show_tools: true 11 | truncate_tools: true 12 | 13 | mcp: 14 | servers: 15 | tester: 16 | transport: "sse" 17 |
url: "http://localhost:8000/t0-example-server/sse" 18 | read_transport_sse_timeout_seconds: 300 19 | -------------------------------------------------------------------------------- /examples/tensorzero/image_demo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import base64 3 | import mimetypes 4 | from pathlib import Path 5 | from typing import List, Union 6 | 7 | from mcp.types import ImageContent, TextContent 8 | 9 | from mcp_agent.core.fastagent import FastAgent 10 | from mcp_agent.core.prompt import Prompt 11 | from mcp_agent.core.request_params import RequestParams 12 | 13 | AGENT_NAME = "tensorzero_image_tester" 14 | TENSORZERO_MODEL = "tensorzero.test_chat" 15 | TEXT_PROMPT = ( 16 | "Provide a description of the similarities and differences between these three images." 17 | ) 18 | LOCAL_IMAGE_FILES = [ 19 | Path("./demo_images/clam.jpg"), 20 | Path("./demo_images/shrimp.png"), 21 | Path("./demo_images/crab.png"), 22 | ] 23 | 24 | MY_T0_SYSTEM_VARS = { 25 | "TEST_VARIABLE_1": "Roses are red", 26 | "TEST_VARIABLE_2": "Violets are blue", 27 | "TEST_VARIABLE_3": "Sugar is sweet", 28 | "TEST_VARIABLE_4": "Vibe code responsibly 👍", 29 | } 30 | 31 | fast = FastAgent("TensorZero Image Demo - Base64 Only") 32 | 33 | 34 | @fast.agent( 35 | name=AGENT_NAME, 36 | model=TENSORZERO_MODEL, 37 | request_params=RequestParams(template_vars=MY_T0_SYSTEM_VARS), 38 | ) 39 | async def main(): 40 | content_parts: List[Union[TextContent, ImageContent]] = [] 41 | content_parts.append(TextContent(type="text", text=TEXT_PROMPT)) 42 | 43 | for file_path in LOCAL_IMAGE_FILES: 44 | mime_type, _ = mimetypes.guess_type(file_path) 45 | if not mime_type or not mime_type.startswith("image/"): 46 | ext = file_path.suffix.lower() 47 | if ext == ".jpg" or ext == ".jpeg": 48 | mime_type = "image/jpeg" 49 | elif ext == ".png": 50 | mime_type = "image/png" 51 | if mime_type is None: 52 | mime_type = "image/png" # Default fallback if still None 53 | 54 | with open(file_path, "rb") as image_file: 55 | image_bytes = image_file.read() 56 | 57 | encoded_data = base64.b64encode(image_bytes).decode("utf-8") 58 | content_parts.append(ImageContent(type="image", mimeType=mime_type, data=encoded_data)) 59 | 60 | message = Prompt.user(*content_parts) 61 | async with fast.run() as agent_app: 62 | agent = getattr(agent_app, AGENT_NAME) 63 | await agent.send(message) 64 | 65 | 66 | if __name__ == "__main__": 67 | asyncio.run(main()) # type: ignore 68 | -------------------------------------------------------------------------------- /examples/tensorzero/mcp_server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12-slim 2 | 3 | WORKDIR /app 4 | 5 | RUN apt-get update && apt-get install -y curl wget && \ 6 | wget https://dl.min.io/client/mc/release/linux-amd64/mc -O /usr/local/bin/mc && \ 7 | chmod +x /usr/local/bin/mc && \ 8 | apt-get clean && rm -rf /var/lib/apt/lists/* 9 | 10 | RUN pip install uv 11 | 12 | COPY pyproject.toml /app/ 13 | COPY uv.lock /app/ 14 | COPY LICENSE /app/ 15 | COPY README.md /app/ 16 | 17 | RUN uv pip install --system . 
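# The RUN above installs the project defined by the copied pyproject.toml/uv.lock into the image's system Python, so mcp_server.py can import its dependencies without a virtualenv.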
18 | 19 | COPY examples/tensorzero/mcp_server/mcp_server.py /app/ 20 | COPY examples/tensorzero/mcp_server/entrypoint.sh /app/entrypoint.sh 21 | 22 | RUN chmod +x /app/entrypoint.sh 23 | 24 | EXPOSE 8000 25 | 26 | ENTRYPOINT ["/app/entrypoint.sh"] 27 | 28 | CMD ["uvicorn", "mcp_server:app", "--host", "0.0.0.0", "--port", "8000"] 29 | -------------------------------------------------------------------------------- /examples/tensorzero/mcp_server/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Entrypoint: Waiting for MinIO to be healthy..." 4 | 5 | # Simple loop to check MinIO health endpoint (within the Docker network) 6 | # Adjust timeout as needed 7 | TIMEOUT=60 8 | START_TIME=$(date +%s) 9 | while ! curl -sf http://minio:9000/minio/health/live > /dev/null; do 10 | CURRENT_TIME=$(date +%s) 11 | ELAPSED=$(($CURRENT_TIME - $START_TIME)) 12 | if [ $ELAPSED -ge $TIMEOUT ]; then 13 | echo "Entrypoint: Timeout waiting for MinIO!" 14 | exit 1 15 | fi 16 | echo "Entrypoint: MinIO not ready, sleeping..." 17 | sleep 2 18 | done 19 | echo "Entrypoint: MinIO is healthy." 20 | 21 | echo "Entrypoint: Configuring mc client and creating bucket 'tensorzero'..." 22 | 23 | # Configure mc to talk to the MinIO server using the service name 24 | # Use --insecure because we are using http 25 | mc --insecure alias set local http://minio:9000 user password 26 | 27 | # Create the bucket if it doesn't exist 28 | # Use --insecure because we are using http 29 | mc --insecure ls local/tensorzero > /dev/null 2>&1 || mc --insecure mb local/tensorzero 30 | 31 | echo "Entrypoint: Bucket 'tensorzero' check/creation complete." 32 | 33 | echo "Entrypoint: Executing the main container command: $@" 34 | 35 | exec "$@" 36 | -------------------------------------------------------------------------------- /examples/tensorzero/mcp_server/mcp_server.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | from mcp.server.fastmcp.server import FastMCP 3 | from starlette.applications import Starlette 4 | from starlette.routing import Mount 5 | 6 | SERVER_PATH = "t0-example-server" 7 | 8 | 9 | mcp_instance = FastMCP(name="t0-example-server") 10 | mcp_instance.settings.message_path = f"/{SERVER_PATH}/messages/" 11 | mcp_instance.settings.sse_path = f"/{SERVER_PATH}/sse" 12 | 13 | 14 | @mcp_instance.tool() 15 | def example_tool(input_text: str) -> str: 16 | """Example tool that reverses the text of a given string.""" 17 | reversed_text = input_text[::-1] 18 | return reversed_text 19 | 20 | 21 | app = Starlette( 22 | routes=[ 23 | Mount("/", app=mcp_instance.sse_app()), 24 | ] 25 | ) 26 | 27 | if __name__ == "__main__": 28 | print(f"Starting minimal MCP server ({mcp_instance.name}) on http://127.0.0.1:8000") 29 | print(f" -> SSE endpoint: {mcp_instance.settings.sse_path}") 30 | print(f" -> Message endpoint: {mcp_instance.settings.message_path}") 31 | uvicorn.run(app, host="127.0.0.1", port=8000) 32 | -------------------------------------------------------------------------------- /examples/tensorzero/simple_agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | CONFIG_FILE = "fastagent.config.yaml" 6 | fast = FastAgent("fast-agent example", config_path=CONFIG_FILE, ignore_unknown_args=True) 7 | 8 | 9 | @fast.agent( 10 | name="default", 11 | instruction=""" 12 | You are an agent dedicated to 
helping developers understand the relationship between TensorZero and fast-agent. If the user makes a request 13 | that requires you to invoke the test tools, please do so. When you use the tool, describe your rationale for doing so. 14 | """, 15 | servers=["tester"], 16 | ) 17 | async def main(): 18 | async with fast.run() as agent_app: 19 | agent_name = "default" 20 | print("\nStarting interactive session with template_vars set via decorator...") 21 | await agent_app.interactive(agent=agent_name) 22 | 23 | 24 | if __name__ == "__main__": 25 | asyncio.run(main()) # type: ignore 26 | -------------------------------------------------------------------------------- /examples/tensorzero/tensorzero_config/system_schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "required": [ 5 | "TEST_VARIABLE_1", 6 | "TEST_VARIABLE_2", 7 | "TEST_VARIABLE_3", 8 | "TEST_VARIABLE_4" 9 | ], 10 | "properties": { 11 | "TEST_VARIABLE_1": { 12 | "type": "string", 13 | "description": "Test variable 1" 14 | }, 15 | "TEST_VARIABLE_2": { 16 | "type": "string", 17 | "description": "Test variable 2" 18 | }, 19 | "TEST_VARIABLE_3": { 20 | "type": "string", 21 | "description": "Test variable 3" 22 | }, 23 | "TEST_VARIABLE_4": { 24 | "type": "string", 25 | "description": "Test variable 4" 26 | } 27 | }, 28 | "additionalProperties": false 29 | } 30 | -------------------------------------------------------------------------------- /examples/tensorzero/tensorzero_config/system_template.minijinja: -------------------------------------------------------------------------------- 1 | Your purpose is to demo the integration between tensorzero, an open source project dedicated to improving LLM inference calls, and fast-agent, a framework for MCP-compatible agentic modelling. When interacting with developers, strive to be as helpful as possible, specifically as it relates to technical questions about tensorzero, fast-agent, or MCPs.
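{# Note: the TEST_VARIABLE_* placeholders below are filled at request time from the template_vars that fast-agent passes via RequestParams (see agent.py); system_schema.json declares all four as required. #}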
2 | 3 | If you are ever asked to recite a poem, recite this one: 4 | 5 | {{ TEST_VARIABLE_1 }} 6 | 7 | {{ TEST_VARIABLE_2 }} 8 | 9 | {{ TEST_VARIABLE_3 }} 10 | 11 | {{ TEST_VARIABLE_4 }} 12 | -------------------------------------------------------------------------------- /examples/tensorzero/tensorzero_config/tensorzero.toml: -------------------------------------------------------------------------------- 1 | [functions.test_chat] 2 | type = "chat" 3 | system_schema = "./system_schema.json" 4 | 5 | [functions.test_chat.variants.gpt_4o_mini] 6 | type = "chat_completion" 7 | model = "openai::gpt-4o-mini" 8 | weight = 0.5 9 | system_template = "./system_template.minijinja" 10 | 11 | [functions.test_chat.variants.claude_3_5_haiku] 12 | type = "chat_completion" 13 | model = "anthropic::claude-3-5-haiku-20241022" 14 | weight = 0.5 15 | system_template = "./system_template.minijinja" 16 | 17 | [functions.simple_chat] 18 | type = "chat" 19 | 20 | [functions.simple_chat.variants.gpt_4o_mini] 21 | type = "chat_completion" 22 | model = "openai::gpt-4o-mini" 23 | weight = 0.5 24 | 25 | [functions.simple_chat.variants.claude_3_5_haiku] 26 | type = "chat_completion" 27 | model = "anthropic::claude-3-5-haiku-20241022" 28 | weight = 0.5 29 | 30 | # Object Storage Configuration for MinIO, simulating AWS S3 bucket 31 | [object_storage] 32 | type = "s3_compatible" 33 | endpoint = "http://minio:9000" 34 | bucket_name = "tensorzero" 35 | allow_http = true 36 | -------------------------------------------------------------------------------- /examples/workflows/chaining.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("Agent Chaining") 7 | 8 | 9 | @fast.agent( 10 | "url_fetcher", 11 | instruction="Given a URL, provide a complete and comprehensive summary", 12 | servers=["fetch"], 13 | ) 14 | @fast.agent( 15 | "social_media", 16 | instruction=""" 17 | Write a 280 character social media post for any given text. 18 | Respond only with the post, never use hashtags. 19 | """, 20 | ) 21 | @fast.chain( 22 | name="post_writer", 23 | sequence=["url_fetcher", "social_media"], 24 | ) 25 | async def main() -> None: 26 | async with fast.run() as agent: 27 | # using chain workflow 28 | await agent.post_writer.send("https://llmindset.co.uk") 29 | 30 | 31 | # alternative syntax for above is result = agent["post_writer"].send(message) 32 | # alternative syntax for above is result = agent["post_writer"].prompt() 33 | 34 | 35 | if __name__ == "__main__": 36 | asyncio.run(main()) 37 | -------------------------------------------------------------------------------- /examples/workflows/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # Please edit this configuration file to match your environment (on Windows). 2 | # Examples in comments below - check/change the paths. 3 | # 4 | # 5 | 6 | logger: 7 | type: file 8 | level: error 9 | truncate_tools: true 10 | 11 | mcp: 12 | servers: 13 | filesystem: 14 | # On windows update the command and arguments to use `node` and the absolute path to the server. 15 | # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. 
16 | # Use `npm -g root` to find the global node_modules path. 17 | # command: "node" 18 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."] 19 | command: "npx" 20 | args: ["-y", "@modelcontextprotocol/server-filesystem", "."] 21 | fetch: 22 | command: "uvx" 23 | args: ["mcp-server-fetch"] 24 | -------------------------------------------------------------------------------- /examples/workflows/human_input.py: -------------------------------------------------------------------------------- 1 | """ 2 | Agent which demonstrates the Human Input tool 3 | """ 4 | 5 | import asyncio 6 | 7 | from mcp_agent.core.fastagent import FastAgent 8 | 9 | # Create the application 10 | fast = FastAgent("Human Input") 11 | 12 | 13 | # Define the agent 14 | @fast.agent( 15 | instruction="An AI agent that assists with basic tasks. Request Human Input when needed.", 16 | human_input=True, 17 | ) 18 | async def main() -> None: 19 | async with fast.run() as agent: 20 | # this usually causes the LLM to request the Human Input Tool 21 | await agent("print the next number in the sequence") 22 | await agent.prompt(default_prompt="STOP") 23 | 24 | 25 | if __name__ == "__main__": 26 | asyncio.run(main()) 27 | -------------------------------------------------------------------------------- /examples/workflows/parallel.py: -------------------------------------------------------------------------------- 1 | """ 2 | Parallel Workflow showing Fan Out and Fan In agents, using different models 3 | """ 4 | 5 | import asyncio 6 | from pathlib import Path 7 | 8 | from mcp_agent.core.fastagent import FastAgent 9 | from mcp_agent.core.prompt import Prompt 10 | 11 | # Create the application 12 | fast = FastAgent( 13 | "Parallel Workflow", 14 | ) 15 | 16 | 17 | @fast.agent( 18 | name="proofreader", 19 | instruction="""Review the short story for grammar, spelling, and punctuation errors. 20 | Identify any awkward phrasing or structural issues that could be improved for clarity. 21 | Provide detailed feedback on corrections.""", 22 | ) 23 | @fast.agent( 24 | name="fact_checker", 25 | instruction="""Verify the factual consistency within the story. Identify any contradictions, 26 | logical inconsistencies, or inaccuracies in the plot, character actions, or setting. 27 | Highlight potential issues with reasoning or coherence.""", 28 | model="gpt-4.1", 29 | ) 30 | @fast.agent( 31 | name="style_enforcer", 32 | instruction="""Analyze the story for adherence to style guidelines. 33 | Evaluate the narrative flow, clarity of expression, and tone. Suggest improvements to 34 | enhance storytelling, readability, and engagement.""", 35 | model="sonnet", 36 | ) 37 | @fast.agent( 38 | name="grader", 39 | instruction="""Compile the feedback from the Proofreader, Fact Checker, and Style Enforcer 40 | into a structured report. Summarize key issues and categorize them by type.
41 | Provide actionable recommendations for improving the story, 42 | and give an overall grade based on the feedback.""", 43 | model="o3-mini.low", 44 | ) 45 | @fast.parallel( 46 | fan_out=["proofreader", "fact_checker", "style_enforcer"], 47 | fan_in="grader", 48 | name="parallel", 49 | ) 50 | async def main() -> None: 51 | async with fast.run() as agent: 52 | await agent.parallel.send( 53 | Prompt.user("Student short story submission", Path("short_story.txt")) 54 | ) 55 | 56 | 57 | if __name__ == "__main__": 58 | asyncio.run(main()) # type: ignore 59 | -------------------------------------------------------------------------------- /examples/workflows/router.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example MCP Agent application showing router workflow with decorator syntax. 3 | Demonstrates router's ability to either: 4 | 1. Use tools directly to handle requests 5 | 2. Delegate requests to specialized agents 6 | """ 7 | 8 | import asyncio 9 | 10 | from mcp_agent.core.fastagent import FastAgent 11 | 12 | # Create the application 13 | fast = FastAgent( 14 | "Router Workflow", 15 | ) 16 | 17 | # Sample requests demonstrating direct tool use vs agent delegation 18 | SAMPLE_REQUESTS = [ 19 | "Download and summarize https://llmindset.co.uk/posts/2024/12/mcp-build-notes/", # Router handles directly with fetch 20 | "Analyze the quality of the Python codebase in the current working directory", # Delegated to code expert 21 | "What are the key principles of effective beekeeping?", # Delegated to general assistant 22 | ] 23 | 24 | 25 | @fast.agent( 26 | name="fetcher", 27 | instruction="""You are an agent, with a tool enabling you to fetch URLs.""", 28 | servers=["fetch"], 29 | ) 30 | @fast.agent( 31 | name="code_expert", 32 | instruction="""You are an expert in code analysis and software engineering. 33 | When asked about code, architecture, or development practices, 34 | you provide thorough and practical insights.""", 35 | servers=["filesystem"], 36 | ) 37 | @fast.agent( 38 | name="general_assistant", 39 | instruction="""You are a knowledgeable assistant that provides clear, 40 | well-reasoned responses about general topics, concepts, and principles.""", 41 | ) 42 | @fast.router( 43 | name="route", 44 | model="sonnet", 45 | agents=["code_expert", "general_assistant", "fetcher"], 46 | ) 47 | async def main() -> None: 48 | async with fast.run() as agent: 49 | for request in SAMPLE_REQUESTS: 50 | await agent.route(request) 51 | 52 | 53 | if __name__ == "__main__": 54 | asyncio.run(main()) 55 | -------------------------------------------------------------------------------- /examples/workflows/short_story.md: -------------------------------------------------------------------------------- 1 | The Kittens Castle Adventuer 2 | 3 | One sunny day, three lil kittens name Whiskers, Socks, and Mittens was walkin threw a mystirus forrest. They hadnt never seen such a big forrest before! The trees was tall an spooky, an the ground was coverd in moss an stikks. 4 | 5 | Suddenlee, thru the trees, they sawd somthing HUUUUGE! It was a castell, but not just eny castell. This castell was made of sparkling chese an glittery windos. The turrits was so high they tuch the clowds, an the doars was big enuff for a elefant to walk threw! 6 | 7 | "Lookk!" sed Whiskers, his tale all poofy wit exsitement. "We fowned a castell!" Socks meowed loudly an jumped up an down. Mittens, who was the smallist kitten, just stared wit her big rond eyes. 
8 | 9 | They climed up the cheesy walls, slip-slidin on the smoth surfase. Inside, they discoverd rooms ful of soft pillows an dangling strings an shiny things that went JINGEL when they tuch them. It was like a kitten paradyse! 10 | 11 | But then, a big shadowy figur apeared... was it the castell gaurd? Or sumthing mor mystirus? The kittens hudeld togethar, there lil hearts beating fast. What wud happan next in there amazeing adventuer? 12 | 13 | THE END?? -------------------------------------------------------------------------------- /examples/workflows/short_story.txt: -------------------------------------------------------------------------------- 1 | The Battle of Glimmerwood 2 | 3 | In the heart of Glimmerwood, a mystical forest knowed for its radiant trees, a small village thrived. 4 | The villagers, who were live peacefully, shared their home with the forest's magical creatures, 5 | especially the Glimmerfoxes whose fur shimmer like moonlight. 6 | 7 | One fateful evening, the peace was shaterred when the infamous Dark Marauders attack. 8 | Lead by the cunning Captain Thorn, the bandits aim to steal the precious Glimmerstones which was believed to grant immortality. 9 | 10 | Amidst the choas, a young girl named Elara stood her ground, she rallied the villagers and devised a clever plan. 11 | Using the forests natural defenses they lured the marauders into a trap. 12 | As the bandits aproached the village square, a herd of Glimmerfoxes emerged, blinding them with their dazzling light, 13 | the villagers seized the opportunity to captured the invaders. 14 | 15 | Elara's bravery was celebrated and she was hailed as the "Guardian of Glimmerwood". 16 | The Glimmerstones were secured in a hidden grove protected by an ancient spell. 17 | 18 | However, not all was as it seemed. The Glimmerstones true power was never confirm, 19 | and whispers of a hidden agenda linger among the villagers. 20 | -------------------------------------------------------------------------------- /scripts/event_replay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Event Replay Script 3 | 4 | Replays events from a JSONL log file using rich_progress display. 
5 | """ 6 | 7 | import json 8 | import time 9 | from datetime import datetime 10 | from pathlib import Path 11 | 12 | import typer 13 | 14 | from mcp_agent.event_progress import convert_log_event 15 | from mcp_agent.logging.events import Event 16 | from mcp_agent.logging.rich_progress import RichProgressDisplay 17 | 18 | 19 | def load_events(path: Path) -> list[Event]: 20 | """Load events from JSONL file.""" 21 | events = [] 22 | with open(path) as f: 23 | for line in f: 24 | if line.strip(): 25 | raw_event = json.loads(line) 26 | # Convert from log format to event format 27 | event = Event( 28 | type=raw_event.get("level", "info").lower(), 29 | namespace=raw_event.get("namespace", ""), 30 | message=raw_event.get("message", ""), 31 | timestamp=datetime.fromisoformat(raw_event["timestamp"]), 32 | data=raw_event.get("data", {}), # Get data directly 33 | ) 34 | events.append(event) 35 | return events 36 | 37 | 38 | def main(log_file: str) -> None: 39 | """Replay MCP Agent events from a log file with progress display.""" 40 | # Load events from file 41 | events = load_events(Path(log_file)) 42 | 43 | # Initialize progress display 44 | progress = RichProgressDisplay() 45 | progress.start() 46 | 47 | try: 48 | # Process each event in sequence 49 | for event in events: 50 | progress_event = convert_log_event(event) 51 | if progress_event: 52 | # Add agent info to the progress event target from data 53 | progress.update(progress_event) 54 | # Add a small delay to make the replay visible 55 | time.sleep(1) 56 | except KeyboardInterrupt: 57 | pass 58 | finally: 59 | progress.stop() 60 | 61 | 62 | if __name__ == "__main__": 63 | typer.run(main) 64 | -------------------------------------------------------------------------------- /scripts/format.py: -------------------------------------------------------------------------------- 1 | # /// script 2 | # requires-python = ">=3.10" 3 | # dependencies = [ 4 | # "ruff", 5 | # "typer", 6 | # ] 7 | # /// 8 | 9 | import subprocess 10 | import sys 11 | 12 | import typer 13 | from rich import print 14 | 15 | 16 | def main(path: str = None) -> None: 17 | try: 18 | command = ["ruff", "format"] 19 | 20 | if path: 21 | command.append(path) 22 | 23 | # Run `ruff` and pipe output to the terminal 24 | process = subprocess.run( 25 | command, 26 | check=True, 27 | stdout=sys.stdout, # Redirect stdout to the terminal 28 | stderr=sys.stderr, # Redirect stderr to the terminal 29 | ) 30 | sys.exit(process.returncode) # Exit with the same code as the command 31 | except subprocess.CalledProcessError as e: 32 | print(f"Error: {e}") # Log the error in a user-friendly way 33 | sys.exit(e.returncode) # Exit with the error code from the command 34 | except FileNotFoundError: 35 | print("Error: `ruff` command not found. 
Make sure it's installed in the environment.") 36 | sys.exit(1) 37 | 38 | 39 | if __name__ == "__main__": 40 | typer.run(main) 41 | -------------------------------------------------------------------------------- /scripts/lint.py: -------------------------------------------------------------------------------- 1 | # /// script 2 | # requires-python = ">=3.10" 3 | # dependencies = [ 4 | # "ruff", 5 | # "typer", 6 | # ] 7 | # /// 8 | 9 | import subprocess 10 | import sys 11 | 12 | import typer 13 | from rich import print 14 | 15 | 16 | def main(fix: bool = False, watch: bool = False, path: str = None) -> None: 17 | try: 18 | command = ["ruff", "check"] 19 | if fix: 20 | command.append("--fix") 21 | 22 | if watch: 23 | command.append("--watch") 24 | 25 | if path: 26 | command.append(path) 27 | 28 | # Run `ruff` and pipe output to the terminal 29 | process = subprocess.run( 30 | command, 31 | check=True, 32 | stdout=sys.stdout, # Redirect stdout to the terminal 33 | stderr=sys.stderr, # Redirect stderr to the terminal 34 | ) 35 | sys.exit(process.returncode) # Exit with the same code as the command 36 | except subprocess.CalledProcessError as e: 37 | print(f"Error: {e}") # Log the error in a user-friendly way 38 | sys.exit(e.returncode) # Exit with the error code from the command 39 | except FileNotFoundError: 40 | print("Error: `ruff` command not found. Make sure it's installed in the environment.") 41 | sys.exit(1) 42 | 43 | 44 | if __name__ == "__main__": 45 | typer.run(main) 46 | -------------------------------------------------------------------------------- /scripts/test_package_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Build the package 4 | uv build 5 | 6 | # Extract version from the built wheel 7 | VERSION=$(ls dist/fast_agent_mcp-*.whl | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+' | head -1) 8 | 9 | # Create test folder 10 | TEST_DIR="dist/test_install" 11 | rm -rf "$TEST_DIR" 12 | mkdir -p "$TEST_DIR" 13 | cd "$TEST_DIR" 14 | 15 | # Create virtual environment 16 | uv venv .venv 17 | source .venv/bin/activate 18 | 19 | # Install the built package 20 | uv pip install ../../dist/fast_agent_mcp-$VERSION-py3-none-any.whl 21 | 22 | # Run the quickstart command 23 | fast-agent quickstart workflow 24 | 25 | # Check if workflows folder was created 26 | if [ -d "workflow" ]; then 27 | echo "✅ Test successful: workflows folder created!" 28 | else 29 | echo "❌ Test failed: workflows folder not created." 30 | exit 1 31 | fi 32 | 33 | 34 | # Run the quickstart command 35 | fast-agent quickstart state-transfer 36 | if [ -d "state-transfer" ]; then 37 | echo "✅ Test successful: state-transfer folder created!" 38 | else 39 | echo "❌ Test failed: state-transfer folder not created." 40 | exit 1 41 | fi 42 | 43 | # Deactivate the virtual environment 44 | deactivate 45 | 46 | echo "Test completed successfully!" 
47 | 48 | -------------------------------------------------------------------------------- /src/mcp_agent/__init__.py: -------------------------------------------------------------------------------- 1 | """fast-agent - (fast-agent-mcp) An MCP native agent application framework""" 2 | 3 | # Import important MCP types 4 | from mcp.types import ( 5 | CallToolResult, 6 | EmbeddedResource, 7 | GetPromptResult, 8 | ImageContent, 9 | Prompt, 10 | PromptMessage, 11 | ReadResourceResult, 12 | Role, 13 | TextContent, 14 | Tool, 15 | ) 16 | 17 | # Core agent components 18 | from mcp_agent.agents.agent import Agent, AgentConfig 19 | from mcp_agent.core.agent_app import AgentApp 20 | 21 | # Workflow decorators 22 | from mcp_agent.core.direct_decorators import ( 23 | agent, 24 | chain, 25 | evaluator_optimizer, 26 | orchestrator, 27 | parallel, 28 | router, 29 | ) 30 | 31 | # FastAgent components 32 | from mcp_agent.core.fastagent import FastAgent 33 | 34 | # Request configuration 35 | from mcp_agent.core.request_params import RequestParams 36 | 37 | # Core protocol interfaces 38 | from mcp_agent.mcp.interfaces import AgentProtocol, AugmentedLLMProtocol 39 | from mcp_agent.mcp.mcp_aggregator import MCPAggregator 40 | from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart 41 | 42 | __all__ = [ 43 | # MCP types 44 | "Prompt", 45 | "Tool", 46 | "CallToolResult", 47 | "TextContent", 48 | "ImageContent", 49 | "PromptMessage", 50 | "GetPromptResult", 51 | "ReadResourceResult", 52 | "EmbeddedResource", 53 | "Role", 54 | # Core protocols 55 | "AgentProtocol", 56 | "AugmentedLLMProtocol", 57 | # Core agent components 58 | "Agent", 59 | "AgentConfig", 60 | "MCPAggregator", 61 | "PromptMessageMultipart", 62 | # FastAgent components 63 | "FastAgent", 64 | "AgentApp", 65 | # Workflow decorators 66 | "agent", 67 | "orchestrator", 68 | "router", 69 | "chain", 70 | "parallel", 71 | "evaluator_optimizer", 72 | # Request configuration 73 | "RequestParams", 74 | ] 75 | -------------------------------------------------------------------------------- /src/mcp_agent/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/agents/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/agents/workflow/__init__.py: -------------------------------------------------------------------------------- 1 | # Workflow agents module 2 | -------------------------------------------------------------------------------- /src/mcp_agent/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/cli/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/cli/__main__.py: -------------------------------------------------------------------------------- 1 | from mcp_agent.cli.main import app 2 | 3 | # This must be here for the console entry points defined in pyproject.toml 4 | # DO NOT REMOVE! 
5 | 6 | if __name__ == "__main__": 7 | app() 8 | -------------------------------------------------------------------------------- /src/mcp_agent/cli/terminal.py: -------------------------------------------------------------------------------- 1 | from mcp_agent.console import console, error_console 2 | 3 | 4 | class Application: 5 | def __init__(self, verbosity: int = 0, enable_color: bool = True) -> None: 6 | self.verbosity = verbosity 7 | # Use the central console instances, respecting color setting 8 | if not enable_color: 9 | # Create new instances without color if color is disabled 10 | self.console = console.__class__(color_system=None) 11 | self.error_console = error_console.__class__(color_system=None, stderr=True) 12 | else: 13 | self.console = console 14 | self.error_console = error_console 15 | 16 | def log(self, message: str, level: str = "info") -> None: 17 | if (level == "info" or (level == "debug" and self.verbosity > 0) or level == "error"): 18 | if level == "error": 19 | self.error_console.print(f"[{level.upper()}] {message}") 20 | else: 21 | self.console.print(f"[{level.upper()}] {message}") 22 | 23 | def status(self, message: str): 24 | return self.console.status(f"[bold cyan]{message}[/bold cyan]") 25 | -------------------------------------------------------------------------------- /src/mcp_agent/console.py: -------------------------------------------------------------------------------- 1 | """ 2 | Centralized console configuration for MCP Agent. 3 | 4 | This module provides shared console instances for consistent output handling: 5 | - console: Main console for general output 6 | - error_console: Error console for application errors (writes to stderr) 7 | - server_console: Special console for MCP server output 8 | """ 9 | 10 | from rich.console import Console 11 | 12 | # Main console for general output 13 | console = Console( 14 | color_system="auto", 15 | ) 16 | 17 | # Error console for application errors 18 | error_console = Console( 19 | stderr=True, 20 | style="bold red", 21 | ) 22 | 23 | # Special console for MCP server output 24 | # This could have custom styling to distinguish server messages 25 | server_console = Console( 26 | # Not stderr since we want to maintain output ordering with other messages 27 | style="dim blue", # Or whatever style makes server output distinct 28 | ) 29 | -------------------------------------------------------------------------------- /src/mcp_agent/context_dependent.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | from typing import TYPE_CHECKING, Any, Optional 3 | 4 | if TYPE_CHECKING: 5 | from mcp_agent.context import Context 6 | 7 | 8 | class ContextDependent: 9 | """ 10 | Mixin class for components that need context access. 11 | Provides both global fallback and instance-specific context support. 12 | """ 13 | 14 | def __init__(self, context: Optional["Context"] = None, **kwargs: dict[str, Any]) -> None: 15 | self._context = context 16 | super().__init__(**kwargs) 17 | 18 | @property 19 | def context(self) -> "Context": 20 | """ 21 | Get context, with graceful fallback to global context if needed. 22 | Raises clear error if no context is available. 
23 | """ 24 | # First try instance context 25 | if self._context is not None: 26 | return self._context 27 | 28 | try: 29 | # Fall back to global context if available 30 | from mcp_agent.context import get_current_context 31 | 32 | return get_current_context() 33 | except Exception as e: 34 | raise RuntimeError( 35 | f"No context available for {self.__class__.__name__}. Either initialize MCPApp first or pass context explicitly." 36 | ) from e 37 | 38 | @contextmanager 39 | def use_context(self, context: "Context"): 40 | """Temporarily use a different context.""" 41 | old_context = self._context 42 | self._context = context 43 | try: 44 | yield 45 | finally: 46 | self._context = old_context 47 | -------------------------------------------------------------------------------- /src/mcp_agent/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/core/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/core/agent_types.py: -------------------------------------------------------------------------------- 1 | """ 2 | Type definitions for agents and agent configurations. 3 | """ 4 | 5 | from enum import Enum 6 | from typing import List 7 | 8 | from pydantic import BaseModel, Field, model_validator 9 | 10 | # Forward imports to avoid circular dependencies 11 | from mcp_agent.core.request_params import RequestParams 12 | 13 | 14 | class AgentType(Enum): 15 | """Enumeration of supported agent types.""" 16 | 17 | BASIC = "agent" 18 | CUSTOM = "custom" 19 | ORCHESTRATOR = "orchestrator" 20 | PARALLEL = "parallel" 21 | EVALUATOR_OPTIMIZER = "evaluator_optimizer" 22 | ROUTER = "router" 23 | CHAIN = "chain" 24 | 25 | 26 | class AgentConfig(BaseModel): 27 | """Configuration for an Agent instance""" 28 | 29 | name: str 30 | instruction: str = "You are a helpful agent." 31 | servers: List[str] = Field(default_factory=list) 32 | model: str | None = None 33 | use_history: bool = True 34 | default_request_params: RequestParams | None = None 35 | human_input: bool = False 36 | agent_type: AgentType = AgentType.BASIC 37 | default: bool = False 38 | 39 | @model_validator(mode="after") 40 | def ensure_default_request_params(self) -> "AgentConfig": 41 | """Ensure default_request_params exists with proper history setting""" 42 | if self.default_request_params is None: 43 | self.default_request_params = RequestParams( 44 | use_history=self.use_history, systemPrompt=self.instruction 45 | ) 46 | else: 47 | # Override the request params history setting if explicitly configured 48 | self.default_request_params.use_history = self.use_history 49 | return self 50 | -------------------------------------------------------------------------------- /src/mcp_agent/core/error_handling.py: -------------------------------------------------------------------------------- 1 | """ 2 | Error handling utilities for agent operations. 3 | """ 4 | 5 | from rich import print 6 | 7 | 8 | def handle_error(e: Exception, error_type: str, suggestion: str = None) -> None: 9 | """ 10 | Handle errors with consistent formatting and messaging. 
11 | 12 | Args: 13 | e: The exception that was raised 14 | error_type: Type of error to display 15 | suggestion: Optional suggestion message to display 16 | """ 17 | print(f"\n[bold red]{error_type}:") 18 | print(getattr(e, "message", str(e))) 19 | if hasattr(e, "details") and e.details: 20 | print("\nDetails:") 21 | print(e.details) 22 | if suggestion: 23 | print(f"\n{suggestion}") 24 | -------------------------------------------------------------------------------- /src/mcp_agent/core/request_params.py: -------------------------------------------------------------------------------- 1 | """ 2 | Request parameters definitions for LLM interactions. 3 | """ 4 | 5 | from typing import Any, Dict, List 6 | 7 | from mcp import SamplingMessage 8 | from mcp.types import CreateMessageRequestParams 9 | from pydantic import Field 10 | 11 | 12 | class RequestParams(CreateMessageRequestParams): 13 | """ 14 | Parameters to configure the AugmentedLLM 'generate' requests. 15 | """ 16 | 17 | messages: List[SamplingMessage] = Field(exclude=True, default=[]) 18 | """ 19 | Ignored. 'messages' are removed from CreateMessageRequestParams 20 | to avoid confusion with the 'message' parameter on 'generate' method. 21 | """ 22 | 23 | maxTokens: int = 2048 24 | """The maximum number of tokens to sample, as requested by the server.""" 25 | 26 | model: str | None = None 27 | """ 28 | The model to use for the LLM generation. This can only be set during Agent creation. 29 | If specified, this overrides the 'modelPreferences' selection criteria. 30 | """ 31 | 32 | use_history: bool = True 33 | """ 34 | Agent/LLM maintains conversation history. Does not include applied Prompts 35 | """ 36 | 37 | max_iterations: int = 20 38 | """ 39 | The maximum number of tool calls allowed in a conversation turn 40 | """ 41 | 42 | parallel_tool_calls: bool = True 43 | """ 44 | Whether to allow simultaneous tool calls 45 | """ 46 | response_format: Any | None = None 47 | """ 48 | Override response format for structured calls. Prefer sending pydantic model - only use in exceptional circumstances 49 | """ 50 | 51 | template_vars: Dict[str, Any] = Field(default_factory=dict) 52 | """ 53 | Optional dictionary of template variables for dynamic templates. Currently only works for TensorZero inference backend 54 | """ 55 | -------------------------------------------------------------------------------- /src/mcp_agent/executor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/executor/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/executor/task_registry.py: -------------------------------------------------------------------------------- 1 | """ 2 | Keep track of all activities/tasks that the executor needs to run. 3 | This is used by the workflow engine to dynamically orchestrate a workflow graph. 4 | The user just writes standard functions annotated with @workflow_task, but behind the scenes a workflow graph is built. 
5 | """ 6 | 7 | from typing import Any, Callable, Dict, List 8 | 9 | 10 | class ActivityRegistry: 11 | """Centralized task/activity management with validation and metadata.""" 12 | 13 | def __init__(self) -> None: 14 | self._activities: Dict[str, Callable] = {} 15 | self._metadata: Dict[str, Dict[str, Any]] = {} 16 | 17 | def register(self, name: str, func: Callable, metadata: Dict[str, Any] | None = None) -> None: 18 | if name in self._activities: 19 | raise ValueError(f"Activity '{name}' is already registered.") 20 | self._activities[name] = func 21 | self._metadata[name] = metadata or {} 22 | 23 | def get_activity(self, name: str) -> Callable: 24 | if name not in self._activities: 25 | raise KeyError(f"Activity '{name}' not found.") 26 | return self._activities[name] 27 | 28 | def get_metadata(self, name: str) -> Dict[str, Any]: 29 | return self._metadata.get(name, {}) 30 | 31 | def list_activities(self) -> List[str]: 32 | return list(self._activities.keys()) 33 | -------------------------------------------------------------------------------- /src/mcp_agent/human_input/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/human_input/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/human_input/types.py: -------------------------------------------------------------------------------- 1 | from typing import Any, AsyncIterator, Protocol 2 | 3 | from pydantic import BaseModel 4 | 5 | HUMAN_INPUT_SIGNAL_NAME = "__human_input__" 6 | 7 | 8 | class HumanInputRequest(BaseModel): 9 | """Represents a request for human input.""" 10 | 11 | prompt: str 12 | """The prompt to show to the user""" 13 | 14 | description: str | None = None 15 | """Optional description of what the input is for""" 16 | 17 | request_id: str | None = None 18 | """Unique identifier for this request""" 19 | 20 | workflow_id: str | None = None 21 | """Optional workflow ID if using workflow engine""" 22 | 23 | timeout_seconds: int | None = None 24 | """Optional timeout in seconds""" 25 | 26 | metadata: dict | None = None 27 | """Additional request payload""" 28 | 29 | 30 | class HumanInputResponse(BaseModel): 31 | """Represents a response to a human input request""" 32 | 33 | request_id: str 34 | """ID of the original request""" 35 | 36 | response: str 37 | """The input provided by the human""" 38 | 39 | metadata: dict[str, Any] | None = None 40 | """Additional response payload""" 41 | 42 | 43 | class HumanInputCallback(Protocol): 44 | """Protocol for callbacks that handle human input requests.""" 45 | 46 | async def __call__(self, request: HumanInputRequest) -> AsyncIterator[HumanInputResponse]: 47 | """ 48 | Handle a human input request. 49 | 50 | Args: 51 | request: The input request to handle 52 | 53 | Returns: 54 | AsyncIterator yielding responses as they come in 55 | TODO: saqadri - Keep it simple and just return HumanInputResponse? 56 | """ 57 | ... 
58 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/__init__.py: -------------------------------------------------------------------------------- 1 | # LLM module 2 | # Contains code for working with large language models 3 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/augmented_llm_slow.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Any, List, Optional, Union 3 | 4 | from mcp_agent.llm.augmented_llm import ( 5 | MessageParamT, 6 | RequestParams, 7 | ) 8 | from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM 9 | from mcp_agent.llm.provider_types import Provider 10 | from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart 11 | 12 | 13 | class SlowLLM(PassthroughLLM): 14 | """ 15 | A specialized LLM implementation that sleeps for 3 seconds before responding like PassthroughLLM. 16 | 17 | This is useful for testing scenarios where you want to simulate slow responses 18 | or for debugging timing-related issues in parallel workflows. 19 | """ 20 | 21 | def __init__( 22 | self, provider=Provider.FAST_AGENT, name: str = "Slow", **kwargs: dict[str, Any] 23 | ) -> None: 24 | super().__init__(name=name, provider=provider, **kwargs) 25 | 26 | async def generate_str( 27 | self, 28 | message: Union[str, MessageParamT, List[MessageParamT]], 29 | request_params: Optional[RequestParams] = None, 30 | ) -> str: 31 | """Sleep for 3 seconds then return the input message as a string.""" 32 | await asyncio.sleep(3) 33 | return await super().generate_str(message, request_params) 34 | 35 | async def _apply_prompt_provider_specific( 36 | self, 37 | multipart_messages: List["PromptMessageMultipart"], 38 | request_params: RequestParams | None = None, 39 | ) -> PromptMessageMultipart: 40 | """Sleep for 3 seconds then apply prompt like PassthroughLLM.""" 41 | await asyncio.sleep(3) 42 | return await super()._apply_prompt_provider_specific(multipart_messages, request_params) 43 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/provider_types.py: -------------------------------------------------------------------------------- 1 | """ 2 | Type definitions for LLM providers. 
3 | """ 4 | 5 | from enum import Enum 6 | 7 | 8 | class Provider(Enum): 9 | """Supported LLM providers""" 10 | 11 | ANTHROPIC = "anthropic" 12 | DEEPSEEK = "deepseek" 13 | FAST_AGENT = "fast-agent" 14 | GENERIC = "generic" 15 | GOOGLE_OAI = "googleoai" # For Google through OpenAI libraries 16 | GOOGLE = "google" # For Google GenAI native library 17 | OPENAI = "openai" 18 | OPENROUTER = "openrouter" 19 | TENSORZERO = "tensorzero" # For TensorZero Gateway 20 | AZURE = "azure" # Azure OpenAI Service 21 | ALIYUN = "aliyun" # Aliyun Bailian OpenAI Service 22 | HUGGINGFACE = "huggingface" # For HuggingFace MCP connections 23 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/providers/__init__.py: -------------------------------------------------------------------------------- 1 | from mcp_agent.llm.providers.sampling_converter_anthropic import ( 2 | AnthropicSamplingConverter, 3 | ) 4 | from mcp_agent.llm.providers.sampling_converter_openai import ( 5 | OpenAISamplingConverter, 6 | ) 7 | 8 | __all__ = ["AnthropicSamplingConverter", "OpenAISamplingConverter"] 9 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/providers/augmented_llm_aliyun.py: -------------------------------------------------------------------------------- 1 | from mcp_agent.core.request_params import RequestParams 2 | from mcp_agent.llm.provider_types import Provider 3 | from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM 4 | 5 | ALIYUN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1" 6 | DEFAULT_QWEN_MODEL = "qwen-turbo" 7 | 8 | 9 | class AliyunAugmentedLLM(OpenAIAugmentedLLM): 10 | def __init__(self, *args, **kwargs) -> None: 11 | super().__init__(*args, provider=Provider.ALIYUN, **kwargs) 12 | 13 | def _initialize_default_params(self, kwargs: dict) -> RequestParams: 14 | """Initialize Aliyun-specific default parameters""" 15 | chosen_model = kwargs.get("model", DEFAULT_QWEN_MODEL) 16 | 17 | return RequestParams( 18 | model=chosen_model, 19 | systemPrompt=self.instruction, 20 | parallel_tool_calls=True, 21 | max_iterations=10, 22 | use_history=True, 23 | ) 24 | 25 | def _base_url(self) -> str: 26 | base_url = None 27 | if self.context.config and self.context.config.aliyun: 28 | base_url = self.context.config.aliyun.base_url 29 | 30 | return base_url if base_url else ALIYUN_BASE_URL 31 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/providers/augmented_llm_generic.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from mcp_agent.core.request_params import RequestParams 4 | from mcp_agent.llm.provider_types import Provider 5 | from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM 6 | 7 | DEFAULT_OLLAMA_BASE_URL = "http://localhost:11434/v1" 8 | DEFAULT_OLLAMA_MODEL = "llama3.2:latest" 9 | DEFAULT_OLLAMA_API_KEY = "ollama" 10 | 11 | 12 | class GenericAugmentedLLM(OpenAIAugmentedLLM): 13 | def __init__(self, *args, **kwargs) -> None: 14 | super().__init__( 15 | *args, provider=Provider.GENERIC, **kwargs 16 | ) # Properly pass args and kwargs to parent 17 | 18 | def _initialize_default_params(self, kwargs: dict) -> RequestParams: 19 | """Initialize Generic parameters""" 20 | chosen_model = kwargs.get("model", DEFAULT_OLLAMA_MODEL) 21 | 22 | return RequestParams( 23 | model=chosen_model, 24 | systemPrompt=self.instruction, 25 | parallel_tool_calls=True, 26 | 
max_iterations=10, 27 | use_history=True, 28 | ) 29 | 30 | def _base_url(self) -> str: 31 | base_url = os.getenv("GENERIC_BASE_URL", DEFAULT_OLLAMA_BASE_URL) 32 | if self.context.config and self.context.config.generic: 33 | base_url = self.context.config.generic.base_url 34 | 35 | return base_url 36 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/providers/augmented_llm_google_oai.py: -------------------------------------------------------------------------------- 1 | from mcp_agent.core.request_params import RequestParams 2 | from mcp_agent.llm.provider_types import Provider 3 | from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM 4 | 5 | GOOGLE_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai" 6 | DEFAULT_GOOGLE_MODEL = "gemini-2.0-flash" 7 | 8 | 9 | class GoogleOaiAugmentedLLM(OpenAIAugmentedLLM): 10 | def __init__(self, *args, **kwargs) -> None: 11 | super().__init__(*args, provider=Provider.GOOGLE_OAI, **kwargs) 12 | 13 | def _initialize_default_params(self, kwargs: dict) -> RequestParams: 14 | """Initialize Google OpenAI Compatibility default parameters""" 15 | chosen_model = kwargs.get("model", DEFAULT_GOOGLE_MODEL) 16 | 17 | return RequestParams( 18 | model=chosen_model, 19 | systemPrompt=self.instruction, 20 | parallel_tool_calls=False, 21 | max_iterations=10, 22 | use_history=True, 23 | ) 24 | 25 | def _base_url(self) -> str: 26 | base_url = None 27 | if self.context.config and self.context.config.google: 28 | base_url = self.context.config.google.base_url 29 | 30 | return base_url if base_url else GOOGLE_BASE_URL 31 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/providers/openai_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for OpenAI integration with MCP. 3 | 4 | This file provides backward compatibility with the existing API while 5 | delegating to the proper implementations in the providers/ directory. 6 | """ 7 | 8 | from typing import Any, Dict, Union 9 | 10 | from openai.types.chat import ( 11 | ChatCompletionMessage, 12 | ChatCompletionMessageParam, 13 | ) 14 | 15 | from mcp_agent.llm.providers.multipart_converter_openai import OpenAIConverter 16 | from mcp_agent.llm.providers.openai_multipart import ( 17 | openai_to_multipart, 18 | ) 19 | from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart 20 | 21 | 22 | def openai_message_to_prompt_message_multipart( 23 | message: Union[ChatCompletionMessage, Dict[str, Any]], 24 | ) -> PromptMessageMultipart: 25 | """ 26 | Convert an OpenAI ChatCompletionMessage to a PromptMessageMultipart. 27 | 28 | Args: 29 | message: The OpenAI message to convert (can be an actual ChatCompletionMessage 30 | or a dictionary with the same structure) 31 | 32 | Returns: 33 | A PromptMessageMultipart representation 34 | """ 35 | return openai_to_multipart(message) 36 | 37 | 38 | def openai_message_param_to_prompt_message_multipart( 39 | message_param: ChatCompletionMessageParam, 40 | ) -> PromptMessageMultipart: 41 | """ 42 | Convert an OpenAI ChatCompletionMessageParam to a PromptMessageMultipart. 
43 | 44 | Args: 45 | message_param: The OpenAI message param to convert 46 | 47 | Returns: 48 | A PromptMessageMultipart representation 49 | """ 50 | return openai_to_multipart(message_param) 51 | 52 | 53 | def prompt_message_multipart_to_openai_message_param( 54 | multipart: PromptMessageMultipart, 55 | ) -> ChatCompletionMessageParam: 56 | """ 57 | Convert a PromptMessageMultipart to an OpenAI ChatCompletionMessageParam. 58 | 59 | Args: 60 | multipart: The PromptMessageMultipart to convert 61 | 62 | Returns: 63 | An OpenAI ChatCompletionMessageParam representation 64 | """ 65 | return OpenAIConverter.convert_to_openai(multipart) 66 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/providers/sampling_converter_anthropic.py: -------------------------------------------------------------------------------- 1 | from anthropic.types import ( 2 | Message, 3 | MessageParam, 4 | ) 5 | from mcp import StopReason 6 | from mcp.types import ( 7 | PromptMessage, 8 | ) 9 | 10 | from mcp_agent.llm.providers.multipart_converter_anthropic import ( 11 | AnthropicConverter, 12 | ) 13 | from mcp_agent.llm.sampling_format_converter import ProviderFormatConverter 14 | from mcp_agent.logging.logger import get_logger 15 | 16 | _logger = get_logger(__name__) 17 | 18 | 19 | class AnthropicSamplingConverter(ProviderFormatConverter[MessageParam, Message]): 20 | """ 21 | Convert between Anthropic and MCP types. 22 | """ 23 | 24 | @classmethod 25 | def from_prompt_message(cls, message: PromptMessage) -> MessageParam: 26 | """Convert an MCP PromptMessage to an Anthropic MessageParam.""" 27 | return AnthropicConverter.convert_prompt_message_to_anthropic(message) 28 | 29 | 30 | def mcp_stop_reason_to_anthropic_stop_reason(stop_reason: StopReason): 31 | if not stop_reason: 32 | return None 33 | elif stop_reason == "endTurn": 34 | return "end_turn" 35 | elif stop_reason == "maxTokens": 36 | return "max_tokens" 37 | elif stop_reason == "stopSequence": 38 | return "stop_sequence" 39 | elif stop_reason == "toolUse": 40 | return "tool_use" 41 | else: 42 | return stop_reason 43 | 44 | 45 | def anthropic_stop_reason_to_mcp_stop_reason(stop_reason: str) -> StopReason: 46 | if not stop_reason: 47 | return "end_turn" 48 | elif stop_reason == "end_turn": 49 | return "endTurn" 50 | elif stop_reason == "max_tokens": 51 | return "maxTokens" 52 | elif stop_reason == "stop_sequence": 53 | return "stopSequence" 54 | elif stop_reason == "tool_use": 55 | return "toolUse" 56 | else: 57 | return stop_reason 58 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/providers/sampling_converter_openai.py: -------------------------------------------------------------------------------- 1 | 2 | from mcp.types import ( 3 | PromptMessage, 4 | ) 5 | from openai.types.chat import ChatCompletionMessage, ChatCompletionMessageParam 6 | 7 | from mcp_agent.llm.sampling_format_converter import ( 8 | ProviderFormatConverter, 9 | ) 10 | from mcp_agent.logging.logger import get_logger 11 | 12 | _logger = get_logger(__name__) 13 | 14 | 15 | class OpenAISamplingConverter( 16 | ProviderFormatConverter[ChatCompletionMessageParam, ChatCompletionMessage] 17 | ): 18 | @classmethod 19 | def from_prompt_message(cls, message: PromptMessage) -> ChatCompletionMessageParam: 20 | """Convert an MCP PromptMessage to an OpenAI message dict.""" 21 | from mcp_agent.llm.providers.multipart_converter_openai import ( 22 | OpenAIConverter, 23 | ) 24 | 25 | # Use the full-featured 
OpenAI converter for consistent handling 26 | return OpenAIConverter.convert_prompt_message_to_openai(message) 27 | -------------------------------------------------------------------------------- /src/mcp_agent/llm/sampling_format_converter.py: -------------------------------------------------------------------------------- 1 | from typing import Generic, List, Protocol, TypeVar 2 | 3 | from mcp.types import PromptMessage 4 | 5 | from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart 6 | 7 | # Define covariant type variables 8 | MessageParamT_co = TypeVar("MessageParamT_co", covariant=True) 9 | MessageT_co = TypeVar("MessageT_co", covariant=True) 10 | 11 | 12 | class ProviderFormatConverter(Protocol, Generic[MessageParamT_co, MessageT_co]): 13 | """Conversions between LLM provider and MCP types""" 14 | 15 | @classmethod 16 | def from_prompt_message(cls, message: PromptMessage) -> MessageParamT_co: 17 | """Convert an MCP PromptMessage to a provider-specific message parameter.""" 18 | ... 19 | 20 | @classmethod 21 | def from_multipart_prompts( 22 | cls, messages: List[PromptMessageMultipart] 23 | ) -> List[MessageParamT_co]: 24 | """Convert a list of PromptMessageMultiparts to a list of provider-specific implementations""" 25 | ... 26 | 27 | 28 | class BasicFormatConverter(ProviderFormatConverter[PromptMessage, PromptMessage]): 29 | @classmethod 30 | def from_prompt_message(cls, message: PromptMessage) -> PromptMessage: 31 | return message 32 | 33 | @classmethod 34 | def from_multipart_prompts( 35 | cls, messages: List[PromptMessageMultipart] 36 | ) -> List[PromptMessageMultipart]: 37 | return messages 38 | -------------------------------------------------------------------------------- /src/mcp_agent/logging/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/logging/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/mcp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/mcp/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/mcp/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common constants and utilities shared between modules to avoid circular imports. 3 | """ 4 | 5 | # Constants 6 | SEP = "-" 7 | 8 | 9 | def create_namespaced_name(server_name: str, resource_name: str) -> str: 10 | """Create a namespaced resource name from server and resource names""" 11 | return f"{server_name}{SEP}{resource_name}"[:64] 12 | 13 | 14 | def is_namespaced_name(name: str) -> bool: 15 | """Check if a name is already namespaced""" 16 | return SEP in name 17 | -------------------------------------------------------------------------------- /src/mcp_agent/mcp/helpers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper modules for working with MCP content.
3 | """ -------------------------------------------------------------------------------- /src/mcp_agent/mcp/helpers/server_config_helpers.py: -------------------------------------------------------------------------------- 1 | """Helper functions for type-safe server config access.""" 2 | 3 | from typing import TYPE_CHECKING, Optional 4 | 5 | from mcp import ClientSession 6 | 7 | if TYPE_CHECKING: 8 | from mcp_agent.config import MCPServerSettings 9 | 10 | 11 | def get_server_config(ctx: ClientSession) -> Optional["MCPServerSettings"]: 12 | """Extract server config from context if available. 13 | 14 | Type guard helper that safely accesses server_config with proper type checking. 15 | """ 16 | # Import here to avoid circular import 17 | from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession 18 | 19 | if (hasattr(ctx, "session") and 20 | isinstance(ctx.session, MCPAgentClientSession) and 21 | ctx.session.server_config): 22 | return ctx.session.server_config 23 | return None -------------------------------------------------------------------------------- /src/mcp_agent/mcp/mime_utils.py: -------------------------------------------------------------------------------- 1 | # mime_utils.py 2 | 3 | import mimetypes 4 | 5 | # Initialize mimetypes database 6 | mimetypes.init() 7 | 8 | # Extend with additional types that might be missing 9 | mimetypes.add_type("text/x-python", ".py") 10 | mimetypes.add_type("image/webp", ".webp") 11 | 12 | # Known text-based MIME types not starting with "text/" 13 | TEXT_MIME_TYPES = { 14 | "application/json", 15 | "application/javascript", 16 | "application/xml", 17 | "application/ld+json", 18 | "application/xhtml+xml", 19 | "application/x-httpd-php", 20 | "application/x-sh", 21 | "application/ecmascript", 22 | "application/graphql", 23 | "application/x-www-form-urlencoded", 24 | "application/yaml", 25 | "application/toml", 26 | "application/x-python-code", 27 | "application/vnd.api+json", 28 | } 29 | 30 | # Common text-based MIME type patterns 31 | TEXT_MIME_PATTERNS = ("+xml", "+json", "+yaml", "+text") 32 | 33 | 34 | def guess_mime_type(file_path: str) -> str: 35 | """ 36 | Guess the MIME type of a file based on its extension. 
37 | """ 38 | mime_type, _ = mimetypes.guess_type(file_path) 39 | return mime_type or "application/octet-stream" 40 | 41 | 42 | def is_text_mime_type(mime_type: str) -> bool: 43 | """Determine if a MIME type represents text content.""" 44 | if not mime_type: 45 | return False 46 | 47 | # Standard text types 48 | if mime_type.startswith("text/"): 49 | return True 50 | 51 | # Known text types 52 | if mime_type in TEXT_MIME_TYPES: 53 | return True 54 | 55 | # Common text patterns 56 | if any(mime_type.endswith(pattern) for pattern in TEXT_MIME_PATTERNS): 57 | return True 58 | 59 | return False 60 | 61 | 62 | def is_binary_content(mime_type: str) -> bool: 63 | """Check if content should be treated as binary.""" 64 | return not is_text_mime_type(mime_type) 65 | 66 | 67 | def is_image_mime_type(mime_type: str) -> bool: 68 | """Check if a MIME type represents an image.""" 69 | return mime_type.startswith("image/") and mime_type != "image/svg+xml" 70 | -------------------------------------------------------------------------------- /src/mcp_agent/mcp/prompts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/mcp/prompts/__init__.py -------------------------------------------------------------------------------- /src/mcp_agent/mcp/prompts/__main__.py: -------------------------------------------------------------------------------- 1 | from mcp_agent.mcp.prompts.prompt_server import main 2 | 3 | # This must be here for the console entry points defined in pyproject.toml 4 | # DO NOT REMOVE! 5 | 6 | # For the entry point in pyproject.toml 7 | app = main 8 | 9 | if __name__ == "__main__": 10 | main() 11 | -------------------------------------------------------------------------------- /src/mcp_agent/mcp/prompts/prompt_constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | Constants for the prompt system. 3 | 4 | This module defines constants used throughout the prompt system, including 5 | delimiters for parsing prompt files and serializing prompt messages. 6 | """ 7 | 8 | # Standard delimiters used for prompt template parsing and serialization 9 | USER_DELIMITER = "---USER" 10 | ASSISTANT_DELIMITER = "---ASSISTANT" 11 | RESOURCE_DELIMITER = "---RESOURCE" 12 | 13 | # Default delimiter mapping used by PromptTemplate and PromptTemplateLoader 14 | DEFAULT_DELIMITER_MAP = { 15 | USER_DELIMITER: "user", 16 | ASSISTANT_DELIMITER: "assistant", 17 | RESOURCE_DELIMITER: "resource", 18 | } 19 | -------------------------------------------------------------------------------- /src/mcp_agent/mcp_server/__init__.py: -------------------------------------------------------------------------------- 1 | # Import and re-export AgentMCPServer to avoid circular imports 2 | from mcp_agent.mcp_server.agent_server import AgentMCPServer 3 | 4 | __all__ = ["AgentMCPServer"] 5 | -------------------------------------------------------------------------------- /src/mcp_agent/progress_display.py: -------------------------------------------------------------------------------- 1 | """ 2 | Centralized progress display configuration for MCP Agent. 3 | Provides a shared progress display instance for consistent progress handling. 
4 | """ 5 | 6 | from mcp_agent.console import console 7 | from mcp_agent.logging.rich_progress import RichProgressDisplay 8 | 9 | # Main progress display instance - shared across the application 10 | progress_display = RichProgressDisplay(console) 11 | -------------------------------------------------------------------------------- /src/mcp_agent/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/py.typed -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: sonnet 2 | 3 | # on windows, adjust the mount point to be the full path e.g. x:/temp/data-analysis/mount-point:/mnt/data/ 4 | 5 | mcp: 6 | servers: 7 | interpreter: 8 | command: "docker" 9 | args: 10 | [ 11 | "run", 12 | "-i", 13 | "--rm", 14 | "--pull=always", 15 | "-v", 16 | "./mount-point:/mnt/data/", 17 | "ghcr.io/evalstate/mcp-py-repl:latest", 18 | ] 19 | roots: 20 | - uri: "file://./mount-point/" 21 | name: "test_data" 22 | server_uri_alias: "file:///mnt/data/" 23 | filesystem: 24 | # On windows update the command and arguments to use `node` and the absolute path to the server. 25 | # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. 26 | # Use `npm -g root` to find the global node_modules path. 27 | # command: "node" 28 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."] 29 | command: "npx" 30 | args: ["-y", "@modelcontextprotocol/server-filesystem", "./mount-point/"] 31 | fetch: 32 | command: "uvx" 33 | args: ["mcp-server-fetch"] 34 | brave: 35 | # On windows replace the command and args line to use `node` and the absolute path to the server. 36 | # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally. 37 | # Use `npm -g root` to find the global node_modules path. 38 | # command: "node" 39 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"] 40 | command: "npx" 41 | args: ["-y", "@modelcontextprotocol/server-brave-search"] 42 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/in_dev/css-LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021-2024 Paulo Cunha 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/in_dev/slides.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | Here is a MARP theme known as "freud.css". You are to use these templates when asked 3 | by the User to produce a presentation. You are able to include HTML markup and style 4 | adjustments within the MARP file to enhance the overall appearance of the presentation. 5 | Pay attention to font sizes and layouts - make sure that content does not spill over 6 | the slide canvas. 7 | ---RESOURCE 8 | schema.css 9 | ---RESOURCE 10 | freud.css 11 | ---RESOURCE 12 | structure.css 13 | ---ASSISTANT 14 | I understand, I will refer to the freud.css template and its dependencies (schema.css and structure.css) when 15 | producing MARP presentations. 16 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("FastAgent Example") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(servers=["category", "mcp_hfspace", "mcp_webcam"]) 11 | #@fast.agent(name="test") 12 | async def main() -> None: 13 | # use the --model command line switch or agent arguments to change model 14 | async with fast.run() as agent: 15 | # await agent.prompt(agent_name="test") 16 | await agent() 17 | 18 | 19 | if __name__ == "__main__": 20 | asyncio.run(main()) 21 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: sonnet 2 | 3 | # on windows, adjust the mount point to be the full path e.g. x:/temp/data-analysis/mount-point:/mnt/data/ 4 | 5 | # logger: 6 | # level: "debug" 7 | # type: "console" 8 | 9 | mcp: 10 | servers: 11 | interpreter: 12 | command: "docker" 13 | args: 14 | [ 15 | "run", 16 | "-i", 17 | "--rm", 18 | "--pull=always", 19 | "-v", 20 | "./mount-point:/mnt/data/", 21 | "ghcr.io/evalstate/mcp-py-repl:latest", 22 | ] 23 | roots: 24 | - uri: "file://./mount-point/" 25 | name: "test_data" 26 | server_uri_alias: "file:///mnt/data/" 27 | filesystem: 28 | # On windows update the command and arguments to use `node` and the absolute path to the server. 29 | # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. 30 | # Use `npm -g root` to find the global node_modules path. 31 | # command: "node" 32 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."] 33 | command: "npx" 34 | args: 35 | [ 36 | "-y", 37 | "@modelcontextprotocol/server-filesystem", 38 | "src/mcp_agent/resources/examples/data-analysis/mount-point/", 39 | ] 40 | fetch: 41 | command: "uvx" 42 | args: ["mcp-server-fetch"] 43 | brave: 44 | # On windows replace the command and args line to use `node` and the absolute path to the server.
45 | # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally. 46 | # Use `npm -g root` to find the global node_modules path. 47 | # command: "node" 48 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"] 49 | command: "npx" 50 | args: ["-y", "@modelcontextprotocol/server-brave-search"] 51 | sizing_setup: 52 | command: "uv" 53 | args: ["run", "prompt_sizing1.py"] 54 | 55 | category: 56 | command: "prompt-server" 57 | args: ["simple.txt"] 58 | 59 | mcp_hfspace: 60 | command: "npx" 61 | args: ["@llmindset/mcp-hfspace"] 62 | 63 | mcp_webcam: 64 | command: "npx" 65 | args: ["@llmindset/mcp-webcam"] 66 | 67 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/history_transfer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("FastAgent Example") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(name="haiku", model="haiku") 11 | @fast.agent(name="openai", model="o3-mini.medium") 12 | 13 | # @fast.agent(name="test") 14 | async def main() -> None: 15 | async with fast.run() as agent: 16 | # Start an interactive session with "haiku" 17 | await agent.prompt(agent_name="haiku") 18 | # Transfer the message history to "openai" 19 | await agent.openai.generate(agent.haiku.message_history) 20 | # Continue the conversation 21 | await agent.prompt(agent_name="openai") # Interactive shell 22 | 23 | # result: str = await agent.send("foo") 24 | # mcp_prompt: PromptMessage = PromptMessage( 25 | # role="user", content=TextContent(type="text", text="How are you?") 26 | # ) 27 | # result: str = agent.send(mcp_prompt) 28 | # resource: ReadResourceResult = agent.openai.get_resource( 29 | # "server_name", "resource://images/cat.png" 30 | # ) 31 | # response: str = Prompt.user("What is in this image?", resource) 32 | 33 | 34 | if __name__ == "__main__": 35 | asyncio.run(main()) 36 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/prompt_category.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | from mcp.server.fastmcp.prompts.base import AssistantMessage, UserMessage 3 | 4 | mcp = FastMCP("MCP Root Tester") 5 | 6 | 7 | @mcp.prompt(name="category_prompt", description="set up the category protocol") 8 | def category_prompt(): 9 | return [ 10 | UserMessage("Cat"), 11 | AssistantMessage("animal"), 12 | UserMessage("dog"), 13 | AssistantMessage("animal"), 14 | UserMessage("quartz"), 15 | AssistantMessage("mineral"), 16 | # UserMessage("the sun"), 17 | ] 18 | 19 | 20 | if __name__ == "__main__": 21 | mcp.run() 22 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/prompt_sizing.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | from mcp.server.fastmcp.prompts.base import AssistantMessage, UserMessage 3 | from pydantic import Field 4 | 5 | mcp = FastMCP("MCP Prompt Tester") 6 | 7 | 8 | @mcp.prompt(name="sizing_prompt", description="set up the sizing protocol") 9 | def sizing_prompt(): 10 | return [ 11 | UserMessage("What is the size of the moon?"), 12 | AssistantMessage("OBJECT: MOON\nSIZE:
3,474.8\nUNITS: KM\nTYPE: MINERAL"), 13 | UserMessage("What is the size of the Earth?"), 14 | AssistantMessage("OBJECT: EARTH\nSIZE: 12,742\nUNITS: KM\nTYPE: MINERAL"), 15 | UserMessage("A tiger"), 16 | AssistantMessage("OBJECT: TIGER\nSIZE: 1.2\nUNITS: M\nTYPE: ANIMAL"), 17 | UserMessage("Domestic Cat"), 18 | ] 19 | 20 | 21 | @mcp.prompt( 22 | name="sizing_prompt_units", 23 | description="set up the sizing protocol with metric or imperial units", 24 | ) 25 | def sizing_prompt_units( 26 | metric: bool = Field(description="Set to True for Metric, False for Imperial", default=True), 27 | ): 28 | if metric: 29 | return [ 30 | UserMessage("What is the size of the moon?"), 31 | AssistantMessage("OBJECT: MOON\nSIZE: 3,474.8\nUNITS: KM\nTYPE: MINERAL"), 32 | UserMessage("What is the size of the Earth?"), 33 | AssistantMessage("OBJECT: EARTH\nSIZE: 12,742\nUNITS: KM\nTYPE: MINERAL"), 34 | UserMessage("A tiger"), 35 | AssistantMessage("OBJECT: TIGER\nSIZE: 1.2\nUNITS: M\nTYPE: ANIMAL"), 36 | UserMessage("Domestic Cat"), 37 | ] 38 | else: 39 | return [ 40 | UserMessage("What is the size of the moon?"), 41 | AssistantMessage("OBJECT: MOON\nSIZE: 2,159.1\nUNITS: MI\nTYPE: MINERAL"), 42 | UserMessage("What is the size of the Earth?"), 43 | AssistantMessage("OBJECT: EARTH\nSIZE: 7,918\nUNITS: MI\nTYPE: MINERAL"), 44 | UserMessage("A tiger"), 45 | AssistantMessage("OBJECT: TIGER\nSIZE: 3.9\nUNITS: FT\nTYPE: ANIMAL"), 46 | UserMessage("Domestic Cat"), 47 | ] 48 | 49 | 50 | if __name__ == "__main__": 51 | mcp.run() 52 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/simple.txt: -------------------------------------------------------------------------------- 1 | hello, world 2 | 3 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/sizer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | fast = FastAgent("Sizer Prompt Test") 6 | 7 | 8 | @fast.agent( 9 | "sizer", 10 | "given an object return its size", 11 | servers=["sizer", "category"], 12 | use_history=True, 13 | ) 14 | async def main() -> None: 15 | async with fast.run() as agent: 16 | await agent() 17 | 18 | 19 | if __name__ == "__main__": 20 | asyncio.run(main()) 21 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/internal/social.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("Social Media Manager") 7 | 8 | 9 | @fast.agent( 10 | "url_fetcher", 11 | "Given a URL, provide a complete and comprehensive summary", 12 | servers=["fetch"], 13 | ) 14 | @fast.agent( 15 | "post_author", 16 | """ 17 | Write a 280 character social media post for any given text. 18 | Respond only with the post, never use hashtags. 19 | """, 20 | ) 21 | @fast.agent("translate_fr", "Translate the text to French.") 22 | @fast.agent("translate_de", "Translate the text to German.") 23 | @fast.agent( 24 | "review", 25 | """ 26 | Cleanly format the original content and translations for review by a Social Media manager. 27 | Highlight any cultural sensitivities. 
28 | """, 29 | model="sonnet", 30 | ) 31 | @fast.parallel( 32 | "translated_plan", 33 | fan_out=["translate_fr", "translate_de"], 34 | ) 35 | @fast.agent( 36 | "human_review_and_post", 37 | """ 38 | - You can send a social media post by saving it to a file name 'post-.md'. 39 | - NEVER POST TO SOCIAL MEDIA UNLESS THE HUMAN HAS REVIEWED AND APPROVED. 40 | 41 | Present the Social Media report to the Human, and then provide direct actionable questions to assist 42 | the Human in posting the content. 43 | 44 | You are being connected to a Human now, the first message you receive will be a 45 | Social Media report ready to review with the Human. 46 | 47 | """, 48 | human_input=True, 49 | servers=["filesystem"], 50 | ) 51 | @fast.chain( 52 | "post_writer", 53 | sequence=[ 54 | "url_fetcher", 55 | "post_author", 56 | "translated_plan", 57 | "human_review_and_post", 58 | ], 59 | ) 60 | async def main() -> None: 61 | async with fast.run() as agent: 62 | # using chain workflow 63 | await agent.post_writer.prompt() 64 | 65 | 66 | if __name__ == "__main__": 67 | asyncio.run(main()) 68 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("fast-agent agent_one (mcp server)") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(name="agent_one", instruction="You are a helpful AI Agent.") 11 | async def main(): 12 | # use the --model command line switch or agent arguments to change model 13 | async with fast.run() as agent: 14 | await agent.interactive() 15 | 16 | 17 | if __name__ == "__main__": 18 | asyncio.run(main()) 19 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("fast-agent agent_two (mcp host)") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(name="agent_two", instruction="You are a helpful AI Agent.", servers=["agent_one"]) 11 | async def main(): 12 | # use the --model command line switch or agent arguments to change model 13 | async with fast.run() as agent: 14 | await agent.interactive() 15 | 16 | 17 | if __name__ == "__main__": 18 | asyncio.run(main()) 19 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # Model string takes format: 2 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 3 | # 4 | # Can be overriden with a command line switch --model=, or within the Agent decorator. 
5 | # Check here for current details: https://fast-agent.ai/models/ 6 | 7 | # set the default model for fast-agent below: 8 | default_model: gpt-4.1 9 | 10 | # Logging and Console Configuration: 11 | logger: 12 | # Switched off to avoid cluttering the console 13 | progress_display: false 14 | 15 | # Show chat User/Assistant messages on the console 16 | show_chat: true 17 | # Show tool calls on the console 18 | show_tools: true 19 | # Truncate long tool responses on the console 20 | truncate_tools: true 21 | 22 | # MCP Servers 23 | mcp: 24 | servers: 25 | agent_one: 26 | transport: http 27 | url: http://localhost:8001/mcp 28 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example: -------------------------------------------------------------------------------- 1 | # FastAgent Secrets Configuration 2 | # WARNING: Keep this file secure and never commit to version control 3 | 4 | # Alternatively set OPENAI_API_KEY, ANTHROPIC_API_KEY or other environment variables. 5 | # Keys in the configuration file override environment variables. 6 | 7 | openai: 8 | api_key: 9 | anthropic: 10 | api_key: 11 | deepseek: 12 | api_key: 13 | openrouter: 14 | api_key: 15 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/2025-03-19_FLUX_1-schnell-infer_Image_dff80.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/resources/examples/prompting/2025-03-19_FLUX_1-schnell-infer_Image_dff80.webp -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Prompting examples package for MCP Agent. 3 | """ 4 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/agent.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("FastAgent Example") 7 | 8 | 9 | # Define the agent 10 | @fast.agent( 11 | "agent", 12 | instruction="You are a helpful AI Agent", 13 | servers=["prompts"], # , "imgetage", "hfspace"], 14 | ) 15 | async def main() -> None: 16 | # use the --model command line switch or agent arguments to change model 17 | async with fast.run() as agent: 18 | await agent() 19 | 20 | 21 | if __name__ == "__main__": 22 | asyncio.run(main()) 23 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/delimited_prompt.txt: -------------------------------------------------------------------------------- 1 | ---USER 2 | I want to learn about {{topic}}. 3 | 4 | Can you tell me about it in detail? 5 | 6 | ---ASSISTANT 7 | I'd be happy to tell you about {{topic}}! 8 | 9 | Here are some key facts about {{topic}}: 10 | 1. It's very interesting 11 | 2. It has a rich history 12 | 3. Many people study it 13 | 14 | Would you like me to elaborate on any specific aspect? 
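The delimited template above is exactly what prompt-server loads. A simplified parser sketch using the delimiter map from prompt_constants.py earlier in this tree; the real loader in prompt_template.py additionally handles ---RESOURCE attachments and {{variable}} substitution, so this only illustrates the role-splitting convention:

from mcp_agent.mcp.prompts.prompt_constants import DEFAULT_DELIMITER_MAP


def parse_delimited(text: str) -> list[tuple[str, str]]:
    """Split a ---USER/---ASSISTANT template into (role, content) pairs."""
    messages: list[tuple[str, str]] = []
    role, lines = None, []
    for line in text.splitlines():
        if line.strip() in DEFAULT_DELIMITER_MAP:
            # A delimiter closes the previous message and opens a new one
            if role is not None:
                messages.append((role, "\n".join(lines).strip()))
            role, lines = DEFAULT_DELIMITER_MAP[line.strip()], []
        elif role is not None:
            lines.append(line)
    if role is not None:
        messages.append((role, "\n".join(lines).strip()))
    return messages


pairs = parse_delimited(open("delimited_prompt.txt").read())
# -> [("user", "I want to learn about {{topic}}. ..."), ("assistant", "...")]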
-------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4.1-mini, gpt-4.1, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=, or within the Agent constructor. 12 | 13 | default_model: haiku 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | type: file 21 | level: error 22 | # Switch the progress display on or off 23 | progress_display: true 24 | 25 | # Show chat User/Assistant messages on the console 26 | show_chat: true 27 | # Show tool calls on the console 28 | show_tools: true 29 | # Truncate long tool responses on the console 30 | truncate_tools: true 31 | 32 | # MCP Servers 33 | mcp: 34 | servers: 35 | prompts: 36 | command: "prompt-server" 37 | args: ["sizing.md", "resource.md", "resource-exe.md", "pdf_prompt.md"] 38 | hfspace: 39 | command: "npx" 40 | args: ["@llmindset/mcp-hfspace"] 41 | image: 42 | command: "uv" 43 | args: ["run", "image_server.py"] 44 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/foo.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | 3 | hello 4 | 5 | ---ASSISTANT 6 | 7 | Hello! How can I assist you today? 8 | 9 | ---USER 10 | 11 | does this work? 12 | 13 | ---ASSISTANT 14 | 15 | Yes, it works! How can I assist you today? -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/resources/examples/prompting/image.jpg -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/resources/examples/prompting/image.png -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/image_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Simple MCP server that responds to tool calls with text and image content.
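A sketch of driving this server from an agent, assuming the `image` server entry from the prompting fastagent.config.yaml above; the decorator and send() usage mirror agent.py and work_with_image.py in this directory:

import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("Image Tool Demo")


@fast.agent("vision", instruction="You are a helpful AI Agent", servers=["image"])
async def demo() -> None:
    async with fast.run() as agent:
        # The model can now call this server's get_image tool when asked.
        await agent.send("Fetch the image and describe it.")


if __name__ == "__main__":
    asyncio.run(demo())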
4 | """ 5 | 6 | import logging 7 | from pathlib import Path 8 | 9 | from mcp.server.fastmcp import Context, FastMCP, Image 10 | from mcp.types import ImageContent, TextContent 11 | 12 | # Configure logging 13 | logging.basicConfig(level=logging.INFO) 14 | logger = logging.getLogger(__name__) 15 | 16 | # Create the FastMCP server 17 | app = FastMCP(name="ImageToolServer", debug=True) 18 | 19 | 20 | @app.tool(name="get_image", description="Returns an image with a descriptive text") 21 | async def get_image( 22 | image_name: str = "default", ctx: Context = None 23 | ) -> list[TextContent | ImageContent]: 24 | """ 25 | Returns an image file along with a descriptive text. 26 | 27 | Args: 28 | image_name: Name of the image to return (default just returns image.jpg) 29 | 30 | Returns: 31 | A list containing a text message and the requested image 32 | """ 33 | try: 34 | # Read the image file and convert to base64 35 | # Create the response with text and image 36 | return [ 37 | TextContent(type="text", text="Here's your image:"), 38 | Image(path="image.jpg").to_image_content(), 39 | ] 40 | except Exception as e: 41 | logger.exception(f"Error processing image: {e}") 42 | return [TextContent(type="text", text=f"Error processing image: {str(e)}")] 43 | 44 | 45 | if __name__ == "__main__": 46 | # Check if the default image exists 47 | if not Path("image.jpg").exists(): 48 | logger.warning("Default image file 'image.jpg' not found in the current directory") 49 | logger.warning("Please add an image file named 'image.jpg' to the current directory") 50 | 51 | # Run the server using stdio transport 52 | app.run(transport="stdio") 53 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/pdf_prompt.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | Summarize this PDF 3 | ---RESOURCE 4 | sample.pdf 5 | 6 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/prompt1.txt: -------------------------------------------------------------------------------- 1 | Hello, World. 2 | 3 | This is {{blah}} foo 4 | 5 | This is {{terrible}} etc. 6 | 7 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/prompt2.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | hello, claude {{name}} 3 | ---ASSISTANT 4 | 5 | how are you??? 6 | ---USER 7 | Just fine 8 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/resource-exe.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | Good morning 3 | ---ASSISTANT 4 | Good morning! 5 | ---USER 6 | Can you tell me what's in this file? 7 | ---RESOURCE 8 | test.exe 9 | 10 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/resource.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | Please help me improve this CSS File 3 | ---RESOURCE 4 | sample.css 5 | ---ASSISTANT 6 | Very good, we should change all of the colours to #000000. do you agree? 7 | ---USER 8 | Yes, great idea! 9 | ---ASSISTANT 10 | i agree 11 | ---USER 12 | ok, here's an image 13 | ---RESOURCE 14 | image.png 15 | ---USER 16 | what's in the image? 
17 | 18 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/sample.css: -------------------------------------------------------------------------------- 1 | /* Modern Web Application Styles */ 2 | :root { 3 | --primary: #4a6cf7; 4 | --secondary: #f7c04a; 5 | --dark: #1f2937; 6 | --light: #f9fafb; 7 | --success: #10b981; 8 | --danger: #ef4444; 9 | --font-main: system-ui, -apple-system, sans-serif; 10 | } 11 | * { 12 | box-sizing: border-box; 13 | margin: 0; 14 | padding: 0; 15 | } 16 | body { 17 | font-family: var(--font-main); 18 | line-height: 1.6; 19 | color: var(--dark); 20 | background: var(--light); 21 | } 22 | h1, 23 | h2, 24 | h3 { 25 | margin-bottom: 0.5em; 26 | line-height: 1.2; 27 | } 28 | p { 29 | margin-bottom: 1em; 30 | } 31 | a { 32 | color: var(--primary); 33 | text-decoration: none; 34 | transition: color 0.2s; 35 | } 36 | a:hover { 37 | color: #3255d8; 38 | } 39 | .container { 40 | width: 100%; 41 | max-width: 1200px; 42 | margin: 0 auto; 43 | padding: 0 1rem; 44 | } 45 | .btn { 46 | display: inline-block; 47 | padding: 0.5em 1em; 48 | border-radius: 4px; 49 | background: var(--primary); 50 | color: white; 51 | border: none; 52 | cursor: pointer; 53 | } 54 | .btn:hover { 55 | background: #3255d8; 56 | } 57 | .btn-secondary { 58 | background: var(--secondary); 59 | color: var(--dark); 60 | } 61 | .card { 62 | padding: 1.5rem; 63 | border-radius: 8px; 64 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 65 | background: white; 66 | } 67 | .flex { 68 | display: flex; 69 | } 70 | .flex-col { 71 | flex-direction: column; 72 | } 73 | .items-center { 74 | align-items: center; 75 | } 76 | .justify-between { 77 | justify-content: space-between; 78 | } 79 | .grid { 80 | display: grid; 81 | grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); 82 | gap: 1rem; 83 | } 84 | @media (max-width: 768px) { 85 | .grid { 86 | grid-template-columns: 1fr; 87 | } 88 | } 89 | .mt-1 { 90 | margin-top: 0.25rem; 91 | } 92 | .mt-2 { 93 | margin-top: 0.5rem; 94 | } 95 | .mt-4 { 96 | margin-top: 1rem; 97 | } 98 | .mt-8 { 99 | margin-top: 2rem; 100 | } 101 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/sample.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/resources/examples/prompting/sample.pdf -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/sizing.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | how big is the moon? 3 | ---ASSISTANT 4 | OBJECT: MOON 5 | SIZE: 3,474.8 6 | UNITS: KM 7 | TYPE: SATELLITE 8 | ---USER 9 | Earth? 
10 | ---ASSISTANT 11 | OBJECT: EARTH 12 | SIZE: 12,742 13 | UNITS: KM 14 | TYPE: PLANET 15 | ---USER 16 | A tiger 17 | ---ASSISTANT 18 | OBJECT: TIGER 19 | SIZE: 1.2 20 | UNITS: M 21 | TYPE: ANIMAL 22 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/test.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/src/mcp_agent/resources/examples/prompting/test.exe -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/prompting/work_with_image.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from pathlib import Path 3 | 4 | from mcp_agent.core.fastagent import FastAgent 5 | from mcp_agent.core.prompt import Prompt 6 | 7 | # Create the application 8 | fast = FastAgent("FastAgent Example") 9 | 10 | 11 | # Define the agent 12 | @fast.agent("agent", instruction="You are a helpful AI Agent", servers=["prompts"]) 13 | async def main() -> None: 14 | async with fast.run() as agent: 15 | await agent.agent.generate([Prompt.user("What's in this image?", Path("image.png"))]) 16 | 17 | 18 | if __name__ == "__main__": 19 | asyncio.run(main()) 20 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/researcher/researcher-eval.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | agents = FastAgent(name="Researcher Agent (EO)") 6 | 7 | 8 | @agents.agent( 9 | name="Researcher", 10 | instruction=""" 11 | You are a research assistant, with access to internet search (via Brave), 12 | website fetch, a python interpreter (you can install packages with uv) and a filesystem. 13 | Use the current working directory to save and create files with both the Interpreter and Filesystem tools. 14 | The interpreter has numpy, pandas, matplotlib and seaborn already installed. 15 | 16 | You must always provide a summary of the specific sources you have used in your research. 17 | """, 18 | servers=["brave", "interpreter", "filesystem", "fetch"], 19 | ) 20 | @agents.agent( 21 | name="Evaluator", 22 | model="sonnet", 23 | instruction=""" 24 | Evaluate the response from the researcher based on the criteria: 25 | - Sources cited. Has the researcher provided a summary of the specific sources used in the research? 26 | - Validity. Has the researcher cross-checked and validated data and assumptions? 27 | - Alignment. Has the researcher acted and addressed feedback from any previous assessments? 28 | 29 | For each criterion: 30 | - Provide a rating (EXCELLENT, GOOD, FAIR, or POOR). 31 | - Offer specific feedback or suggestions for improvement. 32 | 33 | Summarize your evaluation as a structured response with: 34 | - Overall quality rating.
35 | - Specific feedback and areas for improvement.""", 36 | ) 37 | @agents.evaluator_optimizer( 38 | generator="Researcher", 39 | evaluator="Evaluator", 40 | max_refinements=5, 41 | min_rating="EXCELLENT", 42 | name="Researcher_Evaluator", 43 | ) 44 | async def main() -> None: 45 | async with agents.run() as agent: 46 | await agent.prompt("Researcher_Evaluator") 47 | 48 | print("Ask follow-up questions to the Researcher?") 49 | await agent.prompt("Researcher", default_prompt="STOP") 50 | 51 | 52 | if __name__ == "__main__": 53 | asyncio.run(main()) 54 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/researcher/researcher.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # from rich import print 6 | 7 | agents = FastAgent(name="Researcher Agent") 8 | 9 | 10 | @agents.agent( 11 | "Researcher", 12 | instruction=""" 13 | You are a research assistant, with access to internet search (via Brave), 14 | website fetch, a python interpreter (you can install packages with uv) and a filesystem. 15 | Use the current working directory to save and create files with both the Interpreter and Filesystem tools. 16 | The interpreter has numpy, pandas, matplotlib and seaborn already installed 17 | """, 18 | servers=["brave", "interpreter", "filesystem", "fetch"], 19 | ) 20 | async def main() -> None: 21 | research_prompt = """ 22 | Produce an investment report for the company Eutelsat. The final report should be saved in the filesystem in markdown format, and 23 | contain at least the following: 24 | 1 - A brief description of the company 25 | 2 - Current financial position (find data, create and incorporate charts) 26 | 3 - A PESTLE analysis 27 | 4 - An investment thesis for the next 3 years. Include both 'buy side' and 'sell side' arguments, and a final 28 | summary and recommendation. 29 | Today's date is 15 February 2025. Include the main data sources consulted in presenting the report.""" # noqa: F841 30 | 31 | async with agents.run() as agent: 32 | await agent.prompt() 33 | 34 | 35 | if __name__ == "__main__": 36 | asyncio.run(main()) 37 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/workflows/chaining.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("Agent Chaining") 7 | 8 | 9 | @fast.agent( 10 | "url_fetcher", 11 | instruction="Given a URL, provide a complete and comprehensive summary", 12 | servers=["fetch"], 13 | ) 14 | @fast.agent( 15 | "social_media", 16 | instruction=""" 17 | Write a 280 character social media post for any given text. 18 | Respond only with the post, never use hashtags.
19 | """, 20 | ) 21 | @fast.chain( 22 | name="post_writer", 23 | sequence=["url_fetcher", "social_media"], 24 | ) 25 | async def main() -> None: 26 | async with fast.run() as agent: 27 | # using chain workflow 28 | await agent.post_writer.send("https://llmindset.co.uk") 29 | 30 | 31 | # alternative syntax for above is result = agent["post_writer"].send(message) 32 | # alternative syntax for above is result = agent["post_writer"].prompt() 33 | 34 | 35 | if __name__ == "__main__": 36 | asyncio.run(main()) 37 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/workflows/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # Please edit this configuration file to match your environment (on Windows). 2 | # Examples in comments below - check/change the paths. 3 | # 4 | # 5 | 6 | logger: 7 | type: file 8 | level: error 9 | truncate_tools: true 10 | 11 | mcp: 12 | servers: 13 | filesystem: 14 | # On windows update the command and arguments to use `node` and the absolute path to the server. 15 | # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally. 16 | # Use `npm -g root` to find the global node_modules path.` 17 | # command: "node" 18 | # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."] 19 | command: "npx" 20 | args: ["-y", "@modelcontextprotocol/server-filesystem", "."] 21 | fetch: 22 | command: "uvx" 23 | args: ["mcp-server-fetch"] 24 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/workflows/human_input.py: -------------------------------------------------------------------------------- 1 | """ 2 | Agent which demonstrates Human Input tool 3 | """ 4 | 5 | import asyncio 6 | 7 | from mcp_agent.core.fastagent import FastAgent 8 | 9 | # Create the application 10 | fast = FastAgent("Human Input") 11 | 12 | 13 | # Define the agent 14 | @fast.agent( 15 | instruction="An AI agent that assists with basic tasks. Request Human Input when needed.", 16 | human_input=True, 17 | ) 18 | async def main() -> None: 19 | async with fast.run() as agent: 20 | # this usually causes the LLM to request the Human Input Tool 21 | await agent("print the next number in the sequence") 22 | await agent.prompt(default_prompt="STOP") 23 | 24 | 25 | if __name__ == "__main__": 26 | asyncio.run(main()) 27 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/workflows/parallel.py: -------------------------------------------------------------------------------- 1 | """ 2 | Parallel Workflow showing Fan Out and Fan In agents, using different models 3 | """ 4 | 5 | import asyncio 6 | from pathlib import Path 7 | 8 | from mcp_agent.core.fastagent import FastAgent 9 | from mcp_agent.core.prompt import Prompt 10 | 11 | # Create the application 12 | fast = FastAgent( 13 | "Parallel Workflow", 14 | ) 15 | 16 | 17 | @fast.agent( 18 | name="proofreader", 19 | instruction=""""Review the short story for grammar, spelling, and punctuation errors. 20 | Identify any awkward phrasing or structural issues that could improve clarity. 21 | Provide detailed feedback on corrections.""", 22 | ) 23 | @fast.agent( 24 | name="fact_checker", 25 | instruction="""Verify the factual consistency within the story. 
Identify any contradictions, 26 | logical inconsistencies, or inaccuracies in the plot, character actions, or setting. 27 | Highlight potential issues with reasoning or coherence.""", 28 | model="gpt-4.1-mini", 29 | ) 30 | @fast.agent( 31 | name="style_enforcer", 32 | instruction="""Analyze the story for adherence to style guidelines. 33 | Evaluate the narrative flow, clarity of expression, and tone. Suggest improvements to 34 | enhance storytelling, readability, and engagement.""", 35 | model="sonnet", 36 | ) 37 | @fast.agent( 38 | name="grader", 39 | instruction="""Compile the feedback from the Proofreader, Fact Checker, and Style Enforcer 40 | into a structured report. Summarize key issues and categorize them by type. 41 | Provide actionable recommendations for improving the story, 42 | and give an overall grade based on the feedback.""", 43 | model="o3-mini.low", 44 | ) 45 | @fast.parallel( 46 | fan_out=["proofreader", "fact_checker", "style_enforcer"], 47 | fan_in="grader", 48 | name="parallel", 49 | ) 50 | async def main() -> None: 51 | async with fast.run() as agent: 52 | await agent.parallel.send( 53 | Prompt.user("Student short story submission", Path("short_story.txt")) 54 | ) 55 | 56 | 57 | if __name__ == "__main__": 58 | asyncio.run(main()) # type: ignore 59 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/workflows/router.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example MCP Agent application showing router workflow with decorator syntax. 3 | Demonstrates router's ability to either: 4 | 1. Use tools directly to handle requests 5 | 2. Delegate requests to specialized agents 6 | """ 7 | 8 | import asyncio 9 | 10 | from mcp_agent.core.fastagent import FastAgent 11 | 12 | # Create the application 13 | fast = FastAgent( 14 | "Router Workflow", 15 | ) 16 | 17 | # Sample requests demonstrating direct tool use vs agent delegation 18 | SAMPLE_REQUESTS = [ 19 | "Download and summarize https://llmindset.co.uk/posts/2024/12/mcp-build-notes/", # Router handles directly with fetch 20 | "Analyze the quality of the Python codebase in the current working directory", # Delegated to code expert 21 | "What are the key principles of effective beekeeping?", # Delegated to general assistant 22 | ] 23 | 24 | 25 | @fast.agent( 26 | name="fetcher", 27 | instruction="""You are an agent, with a tool enabling you to fetch URLs.""", 28 | servers=["fetch"], 29 | ) 30 | @fast.agent( 31 | name="code_expert", 32 | instruction="""You are an expert in code analysis and software engineering. 
33 | When asked about code, architecture, or development practices, 34 | you provide thorough and practical insights.""", 35 | servers=["filesystem"], 36 | ) 37 | @fast.agent( 38 | name="general_assistant", 39 | instruction="""You are a knowledgeable assistant that provides clear, 40 | well-reasoned responses about general topics, concepts, and principles.""", 41 | ) 42 | @fast.router( 43 | name="route", 44 | model="sonnet", 45 | agents=["code_expert", "general_assistant", "fetcher"], 46 | ) 47 | async def main() -> None: 48 | async with fast.run() as agent: 49 | for request in SAMPLE_REQUESTS: 50 | await agent.route(request) 51 | 52 | 53 | if __name__ == "__main__": 54 | asyncio.run(main()) 55 | -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/workflows/short_story.md: -------------------------------------------------------------------------------- 1 | The Kittens Castle Adventuer 2 | 3 | One sunny day, three lil kittens name Whiskers, Socks, and Mittens was walkin threw a mystirus forrest. They hadnt never seen such a big forrest before! The trees was tall an spooky, an the ground was coverd in moss an stikks. 4 | 5 | Suddenlee, thru the trees, they sawd somthing HUUUUGE! It was a castell, but not just eny castell. This castell was made of sparkling chese an glittery windos. The turrits was so high they tuch the clowds, an the doars was big enuff for a elefant to walk threw! 6 | 7 | "Lookk!" sed Whiskers, his tale all poofy wit exsitement. "We fowned a castell!" Socks meowed loudly an jumped up an down. Mittens, who was the smallist kitten, just stared wit her big rond eyes. 8 | 9 | They climed up the cheesy walls, slip-slidin on the smoth surfase. Inside, they discoverd rooms ful of soft pillows an dangling strings an shiny things that went JINGEL when they tuch them. It was like a kitten paradyse! 10 | 11 | But then, a big shadowy figur apeared... was it the castell gaurd? Or sumthing mor mystirus? The kittens hudeld togethar, there lil hearts beating fast. What wud happan next in there amazeing adventuer? 12 | 13 | THE END?? -------------------------------------------------------------------------------- /src/mcp_agent/resources/examples/workflows/short_story.txt: -------------------------------------------------------------------------------- 1 | The Battle of Glimmerwood 2 | 3 | In the heart of Glimmerwood, a mystical forest knowed for its radiant trees, a small village thrived. 4 | The villagers, who were live peacefully, shared their home with the forest's magical creatures, 5 | especially the Glimmerfoxes whose fur shimmer like moonlight. 6 | 7 | One fateful evening, the peace was shaterred when the infamous Dark Marauders attack. 8 | Lead by the cunning Captain Thorn, the bandits aim to steal the precious Glimmerstones which was believed to grant immortality. 9 | 10 | Amidst the choas, a young girl named Elara stood her ground, she rallied the villagers and devised a clever plan. 11 | Using the forests natural defenses they lured the marauders into a trap. 12 | As the bandits aproached the village square, a herd of Glimmerfoxes emerged, blinding them with their dazzling light, 13 | the villagers seized the opportunity to captured the invaders. 14 | 15 | Elara's bravery was celebrated and she was hailed as the "Guardian of Glimmerwood". 16 | The Glimmerstones were secured in a hidden grove protected by an ancient spell. 17 | 18 | However, not all was as it seemed. 
The Glimmerstones true power was never confirm, 19 | and whispers of a hidden agenda linger among the villagers. 20 | -------------------------------------------------------------------------------- /src/mcp_agent/tools/tool_definition.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from typing import Any, Dict, Optional 3 | 4 | 5 | @dataclass 6 | class ToolDefinition: 7 | """ 8 | Represents a definition of a tool available to the agent. 9 | """ 10 | 11 | name: str 12 | description: Optional[str] = None 13 | inputSchema: Dict[str, Any] = field(default_factory=dict) 14 | # Add other relevant fields if necessary based on how tools are defined in fast-agent 15 | -------------------------------------------------------------------------------- /tests/e2e/multimodal/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | azure: 16 | 17 | # Logging and Console Configuration: 18 | logger: 19 | # level: "debug" | "info" | "warning" | "error" 20 | # type: "none" | "console" | "file" | "http" 21 | # path: "/path/to/logfile.jsonl" 22 | 23 | # Switch the progress display on or off 24 | progress_display: true 25 | 26 | # Show chat User/Assistant messages on the console 27 | show_chat: true 28 | # Show tool calls on the console 29 | show_tools: true 30 | # Truncate long tool responses on the console 31 | truncate_tools: true 32 | mcp: 33 | servers: 34 | image_server: 35 | command: "uv" 36 | args: ["run", "image_server.py", "image.png"] 37 | -------------------------------------------------------------------------------- /tests/e2e/multimodal/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/tests/e2e/multimodal/image.png -------------------------------------------------------------------------------- /tests/e2e/multimodal/sample.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/tests/e2e/multimodal/sample.pdf -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=, or within the Agent constructor.
12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | mcp: 31 | servers: 32 | prompt_server: 33 | command: "prompt-server" 34 | args: 35 | [ 36 | "simple.txt", 37 | "multiturn.md", 38 | "with_attachment.md", 39 | "with_attachment_css.md", 40 | ] 41 | -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/multiturn.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | l l M i n d s ET uk 3 | ---ASSISTANT 4 | llmindsetuk 5 | ---USER 6 | fA st age NT 7 | ---ASSISTANT 8 | fastagent 9 | ---USER 10 | m ORE training OK 11 | ---ASSISTANT 12 | moretrainingok 13 | ---USER 14 | t ESt ca seOK 15 | -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/sample.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/tests/e2e/prompts-resources/sample.pdf -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/simple.txt: -------------------------------------------------------------------------------- 1 | Repeat the following text verbatim: {{name}} -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/style.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --primary: #3498db; 3 | --secondary: #2ecc71; 4 | --dark: #333; 5 | --light: #f8f9fa; 6 | } 7 | * { 8 | margin: 0; 9 | padding: 0; 10 | box-sizing: border-box; 11 | } 12 | body { 13 | font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif; 14 | line-height: 1.6; 15 | color: var(--dark); 16 | } 17 | .container { 18 | width: 90%; 19 | max-width: 1200px; 20 | margin: 0 auto; 21 | padding: 1rem; 22 | } 23 | h1, 24 | h2, 25 | h3 { 26 | margin-bottom: 1rem; 27 | } 28 | p { 29 | margin-bottom: 1.5rem; 30 | } 31 | a { 32 | color: var(--primary); 33 | text-decoration: none; 34 | } 35 | a:hover { 36 | text-decoration: underline; 37 | } 38 | .btn { 39 | display: inline-block; 40 | padding: 0.5rem 1rem; 41 | background: var(--primary); 42 | color: white; 43 | border: none; 44 | border-radius: 4px; 45 | } 46 | .btn:hover { 47 | background: #2980b9; 48 | color: white; 49 | text-decoration: none; 50 | } 51 | img { 52 | max-width: 100%; 53 | height: auto; 54 | } 55 | .card { 56 | padding: 1rem; 57 | border-radius: 8px; 58 | box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); 59 | } 60 | @media (max-width: 768px) { 61 | .container { 62 | width: 95%; 63 | } 64 | } 65 | .text-center { 66 | text-align: center; 67 | } 68 | .mt-2 { 69 | margin-top: 2rem; 70 | } 71 | -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/test_resources.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.integration 5 | @pytest.mark.asyncio 6 | 
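# Stacked pytest marks: this test only runs in the integration/e2e suites,
# executes under asyncio, and repeats once per model alias parametrized below.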
@pytest.mark.e2e 7 | @pytest.mark.parametrize( 8 | "model_name", 9 | [ 10 | "haiku", 11 | ], 12 | ) 13 | async def test_using_resource_blob(fast_agent, model_name): 14 | """Test that the agent can process a simple prompt using directory-specific config.""" 15 | # Use the FastAgent instance from the test directory fixture 16 | fast = fast_agent 17 | 18 | # Define the agent 19 | @fast.agent( 20 | "agent", 21 | instruction="You are a helpful AI Agent", 22 | model=model_name, 23 | servers=["prompt_server"], 24 | ) 25 | async def agent_function(): 26 | async with fast.run() as agent: 27 | assert "fast-agent" in await agent.with_resource( 28 | "Summarise this PDF please, be sure to include the product name", 29 | "resource://fast-agent/sample.pdf", 30 | "prompt_server", 31 | ) 32 | 33 | await agent_function() 34 | 35 | 36 | @pytest.mark.integration 37 | @pytest.mark.asyncio 38 | @pytest.mark.e2e 39 | @pytest.mark.parametrize( 40 | "model_name", 41 | [ 42 | "haiku", 43 | ], 44 | ) 45 | async def test_using_resource_text(fast_agent, model_name): 46 | """Test that the agent can process a simple prompt using directory-specific config.""" 47 | # Use the FastAgent instance from the test directory fixture 48 | fast = fast_agent 49 | 50 | # Define the agent 51 | @fast.agent( 52 | "agent", 53 | instruction="You are a helpful AI Agent", 54 | model=model_name, 55 | servers=["prompt_server"], 56 | ) 57 | async def agent_function(): 58 | async with fast.run() as agent: 59 | answer = await agent.agent.with_resource( 60 | "What colour are buttons in this file?", 61 | "resource://fast-agent/style.css", 62 | "prompt_server", 63 | ) 64 | assert "white" in answer.lower() 65 | 66 | await agent_function() 67 | -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/with_attachment.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | Good morning, how are you? 3 | ---ASSISTANT 4 | Very well thank you, can I help you by summarising documents? 5 | ---USER 6 | Can you summarise this document please. Make sure to include the company name. 7 | ---RESOURCE 8 | sample.pdf 9 | -------------------------------------------------------------------------------- /tests/e2e/prompts-resources/with_attachment_css.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | Good morning, how are you? 3 | ---ASSISTANT 4 | Very well thank you, can I help you by inspecting CSS? 5 | ---USER 6 | Can you summarise this document please. Make sure to include the company name. 
7 | ---RESOURCE 8 | style.css 9 | 10 | -------------------------------------------------------------------------------- /tests/e2e/sampling/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: passthrough 2 | 3 | # Logging and Console Configuration: 4 | logger: 5 | level: "error" 6 | type: "console" 7 | # path: "/path/to/logfile.jsonl" 8 | 9 | # Switch the progress display on or off 10 | progress_display: true 11 | 12 | # Show chat User/Assistant messages on the console 13 | show_chat: true 14 | # Show tool calls on the console 15 | show_tools: true 16 | # Truncate long tool responses on the console 17 | truncate_tools: true 18 | 19 | mcp: 20 | servers: 21 | sampling_resource_anthropic: 22 | command: "uv" 23 | args: ["run", "sampling_resource_server.py"] 24 | sampling: 25 | model: "haiku" 26 | sampling_resource_openai: 27 | command: "uv" 28 | args: ["run", "sampling_resource_server.py"] 29 | sampling: 30 | model: "gpt-4.1-mini" 31 | 32 | # command: "bash" 33 | # args: ["-c", "uv run sampling_resource_server.py | tee sampling_output.log"] 34 | # sampling: 35 | # model: "haiku" 36 | -------------------------------------------------------------------------------- /tests/e2e/sampling/fastagent.jsonl: -------------------------------------------------------------------------------- 1 | {"level":"ERROR","timestamp":"2025-03-29T22:49:37.743115","namespace":"mcp_agent.mcp.mcp_connection_manager","message":"sampling_resource_anthropic: Lifecycle task encountered an error: generator didn't stop after athrow()","data":{"exc_info":true,"data":{"progress_action":"Error","server_name":"sampling_resource_anthropic"}}} 2 | {"level":"ERROR","timestamp":"2025-03-29T22:49:38.162289","namespace":"mcp_agent.mcp.mcp_connection_manager","message":"sampling_resource_openai: Lifecycle task encountered an error: generator didn't stop after athrow()","data":{"exc_info":true,"data":{"progress_action":"Error","server_name":"sampling_resource_openai"}}} 3 | {"level":"ERROR","timestamp":"2025-05-25T21:39:00.063320","namespace":"mcp_agent.mcp.sampling","message":"Error in sampling: Anthropic API key not configured\n\nThe Anthropic API key is required but not set.\nAdd it to your configuration file under anthropic.api_key or set the ANTHROPIC_API_KEY environment variable."} 4 | {"level":"ERROR","timestamp":"2025-05-25T21:39:00.831547","namespace":"mcp_agent.mcp.sampling","message":"Error in sampling: Openai API key not configured\n\nThe Openai API key is required but not set.\nAdd it to your configuration file under openai.api_key or set the OPENAI_API_KEY environment variable."} 5 | {"level":"ERROR","timestamp":"2025-05-25T21:39:01.599545","namespace":"mcp_agent.mcp.sampling","message":"Error in sampling: Anthropic API key not configured\n\nThe Anthropic API key is required but not set.\nAdd it to your configuration file under anthropic.api_key or set the ANTHROPIC_API_KEY environment variable."} 6 | -------------------------------------------------------------------------------- /tests/e2e/sampling/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/tests/e2e/sampling/image.png -------------------------------------------------------------------------------- /tests/e2e/sampling/sampling_resource_server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import 
Context, FastMCP, Image 2 | from mcp.types import SamplingMessage, TextContent 3 | 4 | # Create a FastMCP server 5 | mcp = FastMCP(name="FastStoryAgent") 6 | 7 | 8 | @mcp.resource("resource://fast-agent/short-story/{topic}") 9 | async def generate_short_story(topic: str): 10 | prompt = f"Please write a short story on the topic of {topic}." 11 | 12 | # Make a sampling request to the client 13 | result = await mcp.get_context().session.create_message( 14 | max_tokens=1024, 15 | messages=[SamplingMessage(role="user", content=TextContent(type="text", text=prompt))], 16 | ) 17 | 18 | return result.content.text 19 | 20 | 21 | @mcp.tool() 22 | async def sample_with_image(ctx: Context): 23 | result = await ctx.session.create_message( 24 | max_tokens=1024, 25 | messages=[ 26 | SamplingMessage( 27 | role="user", 28 | content=TextContent( 29 | type="text", 30 | text="What is the username in this image?", 31 | ), 32 | ), 33 | SamplingMessage(role="user", content=Image(path="image.png").to_image_content()), 34 | ], 35 | ) 36 | 37 | return result.content.text 38 | 39 | 40 | # Run the server when this file is executed directly 41 | if __name__ == "__main__": 42 | mcp.run() 43 | -------------------------------------------------------------------------------- /tests/e2e/smoke/base/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | mcp: 31 | servers: 32 | test_server: 33 | command: "uv" 34 | args: ["run", "test_server.py"] 35 | hyphen-name: 36 | command: "uv" 37 | args: ["run", "test_server.py"] 38 | temp_issue_ts: 39 | transport: "sse" 40 | url: "http://localhost:8080/sse" 41 | -------------------------------------------------------------------------------- /tests/e2e/smoke/base/test_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Simple MCP server that responds to tool calls with text and image content.
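The tools registered below are plain functions, so they can be sanity-checked directly before the server is wired into an agent. A sketch, assuming FastMCP's tool decorator returns the underlying function unchanged:

from test_server import check_weather, shirt_colour

assert check_weather("London") == "It's sunny in London"  # also writes weather_location.txt
assert shirt_colour() == "blue polka dots"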
4 | """ 5 | 6 | import logging 7 | 8 | from mcp.server.fastmcp import FastMCP 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | logger = logging.getLogger(__name__) 12 | 13 | # Create the FastMCP server 14 | app = FastMCP(name="Integration Server") 15 | 16 | 17 | @app.tool( 18 | name="check_weather", 19 | description="Returns the weather for a specified location.", 20 | ) 21 | def check_weather(location: str) -> str: 22 | # Write the location to a text file 23 | with open("weather_location.txt", "w") as f: 24 | f.write(location) 25 | 26 | # Return sunny weather condition 27 | return "It's sunny in " + location 28 | 29 | 30 | @app.tool(name="shirt_colour", description="returns the colour of the shirt being worn") 31 | def shirt_colour() -> str: 32 | return "blue polka dots" 33 | 34 | 35 | if __name__ == "__main__": 36 | # Run the server using stdio transport 37 | app.run(transport="stdio") 38 | -------------------------------------------------------------------------------- /tests/e2e/smoke/tensorzero/test_agent_interaction.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | from mcp_agent.core.request_params import RequestParams 5 | 6 | pytestmark = pytest.mark.usefixtures("tensorzero_docker_env") 7 | 8 | 9 | @pytest.mark.asyncio 10 | async def test_tensorzero_agent_smoke(project_root, chdir_to_tensorzero_example): 11 | """ 12 | Smoke test for the TensorZero agent interaction defined in examples/tensorzero/agent.py. 13 | Sends a predefined sequence of messages. 14 | """ 15 | config_file = "fastagent.config.yaml" 16 | 17 | my_t0_system_vars = { 18 | "TEST_VARIABLE_1": "Roses are red", 19 | "TEST_VARIABLE_2": "Violets are blue", 20 | "TEST_VARIABLE_3": "Sugar is sweet", 21 | "TEST_VARIABLE_4": "Vibe code responsibly 👍", 22 | } 23 | 24 | fast = FastAgent("fast-agent example test", config_path=config_file, ignore_unknown_args=True) 25 | 26 | @fast.agent( 27 | name="default", 28 | instruction=""" 29 | You are an agent dedicated to helping developers understand the relationship between TensoZero and fast-agent. If the user makes a request 30 | that requires you to invoke the test tools, please do so. When you use the tool, describe your rationale for doing so. 
31 | """, 32 | servers=["tester"], 33 | model="tensorzero.test_chat", 34 | request_params=RequestParams(template_vars=my_t0_system_vars), 35 | ) 36 | async def dummy_agent_func(): 37 | pass 38 | 39 | messages_to_send = [ 40 | "Hi.", 41 | "Tell me a poem.", 42 | "Do you have any tools that you can use?", 43 | "Please demonstrate the use of that tool on your last response.", 44 | "Please summarize the conversation so far.", 45 | "What tool calls have you executed in this session, and what were their results?", 46 | ] 47 | 48 | async with fast.run() as agent_app: 49 | agent_instance = agent_app.default 50 | print(f"\nSending {len(messages_to_send)} messages to agent '{agent_instance.name}'...") 51 | for i, msg_text in enumerate(messages_to_send): 52 | print(f"Sending message {i + 1}: '{msg_text}'") 53 | await agent_instance.send(msg_text) 54 | print(f"Message {i + 1} sent successfully.") 55 | 56 | print("\nAgent interaction smoke test completed successfully.") 57 | -------------------------------------------------------------------------------- /tests/e2e/smoke/tensorzero/test_image_demo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import importlib.util 3 | import sys 4 | from pathlib import Path 5 | 6 | import pytest 7 | 8 | pytestmark = pytest.mark.usefixtures("tensorzero_docker_env") 9 | 10 | 11 | def import_from_path(module_name: str, file_path: Path): 12 | spec = importlib.util.spec_from_file_location(module_name, file_path) 13 | if spec is None or spec.loader is None: 14 | raise ImportError(f"Could not load spec for module {module_name} at {file_path}") 15 | module = importlib.util.module_from_spec(spec) 16 | sys.modules[module_name] = module 17 | spec.loader.exec_module(module) 18 | return module 19 | 20 | 21 | @pytest.mark.asyncio 22 | async def test_tensorzero_image_demo_smoke(project_root, chdir_to_tensorzero_example): 23 | """ 24 | Smoke test for the TensorZero image demo script. 25 | Ensures the script runs to completion without errors. 
26 | """ 27 | image_demo_script_path = project_root / "examples" / "tensorzero" / "image_demo.py" 28 | 29 | if not image_demo_script_path.is_file(): 30 | pytest.fail(f"Image demo script not found at {image_demo_script_path}") 31 | 32 | print(f"\nImporting image demo script from: {image_demo_script_path}") 33 | image_demo_module = None 34 | try: 35 | image_demo_module = import_from_path("image_demo_module", image_demo_script_path) 36 | main_func = getattr(image_demo_module, "main", None) 37 | if not main_func or not asyncio.iscoroutinefunction(main_func): 38 | pytest.fail(f"'main' async function not found in {image_demo_script_path}") 39 | 40 | print("Executing image_demo.main()...") 41 | await main_func() 42 | print("image_demo.main() executed successfully.") 43 | 44 | except ImportError as e: 45 | pytest.fail(f"Failed to import image_demo script: {e}") 46 | except Exception as e: 47 | pytest.fail(f"Running image_demo script failed: {e}") 48 | finally: 49 | if image_demo_module and "image_demo_module" in sys.modules: 50 | del sys.modules["image_demo_module"] 51 | 52 | print("\nImage demo smoke test completed successfully.") 53 | -------------------------------------------------------------------------------- /tests/e2e/smoke/tensorzero/test_simple_agent_interaction.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | pytestmark = pytest.mark.usefixtures("tensorzero_docker_env", "chdir_to_tensorzero_example") 6 | 7 | 8 | @pytest.mark.asyncio 9 | async def test_tensorzero_simple_agent_smoke(): # Removed unused project_root fixture 10 | """ 11 | Smoke test for the TensorZero simple agent interaction defined in examples/tensorzero/simple_agent.py. 12 | Sends a single "hi" message. 13 | """ 14 | config_file = "fastagent.config.yaml" 15 | 16 | fast = FastAgent( 17 | "fast-agent simple example test", config_path=config_file, ignore_unknown_args=True 18 | ) 19 | 20 | @fast.agent( 21 | name="simple_default", 22 | instruction=""" 23 | You are an agent dedicated to helping developers understand the relationship between TensoZero and fast-agent. If the user makes a request 24 | that requires you to invoke the test tools, please do so. When you use the tool, describe your rationale for doing so. 25 | """, 26 | servers=["tester"], 27 | model="tensorzero.simple_chat", 28 | ) 29 | async def dummy_simple_agent_func(): 30 | pass 31 | 32 | message_to_send = "Hi." 33 | 34 | async with fast.run() as agent_app: 35 | agent_instance = agent_app.simple_default 36 | 37 | print(f"\nSending message to agent '{agent_instance.name}': '{message_to_send}'") 38 | await agent_instance.send(message_to_send) 39 | print(f"Message sent successfully to '{agent_instance.name}'.") 40 | 41 | print("\nSimple agent interaction smoke test completed successfully.") 42 | -------------------------------------------------------------------------------- /tests/e2e/structured/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 
11 | # Can be overridden with a command line switch --model=<model>, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | mcp: 31 | -------------------------------------------------------------------------------- /tests/e2e/workflow/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | #   <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=<model>, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | # mcp: 31 | #   servers: 32 | #     test_server: 33 | #       command: "uv" 34 | #       args: ["run", "test_routing_server.py"] 35 | -------------------------------------------------------------------------------- /tests/e2e/workflow/sunny.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/tests/e2e/workflow/sunny.png -------------------------------------------------------------------------------- /tests/e2e/workflow/test_routing_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Simple MCP server that responds to tool calls with text and image content.
4 | """ 5 | 6 | import logging 7 | 8 | from mcp.server.fastmcp import FastMCP 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | logger = logging.getLogger(__name__) 12 | 13 | # Create the FastMCP server 14 | app = FastMCP(name="Integration Server") 15 | 16 | 17 | @app.tool( 18 | name="check_weather", 19 | description="Returns the weather for a specified location.", 20 | ) 21 | def check_weather(location: str) -> str: 22 | # Write the location to a text file 23 | with open("weather_location.txt", "w") as f: 24 | f.write(location) 25 | 26 | # Return sunny weather condition 27 | return "It's stormy and cold in " + location 28 | 29 | 30 | if __name__ == "__main__": 31 | # Run the server using stdio transport 32 | app.run(transport="stdio") 33 | -------------------------------------------------------------------------------- /tests/e2e/workflow/umbrella.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/tests/e2e/workflow/umbrella.png -------------------------------------------------------------------------------- /tests/integration/api/fastagent.config.markup.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overriden with a command line switch --model=, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | enable_markup: false 31 | 32 | # MCP Servers 33 | mcp: 34 | servers: 35 | prompts: 36 | command: "prompt-server" 37 | args: ["playback.md"] 38 | std_io: 39 | command: "uv" 40 | args: ["run", "integration_agent.py", "--server", "--transport", "stdio"] 41 | sse: 42 | transport: "sse" 43 | url: "http://localhost:8723/sse" 44 | card_test: 45 | command: "uv" 46 | args: ["run", "mcp_tools_server.py"] 47 | hyphen-test: 48 | command: "uv" 49 | args: ["run", "mcp_tools_server.py"] 50 | # borrows config from prompt-server 51 | cwd_test: 52 | command: "prompt-server" 53 | args: ["multi.txt"] 54 | cwd: "../prompt-server/" 55 | -------------------------------------------------------------------------------- /tests/integration/api/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 
11 | # Can be overridden with a command line switch --model=<model>, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | 31 | # MCP Servers 32 | mcp: 33 | servers: 34 | prompts: 35 | command: "prompt-server" 36 | args: ["playback.md"] 37 | prompts2: 38 | command: "prompt-server" 39 | args: ["prompt.txt"] 40 | std_io: 41 | command: "uv" 42 | args: ["run", "integration_agent.py", "--server", "--transport", "stdio"] 43 | sse: 44 | transport: "sse" 45 | url: "http://localhost:8723/sse" 46 | http: 47 | transport: "http" 48 | url: "http://localhost:8724/mcp" 49 | card_test: 50 | command: "uv" 51 | args: ["run", "mcp_tools_server.py"] 52 | hyphen-test: 53 | command: "uv" 54 | args: ["run", "mcp_tools_server.py"] 55 | # borrows config from prompt-server 56 | cwd_test: 57 | command: "prompt-server" 58 | args: ["multi.txt"] 59 | cwd: "../prompt-server/" 60 | dynamic_tool: 61 | command: "uv" 62 | args: ["run", "mcp_dynamic_tools.py"] 63 | -------------------------------------------------------------------------------- /tests/integration/api/fastagent.secrets.yaml: -------------------------------------------------------------------------------- 1 | # provider key logic tests 2 | anthropic: 3 | api_key: "test-key-anth" 4 | openai: 5 | api_key: 6 | -------------------------------------------------------------------------------- /tests/integration/api/integration_agent.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simple test agent for integration testing.
3 | """ 4 | 5 | import asyncio 6 | import sys 7 | 8 | from mcp_agent.core.fastagent import FastAgent 9 | 10 | # Create the application 11 | fast = FastAgent("Integration Test Agent") 12 | 13 | 14 | # Define a simple agent 15 | @fast.agent( 16 | name="test", # Important: This name matches what we use in the CLI test 17 | instruction="You are a test agent that simply echoes back any input received.", 18 | ) 19 | async def main() -> None: 20 | async with fast.run() as agent: 21 | # This executes only for interactive mode, not needed for command-line testing 22 | if sys.stdin.isatty(): # Only run interactive mode if attached to a terminal 23 | user_input = input("Enter a message: ") 24 | response = await agent.send(user_input) 25 | print(f"Agent response: {response}") 26 | 27 | 28 | if __name__ == "__main__": 29 | asyncio.run(main()) 30 | -------------------------------------------------------------------------------- /tests/integration/api/mcp_dynamic_tools.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | 4 | from mcp.server.fastmcp import FastMCP 5 | 6 | # Create the FastMCP server 7 | app = FastMCP(name="An MCP Server", instructions="Here is how to use this server") 8 | 9 | # Track if our dynamic tool is registered 10 | dynamic_tool_registered = False 11 | 12 | 13 | @app.tool( 14 | name="check_weather", 15 | description="Returns the weather for a specified location.", 16 | ) 17 | async def check_weather(location: str) -> str: 18 | """The location to check""" 19 | global dynamic_tool_registered 20 | 21 | # Get the current context which gives us access to the session 22 | context = app.get_context() 23 | 24 | # Toggle the dynamic tool 25 | if dynamic_tool_registered: 26 | # Remove the tool by recreating the tool manager's tool list 27 | # This is a simple approach for testing purposes 28 | app._tool_manager._tools = { 29 | name: tool for name, tool in app._tool_manager._tools.items() if name != "dynamic_tool" 30 | } 31 | dynamic_tool_registered = False 32 | else: 33 | # Add a new tool dynamically 34 | app.add_tool( 35 | lambda: "This is a dynamic tool", 36 | name="dynamic_tool", 37 | description="A tool that was added dynamically", 38 | ) 39 | dynamic_tool_registered = True 40 | 41 | # Send notification that the tool list has changed 42 | await context.session.send_tool_list_changed() 43 | 44 | # Return weather condition 45 | return "It's sunny in " + location 46 | 47 | 48 | if __name__ == "__main__": 49 | # Run the server using stdio transport 50 | app.run(transport="stdio") 51 | -------------------------------------------------------------------------------- /tests/integration/api/mcp_tools_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Simple MCP server that responds to tool calls with text and image content. 
4 | """ 5 | 6 | import logging 7 | 8 | from mcp.server.fastmcp import FastMCP 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | logger = logging.getLogger(__name__) 12 | 13 | # Create the FastMCP server 14 | app = FastMCP(name="An MCP Server", instructions="Here is how to use this server") 15 | 16 | 17 | @app.tool( 18 | name="check_weather", 19 | description="Returns the weather for a specified location.", 20 | ) 21 | def check_weather(location: str) -> str: 22 | """The location to check""" 23 | # Write the location to a text file 24 | with open("weather_location.txt", "w") as f: 25 | f.write(location) 26 | 27 | # Return sunny weather condition 28 | return "It's sunny in " + location 29 | 30 | 31 | @app.tool(name="shirt-colour", description="Returns the colour of a shirt.") 32 | def shirt_colour() -> str: 33 | return "blue polka dots" 34 | 35 | 36 | if __name__ == "__main__": 37 | # Run the server using stdio transport 38 | app.run(transport="stdio") 39 | -------------------------------------------------------------------------------- /tests/integration/api/playback.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | user1 3 | 4 | ---ASSISTANT 5 | assistant1 6 | 7 | ---USER 8 | user2 9 | 10 | ---ASSISTANT 11 | assistant2 12 | -------------------------------------------------------------------------------- /tests/integration/api/prompt.txt: -------------------------------------------------------------------------------- 1 | this is from the prompt file 2 | -------------------------------------------------------------------------------- /tests/integration/api/stderr_test_script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Simple script that outputs messages to stderr for testing. 4 | """ 5 | import sys 6 | 7 | # Write complete lines 8 | sys.stderr.write("Error line 1\n") 9 | sys.stderr.flush() 10 | 11 | # Write partial line then complete it 12 | sys.stderr.write("Error line 2 part 1") 13 | sys.stderr.flush() 14 | sys.stderr.write(" part 2\n") 15 | sys.stderr.flush() 16 | 17 | # Another complete line 18 | sys.stderr.write("Final error line\n") 19 | sys.stderr.flush() -------------------------------------------------------------------------------- /tests/integration/api/test_describe_a2a.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING 2 | 3 | import pytest 4 | 5 | if TYPE_CHECKING: 6 | from a2a_types.types import AgentCard, AgentSkill 7 | 8 | from mcp_agent.agents.agent import Agent 9 | 10 | 11 | @pytest.mark.integration 12 | @pytest.mark.asyncio 13 | async def test_get_agent_card_and_tools(fast_agent): 14 | fast = fast_agent 15 | 16 | @fast.agent(name="test", instruction="here are you instructions", servers=["card_test"]) 17 | async def agent_function(): 18 | async with fast.run() as app: 19 | # Simulate some agent operations 20 | agent: Agent = app["test"] 21 | card: AgentCard = await agent.agent_card() 22 | 23 | assert "test" == card.name 24 | # TODO -- migrate AgentConfig to include "description" - "instruction" is OK for the moment... 25 | assert "here are you instructions" == card.description 26 | assert 2 == len(card.skills) 27 | 28 | skill: AgentSkill = card.skills[0] 29 | assert "card_test-check_weather" == skill.id 30 | assert "check_weather" == skill.name 31 | assert "Returns the weather for a specified location." 
32 | assert skill.tags 33 | assert "tool" == skill.tags[0] 34 | 35 | await agent_function() 36 | -------------------------------------------------------------------------------- /tests/integration/api/test_hyphens_in_name.py: -------------------------------------------------------------------------------- 1 | 2 | import pytest 3 | 4 | 5 | @pytest.mark.integration 6 | @pytest.mark.asyncio 7 | async def test_hyphenated_server_name(fast_agent): 8 | fast = fast_agent 9 | 10 | @fast.agent(name="test", instruction="here are your instructions", servers=["hyphen-test"]) 11 | async def agent_function(): 12 | async with fast.run() as app: 13 | result = await app.test.send('***CALL_TOOL check_weather {"location": "New York"}') 14 | assert "sunny" in result 15 | 16 | await agent_function() 17 | 18 | 19 | @pytest.mark.integration 20 | @pytest.mark.asyncio 21 | async def test_hyphenated_tool_name(fast_agent): 22 | fast = fast_agent 23 | 24 | @fast.agent(name="test", instruction="here are your instructions", servers=["hyphen-test"]) 25 | async def agent_function(): 26 | async with fast.run() as app: 27 | result = await app.test.send("***CALL_TOOL shirt-colour {}") 28 | assert "polka" in result 29 | 30 | await agent_function() 31 | -------------------------------------------------------------------------------- /tests/integration/api/test_markup_config.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from rich.errors import MarkupError 3 | 4 | from mcp_agent.core.prompt import Prompt 5 | 6 | 7 | @pytest.mark.integration 8 | @pytest.mark.asyncio 9 | async def test_markup_raises_an_error(fast_agent): 10 | """Test that console markup in a message raises a MarkupError when markup is enabled (the default).""" 11 | # Use the FastAgent instance from the test directory fixture 12 | fast = fast_agent 13 | 14 | # Define the agent 15 | @fast.agent( 16 | "agent1", 17 | instruction="You are a helpful AI Agent", 18 | ) 19 | async def agent_function(): 20 | async with fast.run() as agent: 21 | with pytest.raises(MarkupError): 22 | assert "test1" in await agent.agent1.send(Prompt.user("'[/]test1")) 23 | 24 | await agent_function() 25 | 26 | 27 | @pytest.mark.integration 28 | @pytest.mark.asyncio 29 | async def test_markup_disabled_does_not_error(markup_fast_agent): 30 | @markup_fast_agent.agent( 31 | "agent2", 32 | instruction="You are a helpful AI Agent", 33 | ) 34 | async def agent_function(): 35 | async with markup_fast_agent.run() as agent: 36 | assert "test2" in await agent.agent2.send(Prompt.user("'[/]test2")) 37 | 38 | await agent_function() 39 | -------------------------------------------------------------------------------- /tests/integration/api/test_prompt_commands.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the prompt command processing functionality.
3 | """ 4 | 5 | import pytest 6 | 7 | from mcp_agent.core.enhanced_prompt import handle_special_commands 8 | 9 | 10 | @pytest.mark.asyncio 11 | async def test_command_handling_for_prompts(): 12 | """Test the command handling functions for /prompts and /prompt commands.""" 13 | # Test /prompts command after it's been pre-processed 14 | # The pre-processed form of "/prompts" is {"select_prompt": True, "prompt_name": None} 15 | result = await handle_special_commands({"select_prompt": True, "prompt_name": None}, True) 16 | assert isinstance(result, dict), "Result should be a dictionary" 17 | assert "select_prompt" in result, "Result should have select_prompt key" 18 | assert result["select_prompt"] is True 19 | assert "prompt_name" in result 20 | assert result["prompt_name"] is None 21 | 22 | # Test /prompt command after pre-processing 23 | # The pre-processed form is {"select_prompt": True, "prompt_index": 3} 24 | result = await handle_special_commands({"select_prompt": True, "prompt_index": 3}, True) 25 | assert isinstance(result, dict), "Result should be a dictionary" 26 | assert "select_prompt" in result 27 | assert "prompt_index" in result 28 | assert result["prompt_index"] == 3 29 | 30 | # Test /prompt command after pre-processing 31 | # The pre-processed form is "SELECT_PROMPT:my-prompt" 32 | result = await handle_special_commands("SELECT_PROMPT:my-prompt", True) 33 | assert isinstance(result, dict), "Result should be a dictionary" 34 | assert "select_prompt" in result 35 | assert "prompt_name" in result 36 | assert result["prompt_name"] == "my-prompt" -------------------------------------------------------------------------------- /tests/integration/api/test_tool_list_change.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from typing import TYPE_CHECKING 4 | 5 | import pytest 6 | 7 | if TYPE_CHECKING: 8 | from mcp import ListToolsResult 9 | 10 | # Enable debug logging for the test 11 | logging.basicConfig(level=logging.DEBUG) 12 | 13 | 14 | @pytest.mark.integration 15 | @pytest.mark.asyncio 16 | async def test_tool_list_changes(fast_agent): 17 | fast = fast_agent 18 | print("Starting tool list change test") 19 | 20 | @fast.agent(name="test", instruction="here are your instructions", servers=["dynamic_tool"]) 21 | async def agent_function(): 22 | print("Initializing agent") 23 | async with fast.run() as app: 24 | # Initially there should be one tool (check_weather) 25 | tools: ListToolsResult = await app.test.list_tools() 26 | assert 1 == len(tools.tools) 27 | assert "dynamic_tool-check_weather" == tools.tools[0].name 28 | 29 | # Calling check_weather will toggle the dynamic_tool and send a notification 30 | result = await app.test.send('***CALL_TOOL check_weather {"location": "New York"}') 31 | assert "sunny" in result 32 | 33 | # Wait for the tool list to be refreshed (with retry) 34 | await asyncio.sleep(0.5) 35 | 36 | tools = await app.test.list_tools() 37 | dynamic_tool_found = False 38 | # Check if dynamic_tool is in the list 39 | for tool in tools.tools: 40 | if tool.name == "dynamic_tool-dynamic_tool": 41 | dynamic_tool_found = True 42 | break 43 | 44 | # Verify the dynamic tool was added 45 | assert dynamic_tool_found, ( 46 | "Dynamic tool was not added to the tool list after notification" 47 | ) 48 | assert 2 == len(tools.tools), f"Expected 2 tools but found {len(tools.tools)}" 49 | 50 | # Call check_weather again to toggle the dynamic_tool off 51 | result = await app.test.send('***CALL_TOOL 
check_weather {"location": "Boston"}') 52 | assert "sunny" in result 53 | 54 | # Sleep between retries 55 | await asyncio.sleep(0.5) 56 | 57 | # Get the updated tool list 58 | tools = await app.test.list_tools() 59 | 60 | assert 1 == len(tools.tools) 61 | 62 | await agent_function() 63 | -------------------------------------------------------------------------------- /tests/integration/prompt-server/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: passthrough 2 | 3 | # Logging and Console Configuration: 4 | logger: 5 | level: "error" 6 | type: "file" 7 | 8 | # Switch the progress display on or off 9 | progress_display: true 10 | 11 | # Show chat User/Assistant messages on the console 12 | show_chat: true 13 | # Show tool calls on the console 14 | show_tools: true 15 | # Truncate long tool responses on the console 16 | truncate_tools: true 17 | mcp: 18 | servers: 19 | prompts: 20 | command: "prompt-server" 21 | args: 22 | [ 23 | "simple.txt", 24 | "simple_sub.txt", 25 | "multi.txt", 26 | "multi_sub.txt", 27 | "multipart.json", 28 | ] 29 | prompt_sse: 30 | transport: "sse" 31 | url: "http://localhost:8723/sse" 32 | prompt_http: 33 | transport: "http" 34 | url: "http://localhost:8724/mcp" 35 | -------------------------------------------------------------------------------- /tests/integration/prompt-server/multi.txt: -------------------------------------------------------------------------------- 1 | ---USER 2 | good morning 3 | ---ASSISTANT 4 | how may i help you? -------------------------------------------------------------------------------- /tests/integration/prompt-server/multi_sub.txt: -------------------------------------------------------------------------------- 1 | ---USER 2 | hello, my name is {{user_name}} 3 | ---ASSISTANT 4 | nice to meet you. 
i am {{assistant_name}} -------------------------------------------------------------------------------- /tests/integration/prompt-server/simple.txt: -------------------------------------------------------------------------------- 1 | simple, no delimiters -------------------------------------------------------------------------------- /tests/integration/prompt-server/simple_sub.txt: -------------------------------------------------------------------------------- 1 | this is {{product}} by {{company}} -------------------------------------------------------------------------------- /tests/integration/prompt-state/conv1_simple.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | message 1 3 | ---ASSISTANT 4 | message 2 5 | ---USER 6 | message 3 7 | ---ASSISTANT 8 | message 4 9 | -------------------------------------------------------------------------------- /tests/integration/prompt-state/conv2_attach.md: -------------------------------------------------------------------------------- 1 | ---USER 2 | hello, here is a CSS file 3 | ---RESOURCE 4 | conv2_css.css 5 | ---ASSISTANT 6 | thank you for sharing that 7 | ---USER 8 | message 3 9 | ---RESOURCE 10 | conv2_text.txt 11 | ---RESOURCE 12 | conv2_img.png 13 | ---ASSISTANT 14 | thank you for sharing both text and image 15 | ---USER 16 | you are welcome 17 | -------------------------------------------------------------------------------- /tests/integration/prompt-state/conv2_css.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: Arial, sans-serif; 3 | margin: 0; 4 | padding: 0; 5 | background-color: #f5f5f5; 6 | color: #333; 7 | } 8 | -------------------------------------------------------------------------------- /tests/integration/prompt-state/conv2_img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/evalstate/fast-agent/71369d5b2080128b0373b1ffcede4f81dcc03446/tests/integration/prompt-state/conv2_img.png -------------------------------------------------------------------------------- /tests/integration/prompt-state/conv2_text.txt: -------------------------------------------------------------------------------- 1 | here is 2 | a 3 | normal text 4 | file 5 | -------------------------------------------------------------------------------- /tests/integration/prompt-state/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: passthrough 2 | 3 | # Logging and Console Configuration: 4 | logger: 5 | level: "error" 6 | type: "file" 7 | 8 | # Switch the progress display on or off 9 | progress_display: true 10 | 11 | # Show chat User/Assistant messages on the console 12 | show_chat: true 13 | # Show tool calls on the console 14 | show_tools: true 15 | # Truncate long tool responses on the console 16 | truncate_tools: true 17 | # mcp: 18 | # servers: 19 | # roots_test: 20 | # command: "uv" 21 | # args: ["run", "root_test_server.py"] 22 | # roots: 23 | # # a root with an alias 24 | # - uri: "file://foo/bar" 25 | # name: "test_data" 26 | # server_uri_alias: "file:///mnt/data/" 27 | # - uri: "file://no/alias" 28 | # name: "no_alias" 29 | -------------------------------------------------------------------------------- /tests/integration/resources/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model 
Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overriden with a command line switch --model=, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | 31 | # MCP Servers 32 | mcp: 33 | servers: 34 | resource_server_one: 35 | command: "prompt-server" 36 | args: ["prompt1.txt"] 37 | resource_server_two: 38 | command: "prompt-server" 39 | args: ["prompt2.txt"] 40 | -------------------------------------------------------------------------------- /tests/integration/resources/prompt1.txt: -------------------------------------------------------------------------------- 1 | ---USER 2 | By attaching resources, the prompt-server exposes resources! 3 | ---RESOURCE 4 | r1file1.txt 5 | ---RESOURCE 6 | r1file2.txt 7 | -------------------------------------------------------------------------------- /tests/integration/resources/prompt2.txt: -------------------------------------------------------------------------------- 1 | ---USER 2 | By attaching resources, the prompt-server exposes resources! 
3 | ---RESOURCE 4 | r2file1.txt 5 | ---RESOURCE 6 | r2file2.txt 7 | -------------------------------------------------------------------------------- /tests/integration/resources/r1file1.txt: -------------------------------------------------------------------------------- 1 | test 1 -------------------------------------------------------------------------------- /tests/integration/resources/r1file2.txt: -------------------------------------------------------------------------------- 1 | test 2 -------------------------------------------------------------------------------- /tests/integration/resources/r2file1.txt: -------------------------------------------------------------------------------- 1 | test 3 -------------------------------------------------------------------------------- /tests/integration/resources/r2file2.txt: -------------------------------------------------------------------------------- 1 | test 4 -------------------------------------------------------------------------------- /tests/integration/roots/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: passthrough 2 | 3 | # Logging and Console Configuration: 4 | logger: 5 | level: "error" 6 | type: "file" 7 | # path: "/path/to/logfile.jsonl" 8 | 9 | # Switch the progress display on or off 10 | progress_display: true 11 | 12 | # Show chat User/Assistant messages on the console 13 | show_chat: true 14 | # Show tool calls on the console 15 | show_tools: true 16 | # Truncate long tool responses on the console 17 | truncate_tools: true 18 | 19 | mcp: 20 | servers: 21 | roots_test: 22 | command: "uv" 23 | args: ["run", "root_test_server.py"] 24 | roots: 25 | # a root with an alias 26 | - uri: "file://foo/bar" 27 | name: "test_data" 28 | server_uri_alias: "file:///mnt/data/" 29 | - uri: "file://no/alias" 30 | name: "no_alias" 31 | -------------------------------------------------------------------------------- /tests/integration/roots/fastagent.jsonl: -------------------------------------------------------------------------------- 1 | {"level":"ERROR","timestamp":"2025-03-29T21:56:17.079227","namespace":"mcp_agent.mcp.mcp_connection_manager","message":"roots_test: Lifecycle task encountered an error: generator didn't stop after athrow()","data":{"exc_info":true,"data":{"progress_action":"Error","server_name":"roots_test"}}} 2 | -------------------------------------------------------------------------------- /tests/integration/roots/live.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application 6 | fast = FastAgent("FastAgent Example") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(servers=["roots_test"]) 11 | async def main(): 12 | # use the --model command line switch or agent arguments to change model 13 | async with fast.run() as agent: 14 | await agent.send("***CALL_TOOL roots_test-show_roots {}") 15 | 16 | 17 | if __name__ == "__main__": 18 | asyncio.run(main()) 19 | -------------------------------------------------------------------------------- /tests/integration/roots/root_client.py: -------------------------------------------------------------------------------- 1 | import anyio 2 | from mcp.client.session import ClientSession 3 | from mcp.client.stdio import StdioServerParameters, stdio_client 4 | from mcp.types import ListRootsResult, Root 5 | from pydantic import AnyUrl 6 | 7 | 8 | async def 
list_roots_callback(context): 9 | # Return some example roots - change these to any paths you want to expose 10 | return ListRootsResult( 11 | roots=[ 12 | Root( 13 | uri=AnyUrl("file://foo/bar"), 14 | name="Home Directory", 15 | ), 16 | Root( 17 | uri=AnyUrl("file:///tmp"), 18 | name="Temp Directory", 19 | ), 20 | ] 21 | ) 22 | 23 | 24 | async def main(): 25 | # Start the server as a subprocess 26 | server_params = StdioServerParameters( 27 | command="uv", 28 | args=["run", "root_test_server.py"], 29 | ) 30 | 31 | # Connect to the server via stdio 32 | async with stdio_client(server_params) as (read_stream, write_stream): 33 | # Create a client session 34 | async with ClientSession(read_stream, write_stream, list_roots_callback=list_roots_callback) as session: 35 | # Initialize the session 36 | await session.initialize() 37 | 38 | # Send initialized notification (required after initialize) 39 | # This is handled internally by initialize() in ClientSession 40 | 41 | # Call list_roots to get the roots from the server 42 | try: 43 | roots_result = await session.call_tool("show_roots", {}) 44 | print(f"Received roots: {roots_result}") 45 | 46 | # Print each root for clarity 47 | # for root in roots_result.roots: 48 | # print(f"Root: {root.uri}, Name: {root.name or 'unnamed'}") 49 | except Exception as e: 50 | print(f"Error listing roots: {e}") 51 | 52 | 53 | # Run the async main function 54 | if __name__ == "__main__": 55 | anyio.run(main) 56 | -------------------------------------------------------------------------------- /tests/integration/roots/root_test_server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import Context, FastMCP 2 | 3 | mcp = FastMCP("MCP Root Tester", log_level="DEBUG") 4 | 5 | 6 | @mcp.tool() 7 | async def show_roots(ctx: Context) -> str: 8 | return await ctx.session.list_roots() 9 | 10 | 11 | if __name__ == "__main__": 12 | mcp.run() 13 | -------------------------------------------------------------------------------- /tests/integration/roots/test_roots.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.integration 5 | @pytest.mark.asyncio 6 | async def test_roots_returned(fast_agent): 7 | """Test that the agent can process a simple prompt using directory-specific config.""" 8 | # Use the FastAgent instance from the test directory fixture 9 | fast = fast_agent 10 | 11 | # Define the agent 12 | @fast.agent(name="foo", instruction="bar", servers=["roots_test"]) 13 | async def agent_function(): 14 | async with fast.run() as agent: 15 | result = await agent.foo.send("***CALL_TOOL roots_test-show_roots {}") 16 | assert "file:///mnt/data/" in result # alias 17 | assert "test_data" in result 18 | assert "file://no/alias" in result # no alias. 
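# The assertions above check both sides of the aliasing behaviour configured
# in fastagent.config.yaml: the aliased root is presented to the server as
# "file:///mnt/data/" (its server_uri_alias), while the root declared without
# an alias reaches the server unchanged.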
19 | 20 | await agent_function() 21 | -------------------------------------------------------------------------------- /tests/integration/sampling/fastagent.config.auto_sampling_off.yaml: -------------------------------------------------------------------------------- 1 | default_model: passthrough 2 | auto_sampling: false # Disable auto-sampling 3 | 4 | # Logging and Console Configuration: 5 | logger: 6 | level: "error" 7 | type: "file" 8 | 9 | # Switch the progress display on or off 10 | progress_display: true 11 | 12 | # Show chat User/Assistant messages on the console 13 | show_chat: true 14 | # Show tool calls on the console 15 | show_tools: true 16 | # Truncate long tool responses on the console 17 | truncate_tools: true 18 | 19 | mcp: 20 | servers: 21 | sampling_test: 22 | command: "uv" 23 | args: ["run", "sampling_test_server.py"] 24 | # No explicit sampling configuration - should fail with auto_sampling=false -------------------------------------------------------------------------------- /tests/integration/sampling/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | default_model: passthrough 2 | 3 | # Logging and Console Configuration: 4 | logger: 5 | level: "error" 6 | type: "file" 7 | # path: "/path/to/logfile.jsonl" 8 | 9 | # Switch the progress display on or off 10 | progress_display: true 11 | 12 | # Show chat User/Assistant messages on the console 13 | show_chat: true 14 | # Show tool calls on the console 15 | show_tools: true 16 | # Truncate long tool responses on the console 17 | truncate_tools: true 18 | 19 | mcp: 20 | servers: 21 | sampling_test: 22 | command: "uv" 23 | args: ["run", "sampling_test_server.py"] 24 | sampling: 25 | model: "passthrough" 26 | slow_sampling: 27 | command: "uv" 28 | args: ["run", "sampling_test_server.py"] 29 | sampling: 30 | model: "slow" 31 | sampling_test_no_config: 32 | command: "uv" 33 | args: ["run", "sampling_test_server.py"] 34 | # No explicit sampling configuration - relies on auto_sampling 35 | -------------------------------------------------------------------------------- /tests/integration/sampling/live.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from mcp_agent.core.fastagent import FastAgent 4 | 5 | # Create the application with specified model 6 | fast = FastAgent("FastAgent Example") 7 | 8 | 9 | # Define the agent 10 | @fast.agent(servers=["sampling_test", "slow_sampling"]) 11 | async def main(): 12 | # use the --model command line switch or agent arguments to change model 13 | async with fast.run() as agent: 14 | result = await agent.send('***CALL_TOOL sampling_test-sample {"to_sample": "123foo"}') 15 | print(f"RESULT: {result}") 16 | 17 | result = await agent.send('***CALL_TOOL slow_sampling-sample_parallel') 18 | print(f"RESULT: {result}") 19 | 20 | 21 | if __name__ == "__main__": 22 | asyncio.run(main()) 23 | -------------------------------------------------------------------------------- /tests/integration/workflow/chain/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 
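# Note: this suite pins default_model to the internal "playback" model below;
# it replays pre-recorded assistant turns (in the style of playback.md) rather
# than calling a live provider, while the chain tests override individual
# agents to "passthrough".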
11 | # Can be overridden with a command line switch --model=<model>, or within the Agent constructor. 12 | 13 | default_model: playback 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | -------------------------------------------------------------------------------- /tests/integration/workflow/chain/test_chain_passthrough.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.integration 5 | @pytest.mark.asyncio 6 | async def test_chain_passthrough(fast_agent):  # CHAIN OF 3 BASIC AGENTS 7 | fast = fast_agent 8 | 9 | @fast.agent( 10 | "url_fetcher", 11 | instruction="Look at the articles on the page of the given url and summarize each of the articles.", 12 | model="passthrough", 13 | ) 14 | @fast.agent( 15 | "summary_writer", 16 | instruction=""" 17 | Write the given text to a file named summary.txt, and output which article topic is the most relevant to college students. 18 | """, 19 | model="passthrough", 20 | ) 21 | @fast.agent( 22 | "google_sheets_writer", 23 | instruction=""" 24 | Based on the given text, write some key points to research on the topic to a new google spreadsheet with a title "Research on <topic>". 25 | """, 26 | model="passthrough", 27 | ) 28 | @fast.chain( 29 | name="topic_writer", 30 | sequence=["url_fetcher", "summary_writer", "google_sheets_writer"], 31 | cumulative=False, 32 | ) 33 | @fast.chain( 34 | name="topic_writer_cumulative", 35 | sequence=["url_fetcher", "summary_writer", "google_sheets_writer"], 36 | cumulative=True, 37 | ) 38 | async def chain_workflow():  # Renamed from main to avoid conflicts, and wrapped inside the test 39 | async with fast.run() as agent: 40 | input_url = "https://www.nytimes.com" 41 | result = await agent.topic_writer.send(input_url) 42 | assert result == input_url 43 | 44 | result = await agent.topic_writer_cumulative.send("X") 45 | # we expect the result to include tagged responses from all agents. 46 | assert "X\nX\nX\nX" in result 47 | 48 | await chain_workflow()  # Call the inner function 49 | -------------------------------------------------------------------------------- /tests/integration/workflow/evaluator_optimizer/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | mcp: 2 | name: evaluator_optimizer_tests 3 | -------------------------------------------------------------------------------- /tests/integration/workflow/mixed/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | #   <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overridden with a command line switch --model=<model>, or within the Agent constructor.
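# Note: "passthrough" is the offline test model - it echoes input straight
# back, which is what lets the workflow tests in these directories assert on
# exact strings without any network calls.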
12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | -------------------------------------------------------------------------------- /tests/integration/workflow/orchestrator/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overriden with a command line switch --model=, or within the Agent constructor. 12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | 31 | # MCP Servers 32 | mcp: 33 | servers: 34 | prompts: 35 | command: "prompt-server" 36 | args: ["sizing.md", "resource.md"] 37 | hfspace: 38 | command: "npx" 39 | args: ["@llmindset/mcp-hfspace"] 40 | -------------------------------------------------------------------------------- /tests/integration/workflow/parallel/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overriden with a command line switch --model=, or within the Agent constructor. 
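# The parallel workflow tests below rely on this passthrough default: each
# fan-out agent echoes the request, and the fan-in agent (explicit, or
# auto-created when none is given) aggregates the echoed responses.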
12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | -------------------------------------------------------------------------------- /tests/integration/workflow/parallel/test_parallel_agent.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.integration 5 | @pytest.mark.asyncio 6 | async def test_parallel_run(fast_agent): 7 | """Single user message.""" 8 | # Use the FastAgent instance from the test directory fixture 9 | fast = fast_agent 10 | 11 | # Define the agent 12 | @fast.agent(name="fan_out_1") 13 | @fast.agent(name="fan_out_2") 14 | @fast.agent(name="fan_in") 15 | @fast.parallel(name="parallel", fan_out=["fan_out_1", "fan_out_2"], fan_in="fan_in") 16 | async def agent_function(): 17 | async with fast.run() as agent: 18 | expected: str = """The following request was sent to the agents: 19 | 20 | 21 | foo 22 | 23 | 24 | 25 | foo 26 | 27 | 28 | 29 | foo 30 | """ 31 | assert expected == await agent.parallel.send("foo") 32 | 33 | await agent_function() 34 | 35 | 36 | @pytest.mark.integration 37 | @pytest.mark.asyncio 38 | async def test_parallel_default_fan_in(fast_agent): 39 | """Single user message.""" 40 | # Use the FastAgent instance from the test directory fixture 41 | fast = fast_agent 42 | 43 | # Define the agent 44 | @fast.agent(name="fan_out_1") 45 | @fast.agent(name="fan_out_2") 46 | @fast.parallel(name="parallel", fan_out=["fan_out_1", "fan_out_2"]) 47 | async def agent_function(): 48 | async with fast.run() as agent: 49 | expected: str = """The following request was sent to the agents: 50 | 51 | 52 | foo 53 | 54 | 55 | 56 | foo 57 | 58 | 59 | 60 | foo 61 | """ 62 | # in this case the behaviour is the same as the previous test - but the fan-in passthrough was created automatically 63 | assert expected == await agent.parallel.send("foo") 64 | 65 | await agent_function() 66 | -------------------------------------------------------------------------------- /tests/integration/workflow/router/fastagent.config.yaml: -------------------------------------------------------------------------------- 1 | # FastAgent Configuration File 2 | 3 | # Default Model Configuration: 4 | # 5 | # Takes format: 6 | # .. (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low) 7 | # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3 8 | # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini 9 | # 10 | # If not specified, defaults to "haiku". 11 | # Can be overriden with a command line switch --model=, or within the Agent constructor. 
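# The router tests replay the canned JSON routing decisions from
# router_script.txt (agent / confidence / reasoning fields) instead of
# querying a live model.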
12 | 13 | default_model: passthrough 14 | 15 | # Logging and Console Configuration: 16 | logger: 17 | # level: "debug" | "info" | "warning" | "error" 18 | # type: "none" | "console" | "file" | "http" 19 | # path: "/path/to/logfile.jsonl" 20 | 21 | # Switch the progress display on or off 22 | progress_display: true 23 | 24 | # Show chat User/Assistant messages on the console 25 | show_chat: true 26 | # Show tool calls on the console 27 | show_tools: true 28 | # Truncate long tool responses on the console 29 | truncate_tools: true 30 | -------------------------------------------------------------------------------- /tests/integration/workflow/router/router_script.txt: -------------------------------------------------------------------------------- 1 | ---USER 2 | routing 1 3 | ---ASSISTANT 4 | { 5 | "agent": "target1", 6 | "confidence": "high", 7 | "reasoning": "Request is asking for weather information" 8 | } 9 | 10 | ---USER 11 | routing 2 12 | ---ASSISTANT 13 | { 14 | "agent": "target2", 15 | "confidence": "high", 16 | "reasoning": "Request is asking for weather information" 17 | } 18 | -------------------------------------------------------------------------------- /tests/unit/mcp_agent/agents/test_agent_types.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for agent types and their interactions with the interactive prompt. 3 | """ 4 | 5 | from mcp_agent.agents.agent import Agent 6 | from mcp_agent.core.agent_types import AgentConfig, AgentType 7 | 8 | 9 | def test_agent_type_default(): 10 | """Test that agent_type defaults to AgentType.BASIC.value""" 11 | agent = Agent(config=AgentConfig(name="test_agent")) 12 | assert agent.agent_type == AgentType.BASIC 13 | -------------------------------------------------------------------------------- /tests/unit/mcp_agent/llm/providers/test_sampling_converter_anthropic.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for AnthropicMCPTypeConverter. 3 | """ 4 | 5 | 6 | class TestAnthropicMCPTypeConverter: 7 | def test_stop_reason_conversions(self): 8 | """Test various stop reason conversions.""" 9 | from mcp_agent.llm.providers.sampling_converter_anthropic import ( 10 | anthropic_stop_reason_to_mcp_stop_reason, 11 | mcp_stop_reason_to_anthropic_stop_reason, 12 | ) 13 | 14 | # Test MCP to Anthropic conversions 15 | assert mcp_stop_reason_to_anthropic_stop_reason("endTurn") == "end_turn" 16 | assert mcp_stop_reason_to_anthropic_stop_reason("maxTokens") == "max_tokens" 17 | assert mcp_stop_reason_to_anthropic_stop_reason("stopSequence") == "stop_sequence" 18 | assert mcp_stop_reason_to_anthropic_stop_reason("toolUse") == "tool_use" 19 | assert mcp_stop_reason_to_anthropic_stop_reason("unknown") == "unknown" 20 | 21 | # Test Anthropic to MCP conversions 22 | assert anthropic_stop_reason_to_mcp_stop_reason("end_turn") == "endTurn" 23 | assert anthropic_stop_reason_to_mcp_stop_reason("max_tokens") == "maxTokens" 24 | assert anthropic_stop_reason_to_mcp_stop_reason("stop_sequence") == "stopSequence" 25 | assert anthropic_stop_reason_to_mcp_stop_reason("tool_use") == "toolUse" 26 | assert anthropic_stop_reason_to_mcp_stop_reason("unknown") == "unknown" 27 | -------------------------------------------------------------------------------- /tests/unit/mcp_agent/llm/providers/test_sampling_converter_openai.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for OpenAIMCPTypeConverter. 
3 | """ 4 | 5 | from mcp.types import ( 6 | PromptMessage, 7 | TextContent, 8 | ) 9 | 10 | from mcp_agent.llm.providers import OpenAISamplingConverter 11 | 12 | 13 | class TestOpenAIMCPTypeConverter: 14 | def test_from_mcp_prompt_message_user(self): 15 | """Test converting a user PromptMessage to OpenAI ChatCompletionMessageParam.""" 16 | # Create a user PromptMessage 17 | prompt_message = PromptMessage( 18 | role="user", 19 | content=TextContent(type="text", text="Please explain this concept."), 20 | ) 21 | 22 | # Convert to OpenAI ChatCompletionMessageParam 23 | openai_param = OpenAISamplingConverter.from_prompt_message(prompt_message) 24 | 25 | # Verify the conversion 26 | assert isinstance(openai_param, dict) # ChatCompletionMessageParam is a TypedDict 27 | assert openai_param["role"] == "user" 28 | assert "Please explain this concept." == openai_param["content"] 29 | 30 | def test_from_mcp_prompt_message_assistant(self): 31 | """Test converting an assistant PromptMessage to OpenAI ChatCompletionMessageParam.""" 32 | # Create an assistant PromptMessage 33 | prompt_message = PromptMessage( 34 | role="assistant", 35 | content=TextContent(type="text", text="Here's the explanation..."), 36 | ) 37 | 38 | # Convert to OpenAI ChatCompletionMessageParam 39 | openai_param = OpenAISamplingConverter.from_prompt_message(prompt_message) 40 | 41 | # Verify the conversion 42 | assert isinstance(openai_param, dict) # ChatCompletionMessageParam is a TypedDict 43 | assert openai_param["role"] == "assistant" 44 | assert openai_param["content"] == "Here's the explanation..." 45 | -------------------------------------------------------------------------------- /tests/unit/mcp_agent/mcp/test_mime_utils.py: -------------------------------------------------------------------------------- 1 | from mcp_agent.mcp import mime_utils 2 | 3 | 4 | class TestMimeUtils: 5 | def test_guess_mime_type(self): 6 | """Test guessing MIME types from file extensions.""" 7 | assert mime_utils.guess_mime_type("file.txt") == "text/plain" 8 | assert mime_utils.guess_mime_type("file.py") == "text/x-python" 9 | assert mime_utils.guess_mime_type("file.js") in [ 10 | "application/javascript", 11 | "text/javascript", 12 | ] 13 | assert mime_utils.guess_mime_type("file.json") == "application/json" 14 | assert mime_utils.guess_mime_type("file.html") == "text/html" 15 | assert mime_utils.guess_mime_type("file.css") == "text/css" 16 | assert mime_utils.guess_mime_type("file.png") == "image/png" 17 | assert mime_utils.guess_mime_type("file.jpg") == "image/jpeg" 18 | assert mime_utils.guess_mime_type("file.jpeg") == "image/jpeg" 19 | 20 | # TODO: decide if this should default to text or not... 
--------------------------------------------------------------------------------
/tests/unit/mcp_agent/mcp/test_sampling.py:
--------------------------------------------------------------------------------
from mcp.types import CreateMessageRequestParams, SamplingMessage, TextContent

from mcp_agent.mcp.sampling import sampling_agent_config


def test_build_sampling_agent_config_with_system_prompt():
    """Test building AgentConfig with system prompt from params"""
    # Create params with system prompt
    params = CreateMessageRequestParams(
        maxTokens=1024,
        messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))],
        systemPrompt="Custom system instruction",
    )

    # Build config
    config = sampling_agent_config(params)

    # Verify instruction is set from systemPrompt
    assert config.name == "sampling_agent"
    assert config.instruction == "Custom system instruction"
    assert config.servers == []


def test_build_sampling_agent_config_default():
    """Test building AgentConfig with default values"""
    # Build config with no params
    config = sampling_agent_config(None)

    # Verify default instruction
    assert config.name == "sampling_agent"
    assert config.instruction == "You are a helpful AI Agent."
    assert config.servers == []


def test_build_sampling_agent_config_empty_system_prompt():
    """Test building AgentConfig with empty system prompt"""
    # Create params with empty system prompt
    params = CreateMessageRequestParams(
        maxTokens=512,
        messages=[SamplingMessage(role="user", content=TextContent(type="text", text="Hello"))],
        systemPrompt="",
    )

    # Build config
    config = sampling_agent_config(params)

    # Verify instruction is the empty string as received in params.systemPrompt
    assert config.instruction == ""
--------------------------------------------------------------------------------
/tests/unit/mcp_agent/mcp_agent/fixture/README.md:
--------------------------------------------------------------------------------
# Test Fixtures

This directory contains test fixtures used for verifying event processing and display functionality.

## Files

- `mcp_basic_agent_20250131_205604.jsonl`: Log file containing events from a basic agent run, including "final response" events from both OpenAI and Anthropic endpoints
- `expected_output.txt`: Expected formatted output when processing the log file through event_summary.py

## Updating Fixtures

If you need to update these fixtures (e.g., when changing event processing logic), you can:

1. Run an example to generate a new log file:

   ```bash
   cd examples/mcp_basic_agent
   rm -f mcp-agent.jsonl  # Start with a clean log file
   uv run python main.py "What is the timestamp in different timezones?"
   cp mcp-agent.jsonl ../../tests/fixture/mcp_basic_agent_20250131_205604.jsonl
   ```

2. Use the utility method to update expected output:

   ```python
   from tests.test_event_progress import update_test_fixtures
   update_test_fixtures()
   ```

The test file will verify that event processing produces consistent output matching these fixtures.
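To eyeball fixture drift before committing an update, you can diff freshly generated output against the stored fixture (a hypothetical invocation; adjust the arguments to however `event_summary.py` actually reads its input):

```bash
# Assumes event_summary.py takes a log path and writes the summary to stdout.
uv run python scripts/event_summary.py tests/fixture/mcp_basic_agent_20250131_205604.jsonl > /tmp/actual_output.txt
diff /tmp/actual_output.txt tests/fixture/expected_output.txt
```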
Note: Always start with a clean log file (`rm -f mcp-agent.jsonl`) before generating new fixtures, as the logger appends to existing files.
--------------------------------------------------------------------------------