├── .github └── workflows │ ├── lambda-extension-build.yml │ └── sql-stack.yml ├── .gitignore ├── .markdownlint.json ├── .python-version ├── BUILD.md ├── LICENSE ├── Makefile ├── README.md ├── _config.yml ├── app.py ├── buildspec-router.yml ├── buildspec-templates ├── README.md ├── cdk │ ├── buildspec-sql-stack.yml │ └── buildspec-template.yml ├── examples │ └── typescript-lambda-example.yml ├── generate_buildspec.py └── lambda │ ├── buildspec-go-lambda.yml │ ├── buildspec-java-lambda.yml │ ├── buildspec-lambda-extension.yml │ ├── buildspec-python-lambda.yml │ ├── buildspec-rust-lambda.yml │ └── buildspec-typescript-lambda.yml ├── buildspec.yml ├── cdk.json ├── docs ├── .gitignore ├── README.md ├── blog │ ├── 2019-05-28-first-blog-post.md │ ├── 2019-05-29-long-blog-post.md │ ├── 2021-08-01-mdx-blog-post.mdx │ ├── 2021-08-26-welcome │ │ ├── docusaurus-plushie-banner.jpeg │ │ └── index.md │ ├── authors.yml │ └── tags.yml ├── docs │ ├── intro.md │ ├── introduction-to-ai-agents │ │ ├── _category_.json │ │ ├── ai-agent-frameworks.md │ │ ├── understanding-ai-agents.md │ │ └── understanding-llm.md │ ├── tutorial-basics │ │ ├── _category_.json │ │ ├── congratulations.md │ │ ├── create-a-tool.md │ │ └── create-an-agent.md │ └── tutorial-extras │ │ ├── _category_.json │ │ ├── build-rust-tool.md │ │ ├── build-typescript-tool.md │ │ └── img │ │ ├── docsVersionDropdown.png │ │ └── localeDropdown.png ├── docusaurus.config.ts ├── package.json ├── sidebars.ts ├── src │ ├── components │ │ └── HomepageFeatures │ │ │ ├── index.tsx │ │ │ └── styles.module.css │ ├── css │ │ └── custom.css │ └── pages │ │ ├── index.module.css │ │ ├── index.tsx │ │ └── markdown-page.md ├── static │ ├── .nojekyll │ └── img │ │ ├── AgentSchamticFlow.png │ │ ├── TodayIsPredictionHyperbolic.png │ │ ├── docusaurus-social-card.jpg │ │ ├── docusaurus.png │ │ ├── favicon.ico │ │ ├── logo.svg │ │ ├── scalability.svg │ │ ├── undraw_docusaurus_mountain.svg │ │ ├── undraw_docusaurus_react.svg │ │ └── undraw_docusaurus_tree.svg └── tsconfig.json ├── images ├── AI-Agents-Traces-Service-Map.png ├── Agent-AI-UI.png ├── GoogleMaps-agent-step-functions.svg ├── Human-Approval-UI.png ├── agent_stepfunctions_graph.svg └── agent_with_human_approval.svg ├── lambda ├── call_llm │ ├── CLAUDE.md │ ├── README.md │ ├── __init__.py │ ├── functions │ │ ├── __init__.py │ │ ├── anthropic_llm │ │ │ ├── __init__.py │ │ │ ├── claude_handler.py │ │ │ ├── claude_lambda.py │ │ │ ├── requirements.in │ │ │ ├── requirements.txt │ │ │ └── template.yaml │ │ ├── bedrock_llm │ │ │ ├── __init__.py │ │ │ ├── bedrock_handler.py │ │ │ ├── bedrock_lambda.py │ │ │ ├── nova_handler.py │ │ │ ├── nova_lambda.py │ │ │ ├── requirements.in │ │ │ ├── requirements.txt │ │ │ └── template.yaml │ │ ├── gemini_llm │ │ │ ├── __init__.py │ │ │ ├── gemini_handler.py │ │ │ ├── gemini_lambda.py │ │ │ ├── requirements.in │ │ │ ├── requirements.txt │ │ │ └── template.yaml │ │ └── openai_llm │ │ │ ├── __init__.py │ │ │ ├── deepseek_handler.py │ │ │ ├── deepseek_lambda.py │ │ │ ├── openai_handler.py │ │ │ ├── openai_lambda.py │ │ │ ├── requirements.in │ │ │ ├── requirements.txt │ │ │ ├── template.yaml │ │ │ ├── xai_handler.py │ │ │ └── xai_lambda.py │ ├── lambda_layer │ │ └── python │ │ │ ├── __init__.py │ │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── base_llm.py │ │ │ └── config.py │ │ │ ├── requirements.in │ │ │ └── requirements.txt │ ├── requirements.in │ ├── requirements.txt │ └── tests │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── events │ │ └── multiple-places-weather-event.json │ │ ├── 
requirements-test.txt │ │ ├── test_bedrock_handler.py │ │ ├── test_claude_handler.py │ │ ├── test_deepseek_handler.py │ │ ├── test_gemini_handler.py │ │ ├── test_nova_handler.py │ │ ├── test_openai_handler.py │ │ └── test_xai_handler.py ├── cookiecutter │ ├── README.md │ └── tools │ │ ├── python │ │ ├── cookiecutter.json │ │ ├── local_extensions.py │ │ └── {{cookiecutter.tool_name}} │ │ │ ├── README.md │ │ │ ├── index.py │ │ │ ├── requirements.in │ │ │ ├── template.yaml │ │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── requirements-test.txt │ │ │ ├── test-event.json │ │ │ └── test_tool.py │ │ ├── rust │ │ ├── cookiecutter.json │ │ └── {{cookiecutter.tool_name}} │ │ │ ├── Cargo.toml │ │ │ ├── README.md │ │ │ ├── src │ │ │ ├── event_handler.rs │ │ │ └── main.rs │ │ │ ├── template.yaml │ │ │ └── tests │ │ │ └── test-event.json │ │ └── typescript │ │ ├── cookiecutter.json │ │ └── {{cookiecutter.tool_name}} │ │ ├── README.md │ │ ├── package.json │ │ ├── src │ │ ├── index.ts │ │ └── local-test.ts │ │ ├── template.yaml │ │ ├── tests │ │ ├── __init__.py │ │ └── test-event.json │ │ └── tsconfig.json ├── extensions │ └── long-content │ │ ├── .vscode │ │ └── settings.json │ │ ├── CLAUDE.md │ │ ├── Cargo.toml │ │ ├── Makefile │ │ ├── NEXT_STEPS.md │ │ ├── README.md │ │ ├── buildspec.yml │ │ ├── images │ │ ├── diagram1.png │ │ └── diagram2.png │ │ ├── opt │ │ ├── entrypoint │ │ └── wrapper │ │ ├── samconfig.toml │ │ ├── src │ │ ├── env.rs │ │ ├── lib.rs │ │ ├── main.rs │ │ ├── route.rs │ │ ├── sandbox.rs │ │ ├── stats.rs │ │ ├── transform.rs │ │ ├── transform_complex.rs │ │ └── transform_improved.rs │ │ ├── template-arm.yaml │ │ ├── template-x86.yaml │ │ └── tests │ │ └── test_function.py └── tools │ ├── EarthQuakeQuery │ ├── README.md │ ├── index.py │ ├── requirements.in │ ├── requirements.txt │ ├── template.yaml │ └── tests │ │ ├── __init__.py │ │ ├── requirements-test.txt │ │ ├── test-event.json │ │ └── test_tool.py │ ├── EarthQuakeQueryTS │ ├── README.md │ ├── package.json │ ├── src │ │ ├── index.ts │ │ └── local-test.ts │ ├── template.yaml │ ├── tests │ │ ├── __init__.py │ │ └── test-event.json │ └── tsconfig.json │ ├── MicrosoftGraphAPI │ ├── README.md │ ├── index.py │ ├── requirements.in │ ├── requirements.txt │ ├── template.yaml │ └── tests │ │ ├── __init__.py │ │ ├── requirements-test.txt │ │ ├── test-event.json │ │ └── test_tool.py │ ├── README.md │ ├── SemanticSearchRust │ ├── Cargo.toml │ ├── README.md │ ├── notebooks │ │ └── populate_database.ipynb │ ├── src │ │ ├── event_handler.rs │ │ └── main.rs │ ├── template.yaml │ └── tests │ │ └── test-event.json │ ├── WebScraperMemory │ ├── CLAUDE.md │ ├── Cargo.toml │ ├── README.md │ ├── src │ │ ├── event_handler.rs │ │ └── main.rs │ ├── template.yaml │ └── tests │ │ ├── get-extraction-script-test.json │ │ ├── save-extraction-script-test.json │ │ ├── save-site-schema-test.json │ │ └── test-event.json │ ├── books-recommender │ ├── package.json │ ├── src │ │ ├── index.ts │ │ └── local-test.ts │ ├── template.yaml │ ├── tests │ │ └── test-event.json │ └── tsconfig.json │ ├── cloudwatch-queries │ └── index.py │ ├── code-interpreter │ ├── README.md │ ├── index.py │ ├── requirements.in │ └── requirements.txt │ ├── db-interface │ ├── index.py │ ├── requirements.in │ ├── requirements.txt │ └── sample-data │ │ ├── player.csv │ │ └── salary.csv │ ├── google-maps │ ├── README.md │ ├── package.json │ ├── src │ │ ├── index.ts │ │ └── local-test.ts │ ├── template.yaml │ ├── tests │ │ └── test-event.json │ └── tsconfig.json │ ├── graphql-interface │ ├── README.md │ 
├── __init__.py │ ├── index.py │ ├── requirements.in │ ├── requirements.txt │ ├── template.yaml │ └── tests │ │ ├── __init__.py │ │ ├── requirements-test.txt │ │ ├── test-query-event.json │ │ └── test_tool.py │ ├── image-analysis │ ├── index.py │ ├── requirements.in │ ├── requirements.txt │ ├── template.yaml │ └── tests │ │ └── test-event.json │ ├── local-agent │ ├── CLAUDE.md │ ├── Cargo.toml │ ├── README.md │ ├── daemon_config.json │ ├── examples │ │ ├── New_Document_Button.png │ │ ├── image_detection_examples.json │ │ ├── notepad_windows_example.json │ │ └── textedit_mac_example.json │ ├── requirements.in │ ├── requirements.txt │ ├── script_executor.py │ ├── src │ │ ├── lib.rs │ │ └── main.rs │ └── tests │ │ ├── integration_test.rs │ │ ├── test_pyautogui_integration.rs │ │ └── test_script_executor.py │ ├── rust-clustering │ ├── .gitignore │ ├── Cargo.toml │ ├── README.md │ ├── src │ │ ├── event_handler.rs │ │ └── main.rs │ ├── template.yaml │ └── tests │ │ └── test-event.json │ ├── stock-analyzer │ ├── README.md │ ├── events │ │ └── test_event.json │ ├── pom.xml │ ├── src │ │ ├── main │ │ │ └── java │ │ │ │ └── tools │ │ │ │ └── StockAnalyzerLambda.java │ │ └── test │ │ │ └── java │ │ │ └── tools │ │ │ ├── InvokeTest.java │ │ │ ├── TestContext.java │ │ │ └── TestLogger.java │ └── template.yaml │ ├── web-research │ ├── Makefile │ ├── README.md │ ├── events │ │ └── test_event.json │ ├── go.mod │ ├── go.sum │ ├── main.go │ ├── main_test.go │ └── template.yaml │ ├── web-scraper │ ├── CLAUDE.md │ ├── README.md │ ├── package.json │ ├── setup.sh │ ├── src │ │ ├── index.ts │ │ └── local-test.ts │ ├── template.yaml │ ├── tests │ │ ├── bbc-news-article.json │ │ ├── bbc-sports-news.json │ │ ├── navigation-example.json │ │ └── test-event.json │ └── tsconfig.json │ └── yfinance │ ├── index.py │ ├── requirements.in │ └── requirements.txt ├── pyproject.toml ├── requirements.in ├── requirements.txt ├── step-functions ├── agent-with-tools-flow-template.json └── supervisor-agent-flow-template.json ├── step_functions_agent ├── README.md ├── agent_docs_stack.py ├── agent_monitoring_stack.py ├── agent_ui_stack.py ├── ai_agent_construct_from_json.py ├── ai_supervisor_agent_construct_from_json.py ├── step_functions_analysis_agent_stack.py ├── step_functions_books_agent_stack.py ├── step_functions_cloudwatch_agent_stack.py ├── step_functions_clustering_agent_stack.py ├── step_functions_earthqueke_agent_stack.py ├── step_functions_financial_agent_stack.py ├── step_functions_googlemap_agent_stack.py ├── step_functions_graphql_agent_stack.py ├── step_functions_image_analysis_agent_stack.py ├── step_functions_research_agent_stack.py ├── step_functions_semantic_search_agent_stack.py ├── step_functions_sql_agent_stack.py ├── step_functions_supervisor_agent_stack.py ├── step_functions_test_agent_stack.py └── step_functions_web_scraper_agent_stack.py └── ui ├── Dockerfile ├── apprunner.yaml ├── call_agent.py ├── requirements.in └── requirements.txt /.github/workflows/lambda-extension-build.yml: -------------------------------------------------------------------------------- 1 | name: Lambda Extensions - Build Notification 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | paths: 7 | - 'lambda/extensions/long-content/**' 8 | pull_request: 9 | branches: [ main ] 10 | paths: 11 | - 'lambda/extensions/long-content/**' 12 | workflow_dispatch: 13 | 14 | jobs: 15 | notify_trigger: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v3 20 | 21 | - name: Notify About CodeBuild 22 | run: 
| 23 | echo "=== Lambda Extension Build Notification ===" 24 | echo "" 25 | echo "Changes detected in lambda/extensions/long-content" 26 | echo "" 27 | echo "IMPORTANT: This workflow only serves as a notification." 28 | echo "AWS CodeBuild is configured with a webhook to GitHub." 29 | echo "Buildspec.yml is located at the repository root." 30 | echo "" 31 | echo "CodeBuild project: arn:aws:codebuild:us-west-2:672915487120:project/step-functions-agent" 32 | echo "" 33 | echo "Build status can be monitored at:" 34 | echo "https://us-west-2.console.aws.amazon.com/codesuite/codebuild/projects/step-functions-agent/history" 35 | echo "" 36 | echo "After successful build, extensions will be available at:" 37 | echo "s3://step-functions-agent-artifacts-{region}-{account-id}/lambda-layers/extension-arm.zip" 38 | echo "s3://step-functions-agent-artifacts-{region}-{account-id}/lambda-layers/extension-x86.zip" -------------------------------------------------------------------------------- /.github/workflows/sql-stack.yml: -------------------------------------------------------------------------------- 1 | name: Build SQLAgentStack 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | paths: 7 | - 'step_functions_agent/step_functions_sql_agent_stack.py' 8 | - 'app.py' 9 | pull_request: 10 | branches: [ main ] 11 | paths: 12 | - 'step_functions_agent/step_functions_sql_agent_stack.py' 13 | - 'app.py' 14 | workflow_dispatch: 15 | 16 | jobs: 17 | build: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v3 21 | 22 | - name: Configure AWS credentials 23 | uses: aws-actions/configure-aws-credentials@v1 24 | with: 25 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 26 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 27 | aws-region: ${{ secrets.AWS_REGION }} 28 | 29 | - name: Copy buildspec to root 30 | run: | 31 | cp buildspec-templates/cdk/buildspec-sql-stack.yml buildspec.yml 32 | 33 | - name: Build with AWS CodeBuild 34 | uses: aws-actions/aws-codebuild-run-build@v1 35 | with: 36 | project-name: SQLAgentStack 37 | buildspec-override: buildspec.yml -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python-generated files 2 | __pycache__/ 3 | *.py[oc] 4 | build/ 5 | dist/ 6 | wheels/ 7 | *.egg-info 8 | 9 | # Virtual environments 10 | .venv 11 | 12 | # UV pip install outputs 13 | .uv/ 14 | .pytest_cache/ 15 | **/.uv/ 16 | **/__pycache__/ 17 | 18 | # CDK output 19 | cdk.out/ 20 | **/cdk.out/ 21 | .DS_Store 22 | .env 23 | lambda/tools/google-maps/node_modules 24 | lambda/tools/stock-analyzer/target 25 | lambda/tools/web-research/.aws-sam 26 | uv.lock 27 | .aws-sam 28 | lambda/tools/books-recommender/node_modules 29 | lambda/tools/books-recommender/package-lock.json 30 | lambda/tools/google-maps/package-lock.json 31 | lambda/tools/web-scraper/layers/chromium/nodejs 32 | lambda/tools/web-scraper/layers/chromium/chromium.zip 33 | lambda/tools/web-scraper/node_modules 34 | lambda/tools/web-scraper/package-lock.json 35 | lambda/tools/web-scraper/samconfig.toml 36 | docs/package-lock.json 37 | lambda/tools/EarthQuakeQueryTS/node_modules 38 | lambda/tools/EarthQuakeQueryTS/package-lock.json 39 | lambda/tools/SemanticSearchRust/target 40 | lambda/tools/SemanticSearchRust/Cargo.lock 41 | lambda/tools/rust-clustering/Cargo.lock 42 | lambda/tools/WebScraperMemory/target 43 | lambda/extensions/long-content/extension-x86/extensions 44 | 
lambda/extensions/long-content/extension-arm/extensions 45 | lambda/extensions/long-content/target 46 | lambda/extensions/long-content/extension-arm.zip 47 | lambda/extensions/long-content/extension-x86.zip 48 | lambda/extensions/long-content/Cargo.lock 49 | lambda/tools/local-agent/Cargo.lock 50 | lambda/tools/local-agent/.vscode 51 | lambda/tools/WebScraperMemory/Cargo.lock 52 | -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "MD033": { 3 | "allowed_elements": [ 4 | "br" 5 | ] 6 | }, 7 | "MD013": { 8 | "line_length": 1000 9 | } 10 | } -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /BUILD.md: -------------------------------------------------------------------------------- 1 | # Build Process 2 | 3 | This document describes the build process for the Step Functions Agent project. 4 | 5 | ## Overview 6 | 7 | The project uses AWS CodeBuild to build various components: 8 | - CDK stacks that generate CloudFormation templates 9 | - Lambda functions written in different languages (Python, TypeScript, Rust, Go, Java) 10 | - Lambda extensions 11 | 12 | All builds use a single `buildspec.yml` file in the root of the repository, which serves as a router to the appropriate build process based on the `BUILD_TYPE` environment variable. 13 | 14 | ## Setting Up a New Build 15 | 16 | 1. Create a new CodeBuild project in AWS 17 | 2. Configure the source repository 18 | 3. Set the `BUILD_TYPE` environment variable to one of the supported build types 19 | 4. Use the default buildspec path (buildspec.yml in the root directory) 20 | 5. Configure other build settings as needed (compute, service role, etc.) 21 | 22 | ## Supported Build Types 23 | 24 | The router supports the following build types through the `BUILD_TYPE` environment variable: 25 | 26 | ### CDK Stacks 27 | 28 | - `sql-stack`: Builds the SQL Agent CDK stack into CloudFormation templates 29 | 30 | ### Lambda Functions 31 | 32 | - `lambda-extension`: Builds the Lambda extension written in Rust 33 | - `web-scraper`: Builds the Web Scraper Lambda function written in TypeScript 34 | - `db-interface`: Builds the DB Interface Lambda function written in Python 35 | - `web-scraper-memory`: Builds the WebScraperMemory Lambda function written in Rust 36 | 37 | ## Adding New Build Types 38 | 39 | To add support for a new component: 40 | 41 | 1. Identify the component's type and build requirements 42 | 2. In `buildspec.yml`, add a new condition to each phase (install, build, post_build) to handle the new component 43 | 3. Update the artifacts section to include any new artifacts 44 | 45 | Example for a new Go Lambda function: 46 | ```yaml 47 | # In the build phase 48 | elif [ "$BUILD_TYPE" == "new-go-function" ]; then 49 | echo "Building New Go Lambda..." 50 | cd lambda/tools/new-go-function 51 | go mod download 52 | GOOS=linux GOARCH=arm64 go build -tags lambda.norpc -o bootstrap main.go 53 | cd ../../.. 
54 | 55 | # In the post_build phase 56 | elif [ "$BUILD_TYPE" == "new-go-function" ]; then 57 | aws s3api put-object --bucket $S3_BUCKET --key lambda/new-go-function/ --content-length 0 || true 58 | cd lambda/tools/new-go-function 59 | zip -r lambda.zip bootstrap 60 | aws s3 cp lambda.zip s3://${S3_BUCKET}/lambda/new-go-function/lambda.zip 61 | cd ../../.. 62 | echo "Lambda code available at s3://$S3_BUCKET/lambda/new-go-function/" 63 | ``` 64 | 65 | ## Deployment 66 | 67 | All build artifacts are uploaded to an S3 bucket with the pattern: 68 | ``` 69 | step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID} 70 | ``` 71 | 72 | The artifacts are organized in the bucket as follows: 73 | - `/cloudformation/{stack-name}/` - CloudFormation templates 74 | - `/lambda-layers/` - Lambda extensions/layers 75 | - `/lambda/{function-name}/` - Lambda function packages 76 | 77 | ## Troubleshooting 78 | 79 | If the build fails, check the following: 80 | 81 | 1. Verify the `BUILD_TYPE` is set correctly 82 | 2. Check that all necessary permissions are granted to the CodeBuild service role 83 | 3. Ensure the required directories and files exist in the source code 84 | 4. Review the build logs for specific error messages -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Guy Ernest 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | remote_theme: pages-themes/modernist@v0.2.0 2 | plugins: 3 | - jekyll-remote-theme 4 | title: [Enterprise-Ready AI Agents Framework] 5 | description: ["Build, Deploy, and Manage AI Agents at Scale using AWS Lambda and Step Functions"] 6 | -------------------------------------------------------------------------------- /buildspec-router.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | # This is a router buildspec that can dynamically run different build processes 4 | # based on environment variables set in the CodeBuild project. 
5 | # 6 | # Example usage: 7 | # - Set BUILD_TYPE=cdk and STACK_NAME=SQLAgentStack in the CodeBuild project to build CDK stack 8 | # - Set BUILD_TYPE=lambda, LAMBDA_LANGUAGE=python, and LAMBDA_DIR=lambda/tools/db-interface to build a Lambda function 9 | 10 | phases: 11 | install: 12 | runtime-versions: 13 | python: 3.9 14 | commands: 15 | # Determine which buildspec to use based on environment variables 16 | - | 17 | if [ "$BUILD_TYPE" = "cdk" ]; then 18 | echo "Running CDK stack build for $STACK_NAME" 19 | cp buildspec-templates/cdk/buildspec-template.yml buildspec-temp.yml 20 | # Replace placeholder values 21 | sed -i "s/STACK_NAME/$STACK_NAME/g" buildspec-temp.yml 22 | sed -i "s/STACK_NAME_LOWERCASE/${STACK_NAME,,}/g" buildspec-temp.yml 23 | elif [ "$BUILD_TYPE" = "lambda" ]; then 24 | echo "Running Lambda build for $LAMBDA_LANGUAGE function in $LAMBDA_DIR" 25 | cp buildspec-templates/lambda/buildspec-${LAMBDA_LANGUAGE}-lambda.yml buildspec-temp.yml 26 | # Replace placeholder values 27 | sed -i "s|LAMBDA_DIR|$LAMBDA_DIR|g" buildspec-temp.yml 28 | LAMBDA_NAME=$(basename "$LAMBDA_DIR") 29 | sed -i "s/LAMBDA_NAME/$LAMBDA_NAME/g" buildspec-temp.yml 30 | elif [ "$BUILD_TYPE" = "lambda-extension" ]; then 31 | echo "Running Lambda extension build" 32 | cp buildspec-templates/lambda/buildspec-lambda-extension.yml buildspec-temp.yml 33 | else 34 | echo "Error: Unknown BUILD_TYPE: $BUILD_TYPE" 35 | exit 1 36 | fi 37 | 38 | # Debug: Show the generated buildspec 39 | - cat buildspec-temp.yml 40 | 41 | # Execute the generated buildspec 42 | - buildspec-exec buildspec-temp.yml 43 | 44 | build: 45 | commands: 46 | - echo "This build phase doesn't run - execution is delegated to the generated buildspec" 47 | 48 | post_build: 49 | commands: 50 | - echo "This post_build phase doesn't run - execution is delegated to the generated buildspec" 51 | 52 | # Note: The artifacts will be collected by the executed buildspec -------------------------------------------------------------------------------- /buildspec-templates/cdk/buildspec-sql-stack.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | runtime-versions: 6 | python: 3.9 7 | commands: 8 | # Update system packages 9 | - yum update -y 10 | 11 | # Install required dependencies 12 | - pip install --upgrade pip 13 | - pip install -r requirements.txt 14 | 15 | build: 16 | commands: 17 | # Run CDK synth to generate CloudFormation template for SQLAgentStack only 18 | - python -m cdk synth SQLAgentStack --output cdk-templates 19 | 20 | # Print the generated template 21 | - ls -la cdk-templates 22 | 23 | post_build: 24 | commands: 25 | # Get AWS account ID and region for S3 bucket name 26 | - | 27 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 28 | AWS_REGION=$AWS_DEFAULT_REGION 29 | echo "Using AWS Region: $AWS_REGION" 30 | S3_BUCKET="step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID}" 31 | echo "Using S3 bucket: $S3_BUCKET" 32 | export AWS_ACCOUNT_ID AWS_REGION S3_BUCKET 33 | 34 | # Check if bucket exists or create it 35 | - | 36 | echo "Checking if bucket $S3_BUCKET exists..." 37 | # Try to access the bucket and ignore errors 38 | if aws s3 ls "s3://$S3_BUCKET" >/dev/null 2>&1; then 39 | echo "Bucket $S3_BUCKET already exists." 40 | else 41 | echo "Creating bucket $S3_BUCKET..." 42 | # Ignore errors from bucket creation 43 | aws s3 mb "s3://$S3_BUCKET" || echo "Bucket may already exist, continuing..." 
44 | fi 45 | # Make sure the cloudformation directory exists 46 | aws s3api put-object --bucket $S3_BUCKET --key cloudformation/ --content-length 0 || true 47 | 48 | # Upload the CloudFormation template to S3 49 | - | 50 | if [ -d "cdk-templates" ]; then 51 | echo "Uploading CloudFormation template..." 52 | aws s3 cp cdk-templates/ s3://${S3_BUCKET}/cloudformation/sql-agent/ --recursive 53 | else 54 | echo "CloudFormation template not found, synth may have failed" 55 | exit 1 56 | fi 57 | 58 | # Output the S3 URL for the CloudFormation template 59 | - | 60 | echo "Build complete!" 61 | echo "CloudFormation template available at s3://$S3_BUCKET/cloudformation/sql-agent/" 62 | 63 | artifacts: 64 | files: 65 | - cdk-templates/**/* 66 | discard-paths: no 67 | base-directory: '.' 68 | 69 | cache: 70 | paths: 71 | - '/root/.cache/pip/**/*' -------------------------------------------------------------------------------- /buildspec-templates/cdk/buildspec-template.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | # Template for creating buildspec files for CDK stacks 4 | # Replace STACK_NAME with the actual stack name 5 | 6 | phases: 7 | install: 8 | runtime-versions: 9 | python: 3.9 10 | commands: 11 | # Update system packages 12 | - yum update -y 13 | 14 | # Install required dependencies 15 | - pip install --upgrade pip 16 | - pip install -r requirements.txt 17 | 18 | build: 19 | commands: 20 | # Run CDK synth to generate CloudFormation template for the specific stack 21 | - python -m cdk synth STACK_NAME --output cdk-templates 22 | 23 | # Print the generated template 24 | - ls -la cdk-templates 25 | 26 | post_build: 27 | commands: 28 | # Get AWS account ID and region for S3 bucket name 29 | - | 30 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 31 | AWS_REGION=$AWS_DEFAULT_REGION 32 | echo "Using AWS Region: $AWS_REGION" 33 | S3_BUCKET="step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID}" 34 | echo "Using S3 bucket: $S3_BUCKET" 35 | export AWS_ACCOUNT_ID AWS_REGION S3_BUCKET 36 | 37 | # Check if bucket exists or create it 38 | - | 39 | echo "Checking if bucket $S3_BUCKET exists..." 40 | # Try to access the bucket and ignore errors 41 | if aws s3 ls "s3://$S3_BUCKET" >/dev/null 2>&1; then 42 | echo "Bucket $S3_BUCKET already exists." 43 | else 44 | echo "Creating bucket $S3_BUCKET..." 45 | # Ignore errors from bucket creation 46 | aws s3 mb "s3://$S3_BUCKET" || echo "Bucket may already exist, continuing..." 47 | fi 48 | # Make sure the cloudformation directory exists 49 | aws s3api put-object --bucket $S3_BUCKET --key cloudformation/ --content-length 0 || true 50 | 51 | # Upload the CloudFormation template to S3 52 | - | 53 | if [ -d "cdk-templates" ]; then 54 | echo "Uploading CloudFormation template..." 55 | aws s3 cp cdk-templates/ s3://${S3_BUCKET}/cloudformation/STACK_NAME_LOWERCASE/ --recursive 56 | else 57 | echo "CloudFormation template not found, synth may have failed" 58 | exit 1 59 | fi 60 | 61 | # Output the S3 URL for the CloudFormation template 62 | - | 63 | echo "Build complete!" 64 | echo "CloudFormation template available at s3://$S3_BUCKET/cloudformation/STACK_NAME_LOWERCASE/" 65 | 66 | artifacts: 67 | files: 68 | - cdk-templates/**/* 69 | discard-paths: no 70 | base-directory: '.' 
71 | 72 | cache: 73 | paths: 74 | - '/root/.cache/pip/**/*' -------------------------------------------------------------------------------- /buildspec-templates/examples/typescript-lambda-example.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | # Example buildspec for a TypeScript Lambda function 4 | # This shows how to use the template for a specific Lambda function 5 | 6 | phases: 7 | install: 8 | runtime-versions: 9 | nodejs: 18 10 | python: 3.9 11 | commands: 12 | # Update system packages 13 | - yum update -y 14 | 15 | # Install AWS SAM CLI 16 | - pip install aws-sam-cli 17 | 18 | # Navigate to the Lambda directory 19 | - cd lambda/tools/web-scraper 20 | 21 | # Install npm dependencies 22 | - npm install 23 | 24 | build: 25 | commands: 26 | # Navigate to the Lambda directory 27 | - cd lambda/tools/web-scraper 28 | 29 | # Build TypeScript 30 | - npm run build 31 | 32 | # Package with SAM (if there's a template.yaml) 33 | - sam package --output-template-file packaged.yaml --s3-bucket ${S3_BUCKET} 34 | 35 | post_build: 36 | commands: 37 | # Get AWS account ID and region for S3 bucket name 38 | - | 39 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 40 | AWS_REGION=$AWS_DEFAULT_REGION 41 | echo "Using AWS Region: $AWS_REGION" 42 | S3_BUCKET="step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID}" 43 | echo "Using S3 bucket: $S3_BUCKET" 44 | export AWS_ACCOUNT_ID AWS_REGION S3_BUCKET 45 | 46 | # Check if bucket exists or create it 47 | - | 48 | echo "Checking if bucket $S3_BUCKET exists..." 49 | # Try to access the bucket and ignore errors 50 | if aws s3 ls "s3://$S3_BUCKET" >/dev/null 2>&1; then 51 | echo "Bucket $S3_BUCKET already exists." 52 | else 53 | echo "Creating bucket $S3_BUCKET..." 54 | # Ignore errors from bucket creation 55 | aws s3 mb "s3://$S3_BUCKET" || echo "Bucket may already exist, continuing..." 56 | fi 57 | 58 | # Make sure the lambda directory exists 59 | aws s3api put-object --bucket $S3_BUCKET --key lambda/web-scraper/ --content-length 0 || true 60 | 61 | # Upload the Lambda code to S3 62 | - | 63 | cd lambda/tools/web-scraper 64 | echo "Uploading Lambda code..." 65 | 66 | # Create a zip file of the dist directory and upload it to S3 67 | zip -r lambda.zip dist/ 68 | aws s3 cp lambda.zip s3://${S3_BUCKET}/lambda/web-scraper/lambda.zip 69 | 70 | # Upload packaged template if available 71 | if [ -f packaged.yaml ]; then 72 | aws s3 cp packaged.yaml s3://${S3_BUCKET}/lambda/web-scraper/packaged.yaml 73 | fi 74 | 75 | # Output the S3 URLs 76 | - | 77 | echo "Build complete!" 78 | echo "Lambda code available at s3://$S3_BUCKET/lambda/web-scraper/lambda.zip" 79 | 80 | artifacts: 81 | files: 82 | - lambda/tools/web-scraper/dist/**/* 83 | - lambda/tools/web-scraper/lambda.zip 84 | - lambda/tools/web-scraper/packaged.yaml 85 | discard-paths: no 86 | base-directory: '.' 
87 | 88 | cache: 89 | paths: 90 | - 'lambda/tools/web-scraper/node_modules/**/*' -------------------------------------------------------------------------------- /buildspec-templates/lambda/buildspec-go-lambda.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | # Template for building a Go Lambda function 4 | # Replace LAMBDA_DIR with the actual directory path, e.g., lambda/tools/web-research 5 | 6 | phases: 7 | install: 8 | runtime-versions: 9 | golang: 1.21 10 | python: 3.9 11 | commands: 12 | # Update system packages 13 | - yum update -y 14 | 15 | # Install AWS SAM CLI 16 | - pip install aws-sam-cli 17 | 18 | # Navigate to the Lambda directory 19 | - cd LAMBDA_DIR 20 | 21 | # Download Go dependencies 22 | - go mod download 23 | 24 | build: 25 | commands: 26 | # Navigate to the Lambda directory 27 | - cd LAMBDA_DIR 28 | 29 | # Build Go Lambda function 30 | - GOOS=linux GOARCH=arm64 go build -tags lambda.norpc -o bootstrap main.go 31 | 32 | # Package with SAM (if there's a template.yaml) 33 | - | 34 | if [ -f template.yaml ]; then 35 | sam package --output-template-file packaged.yaml --s3-bucket ${S3_BUCKET} 36 | else 37 | # Create a deployment package if no SAM template 38 | zip -r lambda.zip bootstrap 39 | fi 40 | 41 | post_build: 42 | commands: 43 | # Get AWS account ID and region for S3 bucket name 44 | - | 45 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 46 | AWS_REGION=$AWS_DEFAULT_REGION 47 | echo "Using AWS Region: $AWS_REGION" 48 | S3_BUCKET="step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID}" 49 | echo "Using S3 bucket: $S3_BUCKET" 50 | export AWS_ACCOUNT_ID AWS_REGION S3_BUCKET 51 | 52 | # Check if bucket exists or create it 53 | - | 54 | echo "Checking if bucket $S3_BUCKET exists..." 55 | # Try to access the bucket and ignore errors 56 | if aws s3 ls "s3://$S3_BUCKET" >/dev/null 2>&1; then 57 | echo "Bucket $S3_BUCKET already exists." 58 | else 59 | echo "Creating bucket $S3_BUCKET..." 60 | # Ignore errors from bucket creation 61 | aws s3 mb "s3://$S3_BUCKET" || echo "Bucket may already exist, continuing..." 62 | fi 63 | 64 | # Make sure the lambda directory exists 65 | aws s3api put-object --bucket $S3_BUCKET --key lambda/LAMBDA_NAME/ --content-length 0 || true 66 | 67 | # Upload the Lambda code to S3 68 | - | 69 | cd LAMBDA_DIR 70 | echo "Uploading Lambda code..." 71 | 72 | # Upload packaged template if available 73 | if [ -f packaged.yaml ]; then 74 | aws s3 cp packaged.yaml s3://${S3_BUCKET}/lambda/LAMBDA_NAME/packaged.yaml 75 | fi 76 | 77 | # Upload lambda.zip if it exists 78 | if [ -f lambda.zip ]; then 79 | aws s3 cp lambda.zip s3://${S3_BUCKET}/lambda/LAMBDA_NAME/lambda.zip 80 | fi 81 | 82 | # Output the S3 URLs 83 | - | 84 | echo "Build complete!" 85 | echo "Lambda artifacts available at s3://$S3_BUCKET/lambda/LAMBDA_NAME/" 86 | 87 | artifacts: 88 | files: 89 | - LAMBDA_DIR/bootstrap 90 | - LAMBDA_DIR/lambda.zip 91 | - LAMBDA_DIR/packaged.yaml 92 | discard-paths: no 93 | base-directory: '.' 
94 | 95 | cache: 96 | paths: 97 | - '/root/.cache/go-build/**/*' 98 | - '/go/pkg/mod/**/*' -------------------------------------------------------------------------------- /buildspec-templates/lambda/buildspec-python-lambda.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | # Template for building a Python Lambda function 4 | # Replace LAMBDA_DIR with the actual directory path, e.g., lambda/tools/db-interface 5 | 6 | phases: 7 | install: 8 | runtime-versions: 9 | python: 3.9 10 | commands: 11 | # Update system packages 12 | - yum update -y 13 | 14 | # Install AWS SAM CLI 15 | - pip install aws-sam-cli 16 | 17 | # Navigate to the Lambda directory 18 | - cd LAMBDA_DIR 19 | 20 | # Install Python dependencies 21 | - pip install -r requirements.txt 22 | 23 | build: 24 | commands: 25 | # Navigate to the Lambda directory 26 | - cd LAMBDA_DIR 27 | 28 | # Package with SAM (if there's a template.yaml) 29 | - | 30 | if [ -f template.yaml ]; then 31 | sam package --output-template-file packaged.yaml --s3-bucket ${S3_BUCKET} 32 | else 33 | # Create a deployment package if no SAM template 34 | zip -r lambda.zip *.py 35 | fi 36 | 37 | post_build: 38 | commands: 39 | # Get AWS account ID and region for S3 bucket name 40 | - | 41 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 42 | AWS_REGION=$AWS_DEFAULT_REGION 43 | echo "Using AWS Region: $AWS_REGION" 44 | S3_BUCKET="step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID}" 45 | echo "Using S3 bucket: $S3_BUCKET" 46 | export AWS_ACCOUNT_ID AWS_REGION S3_BUCKET 47 | 48 | # Check if bucket exists or create it 49 | - | 50 | echo "Checking if bucket $S3_BUCKET exists..." 51 | # Try to access the bucket and ignore errors 52 | if aws s3 ls "s3://$S3_BUCKET" >/dev/null 2>&1; then 53 | echo "Bucket $S3_BUCKET already exists." 54 | else 55 | echo "Creating bucket $S3_BUCKET..." 56 | # Ignore errors from bucket creation 57 | aws s3 mb "s3://$S3_BUCKET" || echo "Bucket may already exist, continuing..." 58 | fi 59 | 60 | # Make sure the lambda directory exists 61 | aws s3api put-object --bucket $S3_BUCKET --key lambda/LAMBDA_NAME/ --content-length 0 || true 62 | 63 | # Upload the Lambda code to S3 64 | - | 65 | cd LAMBDA_DIR 66 | echo "Uploading Lambda code..." 67 | 68 | # Upload packaged template if available 69 | if [ -f packaged.yaml ]; then 70 | aws s3 cp packaged.yaml s3://${S3_BUCKET}/lambda/LAMBDA_NAME/packaged.yaml 71 | fi 72 | 73 | # Upload lambda.zip if it exists 74 | if [ -f lambda.zip ]; then 75 | aws s3 cp lambda.zip s3://${S3_BUCKET}/lambda/LAMBDA_NAME/lambda.zip 76 | fi 77 | 78 | # Output the S3 URLs 79 | - | 80 | echo "Build complete!" 81 | echo "Lambda artifacts available at s3://$S3_BUCKET/lambda/LAMBDA_NAME/" 82 | 83 | artifacts: 84 | files: 85 | - LAMBDA_DIR/lambda.zip 86 | - LAMBDA_DIR/packaged.yaml 87 | discard-paths: no 88 | base-directory: '.' 
89 | 90 | cache: 91 | paths: 92 | - '/root/.cache/pip/**/*' -------------------------------------------------------------------------------- /buildspec-templates/lambda/buildspec-typescript-lambda.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | # Template for building a TypeScript Lambda function 4 | # Replace LAMBDA_DIR with the actual directory path, e.g., lambda/tools/web-scraper 5 | 6 | phases: 7 | install: 8 | runtime-versions: 9 | nodejs: 18 10 | python: 3.9 11 | commands: 12 | # Update system packages 13 | - yum update -y 14 | 15 | # Install AWS SAM CLI 16 | - pip install aws-sam-cli 17 | 18 | # Navigate to the Lambda directory 19 | - cd LAMBDA_DIR 20 | 21 | # Install npm dependencies 22 | - npm install 23 | 24 | build: 25 | commands: 26 | # Build TypeScript 27 | - cd LAMBDA_DIR 28 | - npm run build 29 | 30 | # Package with SAM (if there's a template.yaml) 31 | - sam package --output-template-file packaged.yaml --s3-bucket ${S3_BUCKET} 32 | 33 | post_build: 34 | commands: 35 | # Get AWS account ID and region for S3 bucket name 36 | - | 37 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 38 | AWS_REGION=$AWS_DEFAULT_REGION 39 | echo "Using AWS Region: $AWS_REGION" 40 | S3_BUCKET="step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID}" 41 | echo "Using S3 bucket: $S3_BUCKET" 42 | export AWS_ACCOUNT_ID AWS_REGION S3_BUCKET 43 | 44 | # Check if bucket exists or create it 45 | - | 46 | echo "Checking if bucket $S3_BUCKET exists..." 47 | # Try to access the bucket and ignore errors 48 | if aws s3 ls "s3://$S3_BUCKET" >/dev/null 2>&1; then 49 | echo "Bucket $S3_BUCKET already exists." 50 | else 51 | echo "Creating bucket $S3_BUCKET..." 52 | # Ignore errors from bucket creation 53 | aws s3 mb "s3://$S3_BUCKET" || echo "Bucket may already exist, continuing..." 54 | fi 55 | 56 | # Make sure the lambda directory exists 57 | aws s3api put-object --bucket $S3_BUCKET --key lambda/LAMBDA_NAME/ --content-length 0 || true 58 | 59 | # Upload the Lambda code to S3 60 | - | 61 | cd LAMBDA_DIR 62 | echo "Uploading Lambda code..." 63 | 64 | # Create a zip file of the dist directory and upload it to S3 65 | zip -r lambda.zip dist/ 66 | aws s3 cp lambda.zip s3://${S3_BUCKET}/lambda/LAMBDA_NAME/lambda.zip 67 | 68 | # Upload packaged template if available 69 | if [ -f packaged.yaml ]; then 70 | aws s3 cp packaged.yaml s3://${S3_BUCKET}/lambda/LAMBDA_NAME/packaged.yaml 71 | fi 72 | 73 | # Output the S3 URLs 74 | - | 75 | echo "Build complete!" 76 | echo "Lambda code available at s3://$S3_BUCKET/lambda/LAMBDA_NAME/lambda.zip" 77 | 78 | artifacts: 79 | files: 80 | - LAMBDA_DIR/dist/**/* 81 | - LAMBDA_DIR/lambda.zip 82 | - LAMBDA_DIR/packaged.yaml 83 | discard-paths: no 84 | base-directory: '.' 
85 | 86 | cache: 87 | paths: 88 | - 'LAMBDA_DIR/node_modules/**/*' -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Production 5 | /build 6 | 7 | # Generated files 8 | .docusaurus 9 | .cache-loader 10 | 11 | # Misc 12 | .DS_Store 13 | .env.local 14 | .env.development.local 15 | .env.test.local 16 | .env.production.local 17 | 18 | npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Website 2 | 3 | This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. 4 | 5 | ### Installation 6 | 7 | ``` 8 | $ yarn 9 | ``` 10 | 11 | ### Local Development 12 | 13 | ``` 14 | $ yarn start 15 | ``` 16 | 17 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. 18 | 19 | ### Build 20 | 21 | ``` 22 | $ yarn build 23 | ``` 24 | 25 | This command generates static content into the `build` directory and can be served using any static contents hosting service. 26 | 27 | ### Deployment 28 | 29 | Using SSH: 30 | 31 | ``` 32 | $ USE_SSH=true yarn deploy 33 | ``` 34 | 35 | Not using SSH: 36 | 37 | ``` 38 | $ GIT_USER= yarn deploy 39 | ``` 40 | 41 | If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 42 | -------------------------------------------------------------------------------- /docs/blog/2019-05-28-first-blog-post.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: first-blog-post 3 | title: First Blog Post 4 | authors: [slorber, yangshun] 5 | tags: [hola, docusaurus] 6 | --- 7 | 8 | Lorem ipsum dolor sit amet... 9 | 10 | 11 | 12 | ...consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 13 | -------------------------------------------------------------------------------- /docs/blog/2019-05-29-long-blog-post.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: long-blog-post 3 | title: Long Blog Post 4 | authors: yangshun 5 | tags: [hello, docusaurus] 6 | --- 7 | 8 | This is the summary of a very long blog post, 9 | 10 | Use a `` comment to limit blog post size in the list view. 11 | 12 | 13 | 14 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 15 | 16 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 17 | 18 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 19 | 20 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 21 | 22 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. 
Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 23 | 24 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 25 | 26 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 27 | 28 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 29 | 30 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 31 | 32 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 33 | 34 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 35 | 36 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 37 | 38 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 39 | 40 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 41 | 42 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 43 | 44 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet 45 | -------------------------------------------------------------------------------- /docs/blog/2021-08-01-mdx-blog-post.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | slug: mdx-blog-post 3 | title: MDX Blog Post 4 | authors: [slorber] 5 | tags: [docusaurus] 6 | --- 7 | 8 | Blog posts support [Docusaurus Markdown features](https://docusaurus.io/docs/markdown-features), such as [MDX](https://mdxjs.com/). 9 | 10 | :::tip 11 | 12 | Use the power of React to create interactive blog posts. 
13 | 14 | ::: 15 | 16 | {/* truncate */} 17 | 18 | For example, use JSX to create an interactive button: 19 | 20 | ```js 21 | 22 | ``` 23 | 24 | 25 | -------------------------------------------------------------------------------- /docs/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg -------------------------------------------------------------------------------- /docs/blog/2021-08-26-welcome/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: welcome 3 | title: Welcome 4 | authors: [slorber, yangshun] 5 | tags: [facebook, hello, docusaurus] 6 | --- 7 | 8 | [Docusaurus blogging features](https://docusaurus.io/docs/blog) are powered by the [blog plugin](https://docusaurus.io/docs/api/plugins/@docusaurus/plugin-content-blog). 9 | 10 | Here are a few tips you might find useful. 11 | 12 | 13 | 14 | Simply add Markdown files (or folders) to the `blog` directory. 15 | 16 | Regular blog authors can be added to `authors.yml`. 17 | 18 | The blog post date can be extracted from filenames, such as: 19 | 20 | - `2019-05-30-welcome.md` 21 | - `2019-05-30-welcome/index.md` 22 | 23 | A blog post folder can be convenient to co-locate blog post images: 24 | 25 | ![Docusaurus Plushie](./docusaurus-plushie-banner.jpeg) 26 | 27 | The blog supports tags as well! 28 | 29 | **And if you don't want a blog**: just delete this directory, and use `blog: false` in your Docusaurus config. 30 | -------------------------------------------------------------------------------- /docs/blog/authors.yml: -------------------------------------------------------------------------------- 1 | yangshun: 2 | name: Yangshun Tay 3 | title: Front End Engineer @ Facebook 4 | url: https://github.com/yangshun 5 | image_url: https://github.com/yangshun.png 6 | page: true 7 | socials: 8 | x: yangshunz 9 | github: yangshun 10 | 11 | slorber: 12 | name: Sébastien Lorber 13 | title: Docusaurus maintainer 14 | url: https://sebastienlorber.com 15 | image_url: https://github.com/slorber.png 16 | page: 17 | # customize the url of the author page at /blog/authors/ 18 | permalink: '/all-sebastien-lorber-articles' 19 | socials: 20 | x: sebastienlorber 21 | linkedin: sebastienlorber 22 | github: slorber 23 | newsletter: https://thisweekinreact.com 24 | -------------------------------------------------------------------------------- /docs/blog/tags.yml: -------------------------------------------------------------------------------- 1 | facebook: 2 | label: Facebook 3 | permalink: /facebook 4 | description: Facebook tag description 5 | 6 | hello: 7 | label: Hello 8 | permalink: /hello 9 | description: Hello tag description 10 | 11 | docusaurus: 12 | label: Docusaurus 13 | permalink: /docusaurus 14 | description: Docusaurus tag description 15 | 16 | hola: 17 | label: Hola 18 | permalink: /hola 19 | description: Hola tag description 20 | -------------------------------------------------------------------------------- /docs/docs/intro.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | # Executing AI Agents in AWS Step Functions 6 | 7 | > 📦 **Enterprise AI Agent Framework** 8 | > 9 | > Extreme flexibility and scalability for enterprise grade AI Agents. 
Supporting all LLMs and tools in any programming language. Including human approval and observability. All in a single framework. 10 | 11 | ## Introduction 12 | 13 | AI Agents are a combination of LLMs and Tools. Each tool is used to perform a specific task, and the LLM orchestrates them to perform complex tasks requested by the user. While AI Agents are powerful tools for automating complex tasks in the cloud and reducing the cost of building and maintaining complex systems, their deployment and operation can be challenging. 14 | 15 | This framework provides a robust implementation of AI Agents in AWS Step Functions, a serverless computing platform for building and deploying serverless applications. It includes implementations of several AI Agents: 16 | 17 | - SQL AI Agent - Analyzes SQL databases and answers business questions with visualization and reporting (Python) 18 | - Financial AI Agent - Analyzes financial datasets using YFinance library (Python) 19 | - Google Maps AI Agent - Analyzes Google Maps data (TypeScript) 20 | - Time Series Clustering AI Agent - Performs time series clustering analysis (Rust) 21 | - Time Series Analysis AI Agent - Analyzes large sets of time series data (Java) 22 | - Web Research AI Agent - Uses Perplexity to analyze web pages and answer business questions (Go) 23 | 24 | Key benefits of this framework include: 25 | 26 | - High scalability limited only by AWS account resources 27 | - Support for tools in any programming language 28 | - Flexibility to integrate with any LLM provider through Lambda functions 29 | - Built-in observability through Step Functions state tracking and CloudWatch/X-Ray integration 30 | - Cost efficiency through serverless Lambda and Step Functions 31 | - Human approval workflow capabilities 32 | - Enterprise-grade security through IAM roles and AWS Secrets Manager 33 | 34 | The implementation serves as a template for building custom AI Agents for specific use cases while providing production-ready features like monitoring, security, and scalability out of the box. 35 | -------------------------------------------------------------------------------- /docs/docs/introduction-to-ai-agents/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "Tutorial - What are AI Agents?", 3 | "position": 2, 4 | "link": { 5 | "type": "generated-index", 6 | "description": "5 minutes to learn the most important AI Agents concepts." 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /docs/docs/tutorial-basics/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "Tutorial - Basics", 3 | "position": 3, 4 | "link": { 5 | "type": "generated-index", 6 | "description": "5 minutes to learn the most important AI Agents concepts." 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /docs/docs/tutorial-basics/congratulations.md: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 6 3 | --- 4 | 5 | # Congratulations 6 | 7 | You have just learned the **basics of AI Agents**. 8 | 9 | AI Agents Framework has **much more to offer**! 10 | 11 | ## What's next? 12 | 13 | - Build more complex agents with the ... 14 | - Build more tools based on your internal data systems. 
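
To get a feel for the second item, here is a minimal, hypothetical sketch of a Python tool Lambda handler. The tool name (`lookup_order_status`) and the event/response field names (`name`, `id`, `input`, `tool_result`) are illustrative assumptions modeled on the Anthropic tool-use shape — the contract your agent actually uses is defined by its state machine and by the cookiecutter template under `lambda/cookiecutter/tools/python`.

```python
# Hypothetical sketch of a tool Lambda handler. The event/response field
# names (name, id, input, tool_result) are assumptions modeled on the
# Anthropic tool-use message shape; check the cookiecutter template in
# lambda/cookiecutter/tools/python for the contract this repo actually uses.
import json
import logging

logger = logging.getLogger()
logger.setLevel(logging.INFO)


def lookup_order_status(order_id: str) -> dict:
    # Placeholder for a call into an internal data system (database, API, ...).
    return {"order_id": order_id, "status": "shipped"}


def lambda_handler(event, context):
    logger.info("Tool invocation: %s", json.dumps(event))

    tool_name = event.get("name")
    tool_input = event.get("input", {})

    if tool_name == "lookup_order_status":
        result = lookup_order_status(tool_input.get("order_id", ""))
    else:
        result = {"error": f"Unknown tool: {tool_name}"}

    # Return a tool_result block the agent state machine can feed back to the LLM.
    return {
        "type": "tool_result",
        "tool_use_id": event.get("id"),
        "content": json.dumps(result),
    }
```

Package it with the SAM template from the cookiecutter and register it as a tool in your agent's stack.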
15 | -------------------------------------------------------------------------------- /docs/docs/tutorial-extras/_category_.json: -------------------------------------------------------------------------------- 1 | { 2 | "label": "Tutorial - Extras", 3 | "position": 4, 4 | "link": { 5 | "type": "generated-index" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /docs/docs/tutorial-extras/img/docsVersionDropdown.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/docs/tutorial-extras/img/docsVersionDropdown.png -------------------------------------------------------------------------------- /docs/docs/tutorial-extras/img/localeDropdown.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/docs/tutorial-extras/img/localeDropdown.png -------------------------------------------------------------------------------- /docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docs", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "docusaurus": "docusaurus", 7 | "start": "docusaurus start", 8 | "build": "docusaurus build", 9 | "swizzle": "docusaurus swizzle", 10 | "deploy": "docusaurus deploy", 11 | "clear": "docusaurus clear", 12 | "serve": "docusaurus serve", 13 | "write-translations": "docusaurus write-translations", 14 | "write-heading-ids": "docusaurus write-heading-ids", 15 | "typecheck": "tsc" 16 | }, 17 | "dependencies": { 18 | "@docusaurus/core": "3.7.0", 19 | "@docusaurus/preset-classic": "3.7.0", 20 | "@mdx-js/react": "^3.0.0", 21 | "clsx": "^2.0.0", 22 | "prism-react-renderer": "^2.3.0", 23 | "react": "^19.0.0", 24 | "react-dom": "^19.0.0" 25 | }, 26 | "devDependencies": { 27 | "@docusaurus/module-type-aliases": "3.7.0", 28 | "@docusaurus/tsconfig": "3.7.0", 29 | "@docusaurus/types": "3.7.0", 30 | "typescript": "~5.6.2" 31 | }, 32 | "browserslist": { 33 | "production": [ 34 | ">0.5%", 35 | "not dead", 36 | "not op_mini all" 37 | ], 38 | "development": [ 39 | "last 3 chrome version", 40 | "last 3 firefox version", 41 | "last 5 safari version" 42 | ] 43 | }, 44 | "engines": { 45 | "node": ">=18.0" 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /docs/sidebars.ts: -------------------------------------------------------------------------------- 1 | import type {SidebarsConfig} from '@docusaurus/plugin-content-docs'; 2 | 3 | // This runs in Node.js - Don't use client-side code here (browser APIs, JSX...) 4 | 5 | /** 6 | * Creating a sidebar enables you to: 7 | - create an ordered group of docs 8 | - render a sidebar for each doc of that group 9 | - provide next/previous navigation 10 | 11 | The sidebars can be generated from the filesystem, or explicitly defined here. 12 | 13 | Create as many sidebars as you want. 
14 |  */
15 | const sidebars: SidebarsConfig = {
16 |   // By default, Docusaurus generates a sidebar from the docs folder structure
17 |   tutorialSidebar: [{type: 'autogenerated', dirName: '.'}],
18 | 
19 |   // But you can create a sidebar manually
20 |   /*
21 |   tutorialSidebar: [
22 |     'intro',
23 |     'hello',
24 |     {
25 |       type: 'category',
26 |       label: 'Tutorial',
27 |       items: ['tutorial-basics/create-a-document'],
28 |     },
29 |   ],
30 |    */
31 | };
32 | 
33 | export default sidebars;
34 | -------------------------------------------------------------------------------- /docs/src/components/HomepageFeatures/index.tsx: --------------------------------------------------------------------------------
1 | import type {ReactNode} from 'react';
2 | import clsx from 'clsx';
3 | import Heading from '@theme/Heading';
4 | import styles from './styles.module.css';
5 | 
6 | type FeatureItem = {
7 |   title: string;
8 |   Svg: React.ComponentType<React.ComponentProps<'svg'>>;
9 |   description: ReactNode;
10 | };
11 | 
12 | const FeatureList: FeatureItem[] = [
13 |   {
14 |     title: 'Extreme Flexibility',
15 |     Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
16 |     description: (
17 |       <>
18 |         AI Agents Framework allows you to call any LLM and use any tool in any programming language.
19 |       </>
20 |     ),
21 |   },
22 |   {
23 |     title: 'Extreme Observability',
24 |     Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
25 |     description: (
26 |       <>
27 |         Built on top of the reliable serverless infrastructure (AWS Lambda and Step Functions), AI Agents Framework provides extreme observability.
28 |         Inspect every execution step and every tool call, using CloudWatch and X-Ray.
29 |       </>
30 |     ),
31 |   },
32 |   {
33 |     title: 'Extreme Scalability',
34 |     Svg: require('@site/static/img/scalability.svg').default,
35 |     description: (
36 |       <>
37 |         Based on true serverless architecture, AI Agents Framework can scale to any number of concurrent users and any number of concurrent executions.
38 |         You only pay for what you use.
39 |       </>
40 |     ),
41 |   },
42 | ];
43 | 
44 | function Feature({title, Svg, description}: FeatureItem) {
45 |   return (
<div className={clsx('col col--4')}>
47 |       <div className="text--center">
48 |         <Svg className={styles.featureSvg} role="img" />
49 |       </div>
50 |       <div className="text--center padding-horiz--md">
51 |         <Heading as="h3">{title}</Heading>
52 |         <p>{description}</p>
53 |       </div>
54 |     </div>
55 |   );
56 | }
57 | 
58 | export default function HomepageFeatures(): ReactNode {
59 |   return (
60 |     <section className={styles.features}>
61 |       <div className="container">
62 |         <div className="row">
63 |           {FeatureList.map((props, idx) => (
64 |             <Feature key={idx} {...props} />
65 |           ))}
66 |         </div>
67 |       </div>
68 |     </section>
69 | ); 70 | } 71 | -------------------------------------------------------------------------------- /docs/src/components/HomepageFeatures/styles.module.css: -------------------------------------------------------------------------------- 1 | .features { 2 | display: flex; 3 | align-items: center; 4 | padding: 2rem 0; 5 | width: 100%; 6 | } 7 | 8 | .featureSvg { 9 | height: 200px; 10 | width: 200px; 11 | } 12 | -------------------------------------------------------------------------------- /docs/src/css/custom.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Any CSS included here will be global. The classic template 3 | * bundles Infima by default. Infima is a CSS framework designed to 4 | * work well for content-centric websites. 5 | */ 6 | 7 | /* You can override the default Infima variables here. */ 8 | :root { 9 | --ifm-color-primary: #2e8555; 10 | --ifm-color-primary-dark: #29784c; 11 | --ifm-color-primary-darker: #277148; 12 | --ifm-color-primary-darkest: #205d3b; 13 | --ifm-color-primary-light: #33925d; 14 | --ifm-color-primary-lighter: #359962; 15 | --ifm-color-primary-lightest: #3cad6e; 16 | --ifm-code-font-size: 95%; 17 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); 18 | } 19 | 20 | /* For readability concerns, you should choose a lighter palette in dark mode. */ 21 | [data-theme='dark'] { 22 | --ifm-color-primary: #25c2a0; 23 | --ifm-color-primary-dark: #21af90; 24 | --ifm-color-primary-darker: #1fa588; 25 | --ifm-color-primary-darkest: #1a8870; 26 | --ifm-color-primary-light: #29d5b0; 27 | --ifm-color-primary-lighter: #32d8b4; 28 | --ifm-color-primary-lightest: #4fddbf; 29 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); 30 | } 31 | -------------------------------------------------------------------------------- /docs/src/pages/index.module.css: -------------------------------------------------------------------------------- 1 | /** 2 | * CSS files with the .module.css suffix will be treated as CSS modules 3 | * and scoped locally. 4 | */ 5 | 6 | .heroBanner { 7 | padding: 4rem 0; 8 | text-align: center; 9 | position: relative; 10 | overflow: hidden; 11 | } 12 | 13 | @media screen and (max-width: 996px) { 14 | .heroBanner { 15 | padding: 2rem; 16 | } 17 | } 18 | 19 | .buttons { 20 | display: flex; 21 | align-items: center; 22 | justify-content: center; 23 | } 24 | -------------------------------------------------------------------------------- /docs/src/pages/index.tsx: -------------------------------------------------------------------------------- 1 | import type {ReactNode} from 'react'; 2 | import clsx from 'clsx'; 3 | import Link from '@docusaurus/Link'; 4 | import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; 5 | import Layout from '@theme/Layout'; 6 | import HomepageFeatures from '@site/src/components/HomepageFeatures'; 7 | import Heading from '@theme/Heading'; 8 | 9 | import styles from './index.module.css'; 10 | 11 | function HomepageHeader() { 12 | const {siteConfig} = useDocusaurusContext(); 13 | return ( 14 |
<header className={clsx('hero hero--primary', styles.heroBanner)}>
15 |       <div className="container">
16 |         <Heading as="h1" className="hero__title">
17 |           {siteConfig.title}
18 |         </Heading>
19 |         <p className="hero__subtitle">{siteConfig.tagline}</p>
20 |         <div className={styles.buttons}>
21 |           <Link
22 |             className="button button--secondary button--lg"
23 |             to="/docs/intro">
24 |             AI Agent Tutorial - 15 min ⏱️
25 |           </Link>
26 |         </div>
27 |       </div>
28 |     </header>
29 |   );
30 | }
31 | 
32 | export default function Home(): ReactNode {
33 |   const {siteConfig} = useDocusaurusContext();
34 |   return (
35 |     <Layout
36 |       title={`Hello from ${siteConfig.title}`}
37 |       description="Description will go into a meta tag in <head />">
38 |       <HomepageHeader />
39 |       <main>
40 |         <HomepageFeatures />
41 |       </main>
42 |     </Layout>
43 | ); 44 | } 45 | -------------------------------------------------------------------------------- /docs/src/pages/markdown-page.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Markdown page example 3 | --- 4 | 5 | # Markdown page example 6 | 7 | You don't need React to write simple standalone pages. 8 | -------------------------------------------------------------------------------- /docs/static/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/static/.nojekyll -------------------------------------------------------------------------------- /docs/static/img/AgentSchamticFlow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/static/img/AgentSchamticFlow.png -------------------------------------------------------------------------------- /docs/static/img/TodayIsPredictionHyperbolic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/static/img/TodayIsPredictionHyperbolic.png -------------------------------------------------------------------------------- /docs/static/img/docusaurus-social-card.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/static/img/docusaurus-social-card.jpg -------------------------------------------------------------------------------- /docs/static/img/docusaurus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/static/img/docusaurus.png -------------------------------------------------------------------------------- /docs/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/docs/static/img/favicon.ico -------------------------------------------------------------------------------- /docs/static/img/scalability.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 24 | 25 | 26 | AWS Cloud 27 | 28 | 29 | 30 | Step Functions Workflow 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | Scale 71 | 72 | 73 | Automatic Scaling 74 | 75 | -------------------------------------------------------------------------------- /docs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // This file is not used in compilation. It is here just for a nice editor experience. 3 | "extends": "@docusaurus/tsconfig", 4 | "compilerOptions": { 5 | "baseUrl": "." 
6 | }, 7 | "exclude": [".docusaurus", "build"] 8 | } 9 | -------------------------------------------------------------------------------- /images/AI-Agents-Traces-Service-Map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/images/AI-Agents-Traces-Service-Map.png -------------------------------------------------------------------------------- /images/Agent-AI-UI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/images/Agent-AI-UI.png -------------------------------------------------------------------------------- /images/Human-Approval-UI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/images/Human-Approval-UI.png -------------------------------------------------------------------------------- /lambda/call_llm/CLAUDE.md: -------------------------------------------------------------------------------- 1 | # Step Functions Agent - LLM Wrappers - Claude Memory File 2 | 3 | This section implements interfaces with various LLMs (Language Model Models) used to drive AI agents with Step Functions orchestration. The framework abstracts provider-specific APIs behind consistent interfaces to make switching between models seamless. 4 | 5 | ## Build & Test Commands 6 | 7 | - Run all tests: `pytest tests/` 8 | - Run specific test: `pytest tests/test_claude_handler.py -v` 9 | - Test with events: `sam local invoke OpenAILambda -e tests/events/multiple-places-weather-event.json` 10 | - Update dependencies: `pip-compile requirements.in` 11 | 12 | ## Code Style Guidelines 13 | 14 | - **Imports**: stdlib → third-party → local; explicit imports preferred over wildcard 15 | - **Formatting**: Maintain consistent indentation (4 spaces) 16 | - **Types**: Use typing annotations (Dict, List, Any, Optional) for all public interfaces 17 | - **Naming**: 18 | - Classes: PascalCase (e.g., `ClaudeHandler`) 19 | - Functions/variables: snake_case (e.g., `process_message`) 20 | - Constants: UPPER_SNAKE_CASE (e.g., `MAX_TOKENS`) 21 | - **Error handling**: Try/except with specific exceptions, proper logging, re-raise when appropriate 22 | - **Documentation**: Docstrings for all public methods/functions 23 | - **Class structure**: Follow abstract base class pattern with ABC and @abstractmethod 24 | - **Handler pattern**: Each LLM has separate handler (business logic) and lambda (entry point) modules 25 | 26 | ## Project Structure 27 | 28 | - Organized by LLM provider (anthropic_llm, openai_llm, bedrock_llm, gemini_llm) 29 | - Each provider package follows identical structure (handler, lambda, requirements) 30 | - Common functionality in lambda_layer/python/common 31 | - Provider-specific handlers implement common interface for consistent usage -------------------------------------------------------------------------------- /lambda/call_llm/__init__.py: -------------------------------------------------------------------------------- 1 | # lambda/call_llm/__init__.py 2 | import os 3 | import sys 4 | 5 | # Add the package directory to the Python path 6 | package_dir = os.path.dirname(os.path.abspath(__file__)) 7 | if package_dir not in sys.path: 8 | sys.path.insert(0, package_dir) 9 | 10 | # Define the package 
name 11 | __package__ = 'call_llm' 12 | 13 | # Import your submodules 14 | from lambda_layer.python import common 15 | from functions import bedrock_llm, anthropic_llm, openai_llm 16 | # from functions.bedrock import bedrock_handler 17 | 18 | # Make the imports available at the package level 19 | __all__ = [ 20 | 'common', 21 | 'bedrock_llm', 22 | 'anthropic_llm', 23 | 'openai_llm' 24 | ] -------------------------------------------------------------------------------- /lambda/call_llm/functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/call_llm/functions/__init__.py -------------------------------------------------------------------------------- /lambda/call_llm/functions/anthropic_llm/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | # Ensure that the lambda layer modules are properly registered 5 | sys.path.insert(0, os.path.dirname(__file__)) 6 | 7 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/anthropic_llm/claude_lambda.py: -------------------------------------------------------------------------------- 1 | # call_llm/handlers/claude_lambda.py 2 | from common.base_llm import logger, tracer 3 | from claude_handler import ClaudeLLM 4 | 5 | @tracer.capture_lambda_handler 6 | def lambda_handler(event, context): 7 | logger.info(f"Received event: {event}") 8 | try: 9 | system = event.get('system') 10 | messages = event.get('messages', []) 11 | tools = event.get('tools', []) 12 | 13 | llm = ClaudeLLM() 14 | assistant_message = llm.generate_response(system, messages, tools) 15 | 16 | # Update messages with assistant's response 17 | messages.append(assistant_message["message"]) 18 | 19 | return { 20 | 'statusCode': 200, 21 | 'body': { 22 | 'messages': messages, 23 | 'function_calls': assistant_message["function_calls"], 24 | 'metadata': assistant_message["metadata"] 25 | } 26 | } 27 | except Exception as e: 28 | logger.error(e) 29 | raise e # To activate the retry mechanism in the caller 30 | 31 | if __name__ == "__main__": 32 | # Test event for Claude 3 33 | test_event_claude = { 34 | "model": "claude-3-5-sonnet-20241022", 35 | "system": "You are chatbot, who is helping people with answers to their questions.", 36 | "messages": [ 37 | { 38 | "role": "user", 39 | "content": "What is 2+2?" 
40 | } 41 | ], 42 | "tools": [ 43 | { 44 | "name": "get_db_schema", 45 | "description": "Describe the schema of the SQLite database, including table names, and column names and types.", 46 | "input_schema": { 47 | "type": "object", 48 | "properties": {} 49 | } 50 | } 51 | ] 52 | } 53 | 54 | # Call lambda handler with test events 55 | print("\nTesting Claude 3:") 56 | response_claude = lambda_handler(test_event_claude, None) 57 | print(response_claude) -------------------------------------------------------------------------------- /lambda/call_llm/functions/anthropic_llm/requirements.in: -------------------------------------------------------------------------------- 1 | anthropic==0.35.0 2 | httpx==0.27.2 # To resolve issue with 0.28.0 -------------------------------------------------------------------------------- /lambda/call_llm/functions/anthropic_llm/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | annotated-types==0.7.0 4 | # via pydantic 5 | anthropic==0.35.0 6 | # via -r requirements.in 7 | anyio==4.8.0 8 | # via 9 | # anthropic 10 | # httpx 11 | certifi==2025.1.31 12 | # via 13 | # httpcore 14 | # httpx 15 | # requests 16 | charset-normalizer==3.4.1 17 | # via requests 18 | distro==1.9.0 19 | # via anthropic 20 | filelock==3.17.0 21 | # via huggingface-hub 22 | fsspec==2024.12.0 23 | # via huggingface-hub 24 | h11==0.14.0 25 | # via httpcore 26 | httpcore==1.0.7 27 | # via httpx 28 | httpx==0.27.2 29 | # via 30 | # -r requirements.in 31 | # anthropic 32 | huggingface-hub==0.28.1 33 | # via tokenizers 34 | idna==3.10 35 | # via 36 | # anyio 37 | # httpx 38 | # requests 39 | jiter==0.8.2 40 | # via anthropic 41 | packaging==24.2 42 | # via huggingface-hub 43 | pydantic==2.10.6 44 | # via anthropic 45 | pydantic-core==2.27.2 46 | # via pydantic 47 | pyyaml==6.0.2 48 | # via huggingface-hub 49 | requests==2.32.3 50 | # via huggingface-hub 51 | sniffio==1.3.1 52 | # via 53 | # anthropic 54 | # anyio 55 | # httpx 56 | tokenizers==0.21.0 57 | # via anthropic 58 | tqdm==4.67.1 59 | # via huggingface-hub 60 | typing-extensions==4.12.2 61 | # via 62 | # anthropic 63 | # anyio 64 | # huggingface-hub 65 | # pydantic 66 | # pydantic-core 67 | urllib3==2.3.0 68 | # via requests 69 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/anthropic_llm/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | LLMLayer: 7 | Type: AWS::Serverless::LayerVersion 8 | Properties: 9 | ContentUri: ../../lambda_layer/python 10 | CompatibleRuntimes: 11 | - python3.12 12 | Metadata: 13 | BuildMethod: python3.12 14 | # Anthropic Lambda Function 15 | ClaudeLambda: 16 | Type: AWS::Serverless::Function 17 | Properties: 18 | CodeUri: . 
19 | Handler: claude_lambda.lambda_handler 20 | Description: Claude Lambda 21 | Layers: 22 | - !Ref LLMLayer 23 | Runtime: python3.12 24 | Timeout: 90 25 | MemorySize: 128 26 | Environment: 27 | Variables: 28 | POWERTOOLS_SERVICE_NAME: ai-agent-llm 29 | Architectures: 30 | - arm64 31 | Policies: 32 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/call_llm/functions/bedrock_llm/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | # Ensure that the lambda layer modules are properly registered 5 | sys.path.insert(0, os.path.dirname(__file__)) 6 | 7 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/bedrock_llm/bedrock_lambda.py: -------------------------------------------------------------------------------- 1 | # handlers/bedrock_lambda.py 2 | from common.base_llm import logger 3 | from bedrock_handler import BedrockLLM 4 | 5 | def lambda_handler(event, context): 6 | logger.info(f"Received event: {event}") 7 | try: 8 | system = event.get('system') 9 | messages = event.get('messages', []) 10 | tools = event.get('tools', []) 11 | 12 | llm = BedrockLLM() 13 | assistant_message = llm.generate_response(system, messages, tools) 14 | logger.info(assistant_message) 15 | messages.append(assistant_message["message"]) 16 | 17 | return { 18 | 'statusCode': 200, 19 | 'body': { 20 | 'messages': messages, 21 | 'function_calls': assistant_message["function_calls"], 22 | 'metadata': assistant_message["metadata"] 23 | } 24 | } 25 | except Exception as e: 26 | logger.error(e) 27 | raise e # To trigger the retry logic in the caller 28 | 29 | if __name__ == "__main__": 30 | # Test event for Jamba model 31 | event = { 32 | "model": "ai21.jamba-1-5-large-v1:0", 33 | "messages": [ 34 | {"role": "user", "content": "What is 25*4+64*3?"} 35 | ], 36 | "tools": [ 37 | { 38 | "type": "function", 39 | "function": { 40 | "name": "calculator", 41 | "parameters": { 42 | "type": "object", 43 | "properties": { 44 | "a": {"type": "number"}, 45 | "b": {"type": "number"}, 46 | }, 47 | }, 48 | }, 49 | } 50 | ] 51 | } 52 | response = lambda_handler(event, None) 53 | print(response) -------------------------------------------------------------------------------- /lambda/call_llm/functions/bedrock_llm/nova_lambda.py: -------------------------------------------------------------------------------- 1 | # handlers/bedrock_lambda.py 2 | from common.base_llm import logger 3 | from nova_handler import NovaLLM 4 | 5 | def lambda_handler(event, context): 6 | logger.info(f"Received event: {event}") 7 | try: 8 | system = event.get('system') 9 | messages = event.get('messages', []) 10 | tools = event.get('tools', []) 11 | 12 | llm = NovaLLM() 13 | assistant_message = llm.generate_response(system, messages, tools) 14 | logger.info(assistant_message) 15 | messages.append(assistant_message["message"]) 16 | 17 | return { 18 | 'statusCode': 200, 19 | 'body': { 20 | 'messages': messages, 21 | 'function_calls': assistant_message["function_calls"], 22 | 'metadata': assistant_message["metadata"] 23 | } 24 | } 25 | except Exception as e: 26 | logger.error(e) 27 | raise e # To trigger the retry logic in the caller 28 | 29 | if __name__ == "__main__": 30 | # Test event for Jamba model 31 | event = { 32 | "messages": [ 33 | {"role": "user", "content": "What is 25*4+64*3?"} 34 | ], 35 | "tools": [ 36 | { 37 | "type": "function", 38 | "function": { 39 | 
"name": "calculator", 40 | "parameters": { 41 | "type": "object", 42 | "properties": { 43 | "a": {"type": "number"}, 44 | "b": {"type": "number"}, 45 | }, 46 | }, 47 | }, 48 | } 49 | ] 50 | } 51 | response = lambda_handler(event, None) 52 | print(response) -------------------------------------------------------------------------------- /lambda/call_llm/functions/bedrock_llm/requirements.in: -------------------------------------------------------------------------------- 1 | aws-lambda-powertools>=2.30.2 # AWS Lambda Powertools for Python 2 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/bedrock_llm/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | aws-lambda-powertools==3.5.0 4 | # via -r requirements.in 5 | jmespath==1.0.1 6 | # via aws-lambda-powertools 7 | typing-extensions==4.12.2 8 | # via aws-lambda-powertools 9 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/bedrock_llm/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | LLMLayer: 7 | Type: AWS::Serverless::LayerVersion 8 | Properties: 9 | ContentUri: ../../lambda_layer/python 10 | CompatibleRuntimes: 11 | - python3.12 12 | Metadata: 13 | BuildMethod: python3.12 14 | # Bedrock (Jamba) Lambda Function 15 | BedrockLambda: 16 | Type: AWS::Serverless::Function 17 | Properties: 18 | CodeUri: . 19 | Handler: bedrock_lambda.lambda_handler 20 | Description: Bedrock (Jamba) Lambda 21 | Layers: 22 | - !Ref LLMLayer 23 | Runtime: python3.12 24 | Timeout: 90 25 | MemorySize: 128 26 | Environment: 27 | Variables: 28 | POWERTOOLS_SERVICE_NAME: ai-agent-llm 29 | Architectures: 30 | - arm64 31 | Policies: 32 | - AWSLambdaBasicExecutionRole # Nova Lambda Function 33 | NovaLambda: 34 | Type: AWS::Serverless::Function 35 | Properties: 36 | CodeUri: . 
37 | Handler: nova_lambda.lambda_handler 38 | Description: Nova Lambda 39 | Layers: 40 | - !Ref LLMLayer 41 | Runtime: python3.12 42 | Timeout: 90 43 | MemorySize: 128 44 | Environment: 45 | Variables: 46 | POWERTOOLS_SERVICE_NAME: ai-agent-llm 47 | Architectures: 48 | - arm64 49 | Policies: 50 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/call_llm/functions/gemini_llm/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | # Ensure that the lambda layer modules are properly registered 5 | sys.path.insert(0, os.path.dirname(__file__)) 6 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/gemini_llm/gemini_lambda.py: -------------------------------------------------------------------------------- 1 | # call_llm/handlers/gemini_lambda.py 2 | from common.base_llm import logger 3 | from gemini_handler import GeminiLLM 4 | 5 | def lambda_handler(event, context): 6 | logger.info(f"Received event: {event}") 7 | try: 8 | system = event.get('system') 9 | messages = event.get('messages', []) 10 | tools = event.get('tools', []) 11 | 12 | llm = GeminiLLM() 13 | assistant_message = llm.generate_response(system, messages, tools) 14 | 15 | messages.append(assistant_message["message"]) 16 | 17 | return { 18 | 'statusCode': 200, 19 | 'body': { 20 | 'messages': messages, 21 | 'function_calls': assistant_message["function_calls"], 22 | 'metadata': assistant_message["metadata"] 23 | } 24 | } 25 | except Exception as e: 26 | logger.error(e) 27 | raise e # To trigger the retry logic in the caller 28 | 29 | if __name__ == "__main__": 30 | test_event = { 31 | "messages": [ 32 | {"role": "user", "content": "What is 25*4+64*3?"} 33 | ], 34 | "tools": [ 35 | { 36 | "type": "function", 37 | "function": { 38 | "name": "calculator", 39 | "parameters": { 40 | "type": "object", 41 | "properties": { 42 | "a": {"type": "number"}, 43 | "b": {"type": "number"} 44 | } 45 | } 46 | } 47 | } 48 | ] 49 | } 50 | response = lambda_handler(test_event, None) 51 | print(response) -------------------------------------------------------------------------------- /lambda/call_llm/functions/gemini_llm/requirements.in: -------------------------------------------------------------------------------- 1 | google-genai==0.6.0 2 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/gemini_llm/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | annotated-types==0.7.0 4 | # via pydantic 5 | cachetools==5.5.1 6 | # via google-auth 7 | certifi==2025.1.31 8 | # via requests 9 | charset-normalizer==3.4.1 10 | # via requests 11 | google-auth==2.38.0 12 | # via google-genai 13 | google-genai==0.6.0 14 | # via -r requirements.in 15 | idna==3.10 16 | # via requests 17 | pillow==11.1.0 18 | # via google-genai 19 | pyasn1==0.6.1 20 | # via 21 | # pyasn1-modules 22 | # rsa 23 | pyasn1-modules==0.4.1 24 | # via google-auth 25 | pydantic==2.10.6 26 | # via google-genai 27 | pydantic-core==2.27.2 28 | # via pydantic 29 | requests==2.32.3 30 | # via google-genai 31 | rsa==4.9 32 | # via google-auth 33 | typing-extensions==4.12.2 34 | # via 35 | # pydantic 36 | # pydantic-core 37 | urllib3==2.3.0 38 | # via requests 39 | 
websockets==14.2 40 | # via google-genai 41 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/gemini_llm/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | LLMLayer: 7 | Type: AWS::Serverless::LayerVersion 8 | Properties: 9 | ContentUri: ../../lambda_layer/python 10 | CompatibleRuntimes: 11 | - python3.12 12 | Metadata: 13 | BuildMethod: python3.12 14 | # Gemini Lambda Function 15 | GeminiLambda: 16 | Type: AWS::Serverless::Function 17 | Properties: 18 | CodeUri: . 19 | Handler: gemini_lambda.lambda_handler 20 | Description: Gemini Lambda 21 | Layers: 22 | - !Ref LLMLayer 23 | Runtime: python3.12 24 | Timeout: 90 25 | MemorySize: 128 26 | Environment: 27 | Variables: 28 | POWERTOOLS_SERVICE_NAME: ai-agent-llm 29 | Architectures: 30 | - arm64 31 | Policies: 32 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | # Ensure that the lambda layer modules are properly registered 5 | sys.path.insert(0, os.path.dirname(__file__)) 6 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/deepseek_handler.py: -------------------------------------------------------------------------------- 1 | # llms/deepseek_handler.py 2 | from openai import OpenAI 3 | from common.base_llm import BaseLLM, logger 4 | from openai_handler import OpenAILLM 5 | from common.config import get_api_keys 6 | 7 | from typing import List, Dict 8 | import json 9 | 10 | # Using DeepSeek's API directly 11 | MODEL_ID = "deepseek-chat" 12 | # Using OpenRouter's API to call DeepSeek 13 | # MODEL_ID = "deepseek/deepseek-chat", 14 | 15 | class DeepSeekLLM(OpenAILLM): 16 | def __init__(self): 17 | api_keys = get_api_keys() 18 | # Using DeepSeek's API directly 19 | self.client = OpenAI(api_key=api_keys["DEEPSEEK_API_KEY"], base_url="https://api.deepseek.com") 20 | # Using OpenRouter's API to call DeepSeek 21 | # self.client = OpenAI(api_key=api_keys["OPENROUTER_API_KEY"], base_url="https://openrouter.ai/api/v1") 22 | 23 | def generate_response(self, system: str, messages: List[Dict], tools: List[Dict]) -> Dict: 24 | prepared_messages = self.prepare_messages(system, messages, tools) 25 | logger.info(f"Sending request to DeepSeek: {json.dumps(prepared_messages, indent=2)}") 26 | completion = self.client.chat.completions.create( 27 | model=MODEL_ID, 28 | **prepared_messages 29 | ) 30 | logger.info(f"Received response from DeepSeek: {completion}") 31 | return self.convert_to_json(completion) -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/deepseek_lambda.py: -------------------------------------------------------------------------------- 1 | # openai/deepseek_lambda.py 2 | from common.base_llm import logger 3 | from deepseek_handler import DeepSeekLLM 4 | 5 | def lambda_handler(event, context): 6 | logger.info(f"Received event: {event}") 7 | try: 8 | system = event.get('system') 9 | messages = event.get('messages', []) 10 | tools = event.get('tools', []) 11 | 12 | llm = DeepSeekLLM() 13 | assistant_message = llm.generate_response(system, messages, tools) 
14 | 15 | messages.append(assistant_message["message"]) 16 | 17 | return { 18 | 'statusCode': 200, 19 | 'body': { 20 | 'messages': messages, 21 | 'function_calls': assistant_message['function_calls'], 22 | 'metadata': assistant_message["metadata"] 23 | } 24 | } 25 | except Exception as e: 26 | logger.error(e) 27 | raise e # To trigger the retry logic in the caller 28 | 29 | if __name__ == "__main__": 30 | # Test event for GPT-4 31 | test_event_deepseek = { 32 | "model": "deepseek-chat", 33 | "messages": [ 34 | {"role": "user", "content": "What is 25*4+64*3?"} 35 | ], 36 | "tools": [ 37 | { 38 | "type": "function", 39 | "function": { 40 | "name": "calculator", 41 | "parameters": { 42 | "type": "object", 43 | "properties": { 44 | "a": {"type": "number"}, 45 | "b": {"type": "number"}, 46 | }, 47 | }, 48 | }, 49 | } 50 | ] 51 | } 52 | response = lambda_handler(test_event_deepseek, None) 53 | print(response) 54 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/openai_lambda.py: -------------------------------------------------------------------------------- 1 | # handlers/openai_lambda.py 2 | from common.base_llm import logger 3 | from openai_handler import OpenAILLM 4 | 5 | def lambda_handler(event, context): 6 | logger.info(f"Received event: {event}") 7 | try: 8 | system = event.get('system') 9 | messages = event.get('messages', []) 10 | tools = event.get('tools', []) 11 | 12 | llm = OpenAILLM() 13 | assistant_message = llm.generate_response(system, messages, tools) 14 | 15 | messages.append(assistant_message["message"]) 16 | 17 | return { 18 | 'statusCode': 200, 19 | 'body': { 20 | 'messages': messages, 21 | 'function_calls': assistant_message['function_calls'], 22 | 'metadata': assistant_message["metadata"] 23 | } 24 | } 25 | except Exception as e: 26 | logger.error(e) 27 | raise e # To trigger the retry logic in the caller 28 | 29 | if __name__ == "__main__": 30 | # Test event for GPT-4 31 | test_event_gpt4 = { 32 | "model": "gpt-4o", 33 | "messages": [ 34 | {"role": "user", "content": "What is 25*4+64*3?"} 35 | ], 36 | "tools": [ 37 | { 38 | "type": "function", 39 | "function": { 40 | "name": "calculator", 41 | "parameters": { 42 | "type": "object", 43 | "properties": { 44 | "a": {"type": "number"}, 45 | "b": {"type": "number"}, 46 | }, 47 | }, 48 | }, 49 | } 50 | ] 51 | } 52 | response = lambda_handler(test_event_gpt4, None) 53 | print(response) 54 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/requirements.in: -------------------------------------------------------------------------------- 1 | openai -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | annotated-types==0.7.0 4 | # via pydantic 5 | anyio==4.8.0 6 | # via 7 | # httpx 8 | # openai 9 | certifi==2025.1.31 10 | # via 11 | # httpcore 12 | # httpx 13 | distro==1.9.0 14 | # via openai 15 | h11==0.14.0 16 | # via httpcore 17 | httpcore==1.0.7 18 | # via httpx 19 | httpx==0.28.1 20 | # via openai 21 | idna==3.10 22 | # via 23 | # anyio 24 | # httpx 25 | jiter==0.8.2 26 | # via openai 27 | openai==1.61.0 28 | # via -r requirements.in 29 | pydantic==2.10.6 30 | # via openai 31 | 
pydantic-core==2.27.2 32 | # via pydantic 33 | sniffio==1.3.1 34 | # via 35 | # anyio 36 | # openai 37 | tqdm==4.67.1 38 | # via openai 39 | typing-extensions==4.12.2 40 | # via 41 | # anyio 42 | # openai 43 | # pydantic 44 | # pydantic-core 45 | -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | LLMLayer: 7 | Type: AWS::Serverless::LayerVersion 8 | Properties: 9 | ContentUri: ../../lambda_layer/python 10 | CompatibleRuntimes: 11 | - python3.12 12 | Metadata: 13 | BuildMethod: python3.12 14 | # Anthropic Lambda Function 15 | OpenAILambda: 16 | Type: AWS::Serverless::Function 17 | Properties: 18 | CodeUri: . 19 | Handler: openai_lambda.lambda_handler 20 | Description: OpenAI LLM Lambda 21 | Layers: 22 | - !Ref LLMLayer 23 | Runtime: python3.12 24 | Timeout: 90 25 | MemorySize: 128 26 | Environment: 27 | Variables: 28 | POWERTOOLS_SERVICE_NAME: ai-agent-llm 29 | Architectures: 30 | - arm64 31 | Policies: 32 | - AWSLambdaBasicExecutionRole 33 | # Anthropic Lambda Function 34 | DeepSeekLambda: 35 | Type: AWS::Serverless::Function 36 | Properties: 37 | CodeUri: . 38 | Handler: deepseek_lambda.lambda_handler 39 | Description: DeepSeek LLM Lambda 40 | Layers: 41 | - !Ref LLMLayer 42 | Runtime: python3.12 43 | Timeout: 90 44 | MemorySize: 128 45 | Environment: 46 | Variables: 47 | POWERTOOLS_SERVICE_NAME: ai-agent-llm 48 | Architectures: 49 | - arm64 50 | Policies: 51 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/xai_handler.py: -------------------------------------------------------------------------------- 1 | # llms/deepseek_handler.py 2 | from openai import OpenAI 3 | from common.base_llm import logger 4 | from openai_handler import OpenAILLM 5 | from common.config import get_api_keys 6 | 7 | from typing import List, Dict 8 | import json 9 | 10 | MODEL_ID = "grok-2-1212" 11 | 12 | class XAILLM(OpenAILLM): 13 | def __init__(self): 14 | api_keys = get_api_keys() 15 | # Using DeepSeek's API directly 16 | self.client = OpenAI( 17 | api_key=api_keys["XAI_API_KEY"], 18 | base_url="https://api.x.ai/v1" 19 | ) 20 | 21 | def generate_response(self, system: str, messages: List[Dict], tools: List[Dict]) -> Dict: 22 | prepared_messages = self.prepare_messages(system, messages, tools) 23 | logger.info(f"Sending request to XAI: {json.dumps(prepared_messages, indent=2)}") 24 | completion = self.client.chat.completions.create( 25 | model=MODEL_ID, 26 | **prepared_messages 27 | ) 28 | logger.info(f"Received response from XAI: {completion}") 29 | return self.convert_to_json(completion) -------------------------------------------------------------------------------- /lambda/call_llm/functions/openai_llm/xai_lambda.py: -------------------------------------------------------------------------------- 1 | # openai/deepseek_lambda.py 2 | from common.base_llm import logger 3 | from xai_handler import XAILLM 4 | 5 | def lambda_handler(event, context): 6 | logger.info(f"Received event: {event}") 7 | try: 8 | system = event.get('system') 9 | messages = event.get('messages', []) 10 | tools = event.get('tools', []) 11 | 12 | llm = XAILLM() 13 | assistant_message = llm.generate_response(system, messages, tools) 14 | 15 | 
messages.append(assistant_message["message"]) 16 | 17 | return { 18 | 'statusCode': 200, 19 | 'body': { 20 | 'messages': messages, 21 | 'function_calls': assistant_message['function_calls'], 22 | 'metadata': assistant_message["metadata"] 23 | } 24 | } 25 | except Exception as e: 26 | logger.error(e) 27 | raise e # To trigger the retry logic in the caller 28 | 29 | if __name__ == "__main__": 30 | # Test event for GPT-4 31 | test_event = { 32 | "messages": [ 33 | {"role": "user", "content": "What is 25*4+64*3?"} 34 | ], 35 | "tools": [ 36 | { 37 | "type": "function", 38 | "function": { 39 | "name": "calculator", 40 | "parameters": { 41 | "type": "object", 42 | "properties": { 43 | "a": {"type": "number"}, 44 | "b": {"type": "number"}, 45 | }, 46 | }, 47 | }, 48 | } 49 | ] 50 | } 51 | response = lambda_handler(test_event, None) 52 | print(response) 53 | -------------------------------------------------------------------------------- /lambda/call_llm/lambda_layer/python/__init__.py: -------------------------------------------------------------------------------- 1 | # lambda_layer/python/__init__.py 2 | import sys 3 | import os 4 | 5 | # Ensure that the lambda layer modules are properly registered 6 | sys.path.insert(0, os.path.dirname(__file__)) 7 | 8 | # Now explicitly expose the modules 9 | __all__ = ["common"] -------------------------------------------------------------------------------- /lambda/call_llm/lambda_layer/python/common/__init__.py: -------------------------------------------------------------------------------- 1 | # call_llm/common/__init__.py 2 | from .base_llm import BaseLLM, logger 3 | from .config import get_api_keys -------------------------------------------------------------------------------- /lambda/call_llm/lambda_layer/python/common/base_llm.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Dict, List, Any 3 | from aws_lambda_powertools import Logger 4 | from aws_lambda_powertools import Tracer 5 | 6 | logger = Logger(level="INFO") 7 | tracer = Tracer(service="ai-agents") 8 | 9 | class BaseLLM(ABC): 10 | @abstractmethod 11 | def prepare_messages(self, system: str, messages: List[Dict], tools: List[Dict]) -> Dict: 12 | """Prepare messages for the specific LLM format""" 13 | pass 14 | 15 | @abstractmethod 16 | def convert_to_json(self, response: Any) -> Dict: 17 | """Convert LLM response to standardized JSON format""" 18 | pass 19 | 20 | @abstractmethod 21 | def generate_response(self, system: str, messages: List[Dict], tools: List[Dict]) -> Dict: 22 | """Generate response from the LLM""" 23 | pass 24 | -------------------------------------------------------------------------------- /lambda/call_llm/lambda_layer/python/common/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | from aws_lambda_powertools.utilities import parameters 3 | 4 | def get_api_keys(): 5 | try: 6 | keys = json.loads(parameters.get_secret("/ai-agent/api-keys")) 7 | return keys 8 | except ValueError: 9 | raise ValueError("API keys not found in Secrets Manager") -------------------------------------------------------------------------------- /lambda/call_llm/lambda_layer/python/requirements.in: -------------------------------------------------------------------------------- 1 | aws-lambda-powertools>=2.30.2 # AWS Lambda Powertools for Python 2 | aws_xray_sdk # For tracer 
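The common layer above defines the contract that every provider package follows: a handler module that subclasses `BaseLLM`, plus a thin `*_lambda.py` entry point that wires it to Lambda. The sketch below is illustrative only — `EchoLLM` is a hypothetical, offline stand-in (it calls no real provider API) that shows where `prepare_messages`, `generate_response`, and `convert_to_json` fit; the real handlers such as `ClaudeLLM` or `OpenAILLM` replace the echo logic with an SDK call and pull keys via `get_api_keys`.

```python
# Hypothetical example (not part of the repository): a minimal provider handler
# wired the same way as the real ones, using only the common layer shown above.
from typing import Any, Dict, List

from common.base_llm import BaseLLM, logger


class EchoLLM(BaseLLM):
    """Offline stand-in that echoes the last user message instead of calling an API."""

    def prepare_messages(self, system: str, messages: List[Dict], tools: List[Dict]) -> Dict:
        # Real handlers reshape this into the provider's request format.
        return {"system": system, "messages": messages, "tools": tools}

    def convert_to_json(self, response: Any) -> Dict:
        # Normalize to the shape the Step Functions state machine expects.
        return {
            "message": {"role": "assistant", "content": str(response)},
            "function_calls": [],
            "metadata": {"model": "echo"},
        }

    def generate_response(self, system: str, messages: List[Dict], tools: List[Dict]) -> Dict:
        prepared = self.prepare_messages(system, messages, tools)
        last_user = prepared["messages"][-1]["content"] if prepared["messages"] else ""
        logger.info(f"EchoLLM responding to: {last_user}")
        return self.convert_to_json(f"You said: {last_user}")


def lambda_handler(event, context):
    """Entry point following the same pattern as claude_lambda / openai_lambda."""
    messages = event.get("messages", [])
    llm = EchoLLM()
    assistant_message = llm.generate_response(
        event.get("system"), messages, event.get("tools", [])
    )
    messages.append(assistant_message["message"])
    return {
        "statusCode": 200,
        "body": {
            "messages": messages,
            "function_calls": assistant_message["function_calls"],
            "metadata": assistant_message["metadata"],
        },
    }
```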
-------------------------------------------------------------------------------- /lambda/call_llm/lambda_layer/python/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | aws-lambda-powertools==3.5.0 4 | # via -r requirements.in 5 | aws-xray-sdk==2.14.0 6 | # via -r requirements.in 7 | botocore==1.36.15 8 | # via aws-xray-sdk 9 | jmespath==1.0.1 10 | # via 11 | # aws-lambda-powertools 12 | # botocore 13 | python-dateutil==2.9.0.post0 14 | # via botocore 15 | six==1.17.0 16 | # via python-dateutil 17 | typing-extensions==4.12.2 18 | # via aws-lambda-powertools 19 | urllib3==2.3.0 20 | # via botocore 21 | wrapt==1.17.2 22 | # via aws-xray-sdk 23 | -------------------------------------------------------------------------------- /lambda/call_llm/requirements.in: -------------------------------------------------------------------------------- 1 | anthropic==0.35.0 2 | openai 3 | ai21>=2.13.0 4 | google-genai==0.6.0 5 | aws-lambda-powertools>=2.30.2 # AWS Lambda Powertools for Python 6 | -------------------------------------------------------------------------------- /lambda/call_llm/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | ai21==3.0.1 4 | # via -r requirements.in 5 | ai21-tokenizer==0.12.0 6 | # via ai21 7 | annotated-types==0.7.0 8 | # via pydantic 9 | anthropic==0.35.0 10 | # via -r requirements.in 11 | anyio==4.5.2 12 | # via 13 | # ai21-tokenizer 14 | # anthropic 15 | # httpx 16 | # openai 17 | aws-lambda-powertools==3.3.0 18 | # via -r requirements.in 19 | cachetools==5.5.1 20 | # via google-auth 21 | certifi==2024.12.14 22 | # via 23 | # httpcore 24 | # httpx 25 | # requests 26 | charset-normalizer==3.4.0 27 | # via requests 28 | distro==1.9.0 29 | # via 30 | # anthropic 31 | # openai 32 | filelock==3.16.1 33 | # via huggingface-hub 34 | fsspec==2024.10.0 35 | # via huggingface-hub 36 | google-auth==2.38.0 37 | # via google-genai 38 | google-genai==0.6.0 39 | # via -r requirements.in 40 | h11==0.14.0 41 | # via httpcore 42 | httpcore==1.0.7 43 | # via httpx 44 | httpx==0.27.2 45 | # via 46 | # ai21 47 | # anthropic 48 | # openai 49 | huggingface-hub==0.26.5 50 | # via tokenizers 51 | idna==3.10 52 | # via 53 | # anyio 54 | # httpx 55 | # requests 56 | jiter==0.8.2 57 | # via 58 | # anthropic 59 | # openai 60 | jmespath==1.0.1 61 | # via aws-lambda-powertools 62 | openai==1.57.4 63 | # via -r requirements.in 64 | packaging==24.2 65 | # via huggingface-hub 66 | pillow==11.1.0 67 | # via google-genai 68 | pyasn1==0.6.1 69 | # via 70 | # pyasn1-modules 71 | # rsa 72 | pyasn1-modules==0.4.1 73 | # via google-auth 74 | pydantic==2.10.3 75 | # via 76 | # ai21 77 | # anthropic 78 | # google-genai 79 | # openai 80 | pydantic-core==2.27.1 81 | # via pydantic 82 | pyyaml==6.0.2 83 | # via huggingface-hub 84 | requests==2.32.3 85 | # via 86 | # google-genai 87 | # huggingface-hub 88 | rsa==4.9 89 | # via google-auth 90 | sentencepiece==0.2.0 91 | # via ai21-tokenizer 92 | sniffio==1.3.1 93 | # via 94 | # anthropic 95 | # anyio 96 | # httpx 97 | # openai 98 | tenacity==8.5.0 99 | # via ai21 100 | tokenizers==0.21.0 101 | # via 102 | # ai21-tokenizer 103 | # anthropic 104 | tqdm==4.67.1 105 | # via 106 | # huggingface-hub 107 | 
# openai 108 | typing-extensions==4.12.2 109 | # via 110 | # ai21 111 | # anthropic 112 | # aws-lambda-powertools 113 | # huggingface-hub 114 | # openai 115 | # pydantic 116 | # pydantic-core 117 | urllib3==2.2.3 118 | # via requests 119 | websockets==14.2 120 | # via google-genai 121 | -------------------------------------------------------------------------------- /lambda/call_llm/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/call_llm/tests/__init__.py -------------------------------------------------------------------------------- /lambda/call_llm/tests/conftest.py: -------------------------------------------------------------------------------- 1 | # tests/conftest.py 2 | import os 3 | import sys 4 | 5 | # Ensure the lambda_layer/python directory is in sys.path 6 | LAMBDA_LAYER_PATH = os.path.abspath( 7 | os.path.join(os.path.dirname(__file__), "../lambda_layer/python") 8 | ) 9 | sys.path.insert(0, LAMBDA_LAYER_PATH) 10 | 11 | # Ensure the functions directory (Lambda handlers) is in sys.path 12 | FUNCTIONS_DIR = os.path.abspath( 13 | os.path.join(os.path.dirname(__file__), "../functions") 14 | ) 15 | sys.path.insert(0, FUNCTIONS_DIR) 16 | 17 | # Print for debugging 18 | print("Updated sys.path for pytest:") 19 | for p in sys.path: 20 | print(p) 21 | 22 | import sys 23 | import importlib 24 | 25 | print("Checking module availability:") 26 | try: 27 | common = importlib.import_module("common.base_llm") 28 | print("✅ common.base_llm loaded successfully") 29 | except ModuleNotFoundError: 30 | print("❌ Module common.base_llm not found!") -------------------------------------------------------------------------------- /lambda/call_llm/tests/events/multiple-places-weather-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "system": "You are a helpful AI assistant.", 3 | "messages": [ 4 | { 5 | "role": "user", 6 | "content": "What is the weather like in Boston, MA and in Seattle, WA?" 7 | } 8 | ], 9 | "tools": [ 10 | { 11 | "name": "get_current_weather", 12 | "description": "Get the current weather in a given location", 13 | "input_schema": { 14 | "type": "object", 15 | "properties": { 16 | "location": { 17 | "type": "string", 18 | "description": "The city and state, e.g. San Francisco, CA." 19 | } 20 | }, 21 | "required": ["location"] 22 | } 23 | }, 24 | { 25 | "name": "get_current_UTC_time", 26 | "description": "Get the current time in UTC timezone", 27 | "input_schema": { 28 | "type": "object", 29 | "properties": {} 30 | } 31 | } 32 | ] 33 | } -------------------------------------------------------------------------------- /lambda/call_llm/tests/requirements-test.txt: -------------------------------------------------------------------------------- 1 | # tests/requirements-test.txt 2 | pytest>=7.4.0 3 | pytest-mock>=3.12.0 4 | anthropic>=0.18.0 5 | openai>=1.12.0 6 | google-genai>=0.6.0 7 | aws-lambda-powertools>=2.34.1 8 | boto3>=1.34.34 9 | moto>=4.2.14 # For mocking AWS services in tests -------------------------------------------------------------------------------- /lambda/cookiecutter/README.md: -------------------------------------------------------------------------------- 1 | # Cookiecutter for Tools 2 | 3 | A simple way to create a new tool for the Step Functions Agent using [Cookiecutter](https://cookiecutter.readthedocs.io/en/latest/). 
You can use your favorite programming language and bootstrap the files that are needed to add a new tool to the framework. 4 | 5 | ## Usage 6 | 7 | You can either install the cookiecutter package or use it with `uvx`. The rest of the documentation here will use `uvx` to run the cookiecutter. 8 | 9 | Start with the following command to get the tools folder: 10 | 11 | ```bash 12 | cd lambda/tools 13 | ``` 14 | 15 | ### Python 16 | 17 | ```bash 18 | uvx cookiecutter https://github.com/guyernest/step-functions-agent --directory="lambda/cookiecutter/tools/python" 19 | ``` 20 | 21 | ### Typescript 22 | 23 | ```bash 24 | uvx cookiecutter https://github.com/guyernest/step-functions-agent --directory="lambda/cookiecutter/tools/typescript" 25 | ``` 26 | 27 | ### Go 28 | 29 | ```bash 30 | uvx cookiecutter https://github.com/guyernest/step-functions-agent --directory="lambda/cookiecutter/tools/go" 31 | ``` 32 | 33 | ### Rust 34 | 35 | ```bash 36 | uvx cookiecutter https://github.com/guyernest/step-functions-agent --directory="lambda/cookiecutter/tools/rust" 37 | ``` 38 | 39 | ### Java 40 | 41 | ```bash 42 | uvx cookiecutter https://github.com/guyernest/step-functions-agent --directory="lambda/cookiecutter/tools/java" 43 | ``` 44 | -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/cookiecutter.json: -------------------------------------------------------------------------------- 1 | { 2 | "tool_name": "EarthQuakeQuery", 3 | "tool_description": "Query interface to the USGS Earthquake Catalog API", 4 | "tool_functions": { 5 | "functions_names": [ 6 | "{{ cookiecutter.tool_name | to_snake_case }}" 7 | ] 8 | }, 9 | "input_param_name": "start_date", 10 | "input_param_description": "The start date of the query in YYYY-MM-DD format", 11 | "input_test_value": "2024-01-01", 12 | "__prompts__": { 13 | "tool_name": "Select your tool package name", 14 | "tool_description": "Describe the main functionality of the tool set", 15 | "tool_functions": "If you want more than one function in your tool set, list them here as list in json format", 16 | "input_param_name": "Name of the input parameter", 17 | "input_param_description": "Description of the input parameter" 18 | }, 19 | "_extensions": ["local_extensions.FunctionNamesExtension"] 20 | } -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/local_extensions.py: -------------------------------------------------------------------------------- 1 | from jinja2.ext import Extension 2 | 3 | import re 4 | 5 | def to_snake_case(value): 6 | pattern = re.compile(r'(? str: 20 | """{{cookiecutter.tool_description}}. 21 | Args: 22 | {{cookiecutter.input_param_name}} (str): {{cookiecutter.input_param_description}}. 23 | 24 | Returns: 25 | str: {{cookiecutter.tool_description}}, 26 | or an error message if the execution fails. 27 | 28 | Raises: 29 | Exception: Any exception during query execution will be caught and returned as an error message. 
30 | """ 31 | try: 32 | 33 | result = "Logic Implementation Here (generated by LLM)" 34 | return json.dumps(result, indent=2) 35 | except Exception as e: 36 | return f"Error executing query: {str(e)}" 37 | {% endfor %} 38 | 39 | 40 | @tracer.capture_method 41 | def lambda_handler(event, context): 42 | # Get the tool name from the input event 43 | tool_use = event 44 | tool_name = tool_use['name'] 45 | tool_input = tool_use['input'] 46 | 47 | logger.info(f"Tool name: {tool_name}") 48 | match tool_name: 49 | {% for name in cookiecutter.tool_functions.functions_names %} 50 | case '{{name}}': 51 | result = {{name}}(tool_input['{{cookiecutter.input_param_name}}']) 52 | {% endfor %} 53 | 54 | # Add more tools functions here as needed 55 | 56 | case _: 57 | result = json.dumps({ 58 | 'error': f"Unknown tool name: {tool_name}" 59 | }) 60 | 61 | return { 62 | "type": "tool_result", 63 | "name": tool_name, 64 | "tool_use_id": tool_use["id"], 65 | "content": result 66 | } 67 | 68 | if __name__ == "__main__": 69 | 70 | {% for name in cookiecutter.tool_functions.functions_names %} 71 | # Test {{name}} function 72 | # Test event 73 | test_event = { 74 | "name": "{{name}}", 75 | "id": "execute_unique_id", 76 | "input": { 77 | "{{cookiecutter.input_param_name}}": "{{cookiecutter.input_test_value}}" 78 | }, 79 | "type": "tool_use" 80 | } 81 | 82 | # Call lambda handler with test events 83 | print("\nTesting tool {{name}}:") 84 | response = lambda_handler(test_event, None) 85 | print(response) 86 | {% endfor %} -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/{{cookiecutter.tool_name}}/requirements.in: -------------------------------------------------------------------------------- 1 | aws-lambda-powertools 2 | aws_xray_sdk -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/{{cookiecutter.tool_name}}/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # {{cookiecutter.tool_description}} 7 | {{cookiecutter.tool_name}}: 8 | Type: AWS::Serverless::Function 9 | Properties: 10 | CodeUri: . 
11 | Handler: index.lambda_handler 12 | Runtime: python3.12 13 | Timeout: 90 14 | MemorySize: 128 15 | Environment: 16 | Variables: 17 | POWERTOOLS_SERVICE_NAME: {{cookiecutter.tool_name}} 18 | Architectures: 19 | - arm64 20 | Policies: 21 | - SecretsManagerRead 22 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/{{cookiecutter.tool_name}}/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/cookiecutter/tools/python/{{cookiecutter.tool_name}}/tests/__init__.py -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/{{cookiecutter.tool_name}}/tests/requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest>=7.4.0 -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/{{cookiecutter.tool_name}}/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "{{cookiecutter.input_param_name}}": "{{cookiecutter.input_test_value}}" 5 | }, 6 | "name": "{{cookiecutter.tool_name}}", 7 | "type": "tool_use" 8 | } -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/python/{{cookiecutter.tool_name}}/tests/test_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from index import lambda_handler 4 | 5 | {% for name in cookiecutter.tool_functions.functions_names %} 6 | 7 | @pytest.fixture 8 | def input_event_{{ name }}(): 9 | return { 10 | "id": "uniquetooluseid", 11 | "input": { 12 | "{{cookiecutter.input_param_name}}": "{{cookiecutter.input_test_value}}" 13 | }, 14 | "name": "{{name}}", 15 | "type": "tool_use" 16 | } 17 | 18 | 19 | def test_lambda_handler_{{ name }}(input_event_{{ name }}): 20 | # Test the handler 21 | response = lambda_handler(input_event_{{ name }}, None) 22 | 23 | # Assert response structure 24 | assert response["type"] == "tool_result" 25 | assert response["tool_use_id"] == "uniquetooluseid" 26 | assert "content" in response 27 | 28 | {% endfor %} 29 | 30 | -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/rust/cookiecutter.json: -------------------------------------------------------------------------------- 1 | { 2 | "tool_name": "ToolNameRust", 3 | "tool_description": "Tool description", 4 | "input_param_name": "input_param_name", 5 | "input_param_description": "input_param_description", 6 | "input_test_value": "2024-01-01" 7 | } -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/rust/{{cookiecutter.tool_name}}/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "{{cookiecutter.tool_name}}" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | serde_json = "1" 8 | 9 | lambda_runtime = "0.13.0" 10 | tokio = { version = "1", features = ["macros"] } 11 | serde = "1.0.217" 12 | anyhow = "1.0" 13 | 14 | 15 | -------------------------------------------------------------------------------- 
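All of the cookiecutter tool templates (Python, Rust, and TypeScript) generate Lambdas that speak the same contract: the agent invokes the function with a `tool_use` event (`name`, `id`, `input`) and expects a `tool_result` with the matching `tool_use_id` back. A quick way to sanity-check a freshly generated Python tool locally is a round trip like the sketch below — the function name and parameter are placeholders standing in for whatever answers you gave cookiecutter:

```python
# Hypothetical local smoke test for a tool generated from the Python template.
# "earth_quake_query" and "start_date" stand in for your cookiecutter answers.
import json

from index import lambda_handler  # the generated tool's handler module

event = {
    "type": "tool_use",
    "id": "local-test-1",
    "name": "earth_quake_query",            # one of the generated function names
    "input": {"start_date": "2024-01-01"},  # matches input_param_name / input_test_value
}

result = lambda_handler(event, None)

# The agent's state machine relies on these fields being present.
assert result["type"] == "tool_result"
assert result["tool_use_id"] == "local-test-1"
print(json.dumps(result, indent=2))
```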
/lambda/cookiecutter/tools/rust/{{cookiecutter.tool_name}}/src/main.rs: -------------------------------------------------------------------------------- 1 | use lambda_runtime::{run, service_fn, tracing, Error}; 2 | 3 | mod event_handler; 4 | use event_handler::function_handler; 5 | 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<(), Error> { 9 | tracing::init_default_subscriber(); 10 | 11 | run(service_fn(function_handler)).await 12 | } 13 | -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/rust/{{cookiecutter.tool_name}}/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # Semantic search using vector database (Qdrant) in Rust 7 | SemanticSearchRust: 8 | Type: AWS::Serverless::Function 9 | Metadata: 10 | BuildMethod: rust-cargolambda 11 | Properties: 12 | CodeUri: . 13 | Handler: bootstrap 14 | Runtime: provided.al2 15 | Timeout: 90 16 | MemorySize: 128 17 | Environment: 18 | Variables: 19 | POWERTOOLS_SERVICE_NAME: SemanticSearchRust 20 | Architectures: 21 | - arm64 22 | Policies: 23 | - SecretsManagerRead 24 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/rust/{{cookiecutter.tool_name}}/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "{{cookiecutter.input_param_name}}": "{{cookiecutter.input_test_value}}" 5 | }, 6 | "name": "{{cookiecutter.tool_name}}", 7 | "type": "tool_use" 8 | } -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/typescript/cookiecutter.json: -------------------------------------------------------------------------------- 1 | { 2 | "tool_name": "EarthQuakeQueryTS", 3 | "tool_description": "Query interface to the USGS Earthquake Catalog API", 4 | "input_param_name": "start_date", 5 | "input_param_description": "The start date of the query in YYYY-MM-DD format", 6 | "input_test_value": "2024-01-01" 7 | } -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/typescript/{{cookiecutter.tool_name}}/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "{{cookiecutter.tool_name}}", 3 | "version": "1.0.0", 4 | "description": "{{cookiecutter.tool_description}}", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "build": "tsc", 8 | "watch": "tsc -w", 9 | "test": "tsx src/local-test.ts", 10 | "prebuild": "rm -rf dist/", 11 | "prestart": "npm run build" 12 | }, 13 | "keywords": [], 14 | "author": "", 15 | "license": "ISC", 16 | "dependencies": { 17 | "@types/aws-lambda": "^8.10.147", 18 | "@aws-lambda-powertools/logger": "^2.12.0", 19 | "@aws-lambda-powertools/parameters": "^2.12.0", 20 | "@aws-lambda-powertools/tracer": "2.13.1", 21 | "@aws-sdk/client-secrets-manager": "^3.741.0", 22 | "node-fetch": "2.6.7", 23 | "@types/node-fetch": "^2.6.4" 24 | }, 25 | "devDependencies": { 26 | "@types/jest": "^29.x", 27 | "@types/node": "22.10.6", 28 | "jest": "^29.x", 29 | "ts-jest": "^29.x", 30 | "typescript": "5.7.3", 31 | "tsx": "4.19.2", 32 | "dotenv": "^16.x" 33 | } 34 | } -------------------------------------------------------------------------------- 
/lambda/cookiecutter/tools/typescript/{{cookiecutter.tool_name}}/src/local-test.ts: -------------------------------------------------------------------------------- 1 | const { handler } = require('./index'); 2 | import { Context } from 'aws-lambda'; 3 | 4 | async function runTest() { 5 | try { 6 | console.log('Starting integration test...'); 7 | 8 | // Create a test event 9 | console.log('Creating test event...'); 10 | const testEvent = { 11 | "name": "{{cookiecutter.tool_name}}", 12 | "id": "unique_request_id", 13 | "input": { 14 | "{{cookiecutter.input_param_name}}": "{{cookiecutter.input_test_value}}" 15 | } 16 | }; 17 | 18 | // Create a mock context object 19 | const mockContext: Context = { 20 | callbackWaitsForEmptyEventLoop: true, 21 | functionName: 'BooksAPILambda', 22 | functionVersion: '1', 23 | invokedFunctionArn: 'arn:aws:lambda:local:000000000000:function:{{cookiecutter.tool_name}}', 24 | memoryLimitInMB: '128', 25 | awsRequestId: 'local-test', 26 | logGroupName: '/aws/lambda/{{cookiecutter.tool_name}}', 27 | logStreamName: 'local-test', 28 | getRemainingTimeInMillis: () => 30000, 29 | done: () => {}, 30 | fail: () => {}, 31 | succeed: () => {}, 32 | }; 33 | const mockCallback = () => null; // Simple null callback 34 | 35 | // Then process it with our handler 36 | console.log('Testing handler...'); 37 | const result = await handler(testEvent, mockContext, ); 38 | 39 | // Print results 40 | console.log('\nTest Results:'); 41 | console.log('------------------------'); 42 | console.log('result:', result); 43 | console.log('------------------------'); 44 | // if (result.statusCode === 200) { 45 | // console.log('\nExtracted Text:'); 46 | // console.log('------------------------'); 47 | // console.log(result.body.text); 48 | // console.log('------------------------'); 49 | // } else { 50 | // console.log('Error:', result.body); 51 | // } 52 | 53 | } catch (error) { 54 | console.error('Test failed:', error); 55 | } 56 | } 57 | 58 | runTest(); 59 | -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/typescript/{{cookiecutter.tool_name}}/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # {{cookiecutter.tool_description}} 7 | {{cookiecutter.tool_name}}: 8 | Type: AWS::Serverless::Function 9 | Properties: 10 | CodeUri: . 
11 | Handler: dist/index.handler 12 | Description: "{{cookiecutter.tool_description}}" 13 | Runtime: nodejs18.x 14 | Timeout: 90 15 | MemorySize: 128 16 | Environment: 17 | Variables: 18 | POWERTOOLS_SERVICE_NAME: {{cookiecutter.tool_name}} 19 | Architectures: 20 | - arm64 21 | Policies: 22 | - SecretsManagerRead 23 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/typescript/{{cookiecutter.tool_name}}/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/cookiecutter/tools/typescript/{{cookiecutter.tool_name}}/tests/__init__.py -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/typescript/{{cookiecutter.tool_name}}/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "{{cookiecutter.input_param_name}}": "{{cookiecutter.input_test_value}}" 5 | }, 6 | "name": "{{cookiecutter.tool_name}}", 7 | "type": "tool_use" 8 | } -------------------------------------------------------------------------------- /lambda/cookiecutter/tools/typescript/{{cookiecutter.tool_name}}/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2022", 4 | "strict": true, 5 | "preserveConstEnums": true, 6 | "sourceMap": false, 7 | "module": "commonjs", 8 | "moduleResolution": "node", 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "isolatedModules": true, 13 | "outDir": "./dist", 14 | "rootDir": "./src" 15 | }, 16 | "include": [ 17 | "src/**/*" 18 | ], 19 | "exclude": [ 20 | "node_modules", 21 | "**/*.test.ts" 22 | ] 23 | } -------------------------------------------------------------------------------- /lambda/extensions/long-content/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "makefile.configureOnOpen": false 3 | } -------------------------------------------------------------------------------- /lambda/extensions/long-content/CLAUDE.md: -------------------------------------------------------------------------------- 1 | # Lambda Runtime API Proxy - Developer Guide 2 | 3 | There are a few ways to build Lambda extensions, and we tried out several of them. This one is built on the lessons learned from the other approaches. We need to build a proxy and not a standalone extension, because the events that an extension receives don't include the input and output of the Lambda function. Since we eventually want to be able to manipulate the input and output of the Lambda function, we need to build a proxy. 4 | 5 | We will do it in Rust, as it is a systems programming language that is fast and safe. 6 | 7 | We will do it in small steps: 8 | 9 | * Building a simple Lambda extension in Python that simply logs the input event. This Lambda function will use the extension as a layer that will be deployed with the function to the /opt/extensions directory. 10 | * Building the proxy first as it is, without any changes, and deploying it with the above Lambda function. We can verify that the logs are written and that everything is still working.
11 | * Next we will upgrade the proxy to a newer version of hyper and tokio, and make sure that it is still working. 12 | * Then we will add the ability to manipulate the input and output of the Lambda function. 13 | 14 | ## Build & Test Commands 15 | 16 | * `cargo lambda build --extension --target x86_64-unknown-linux-musl` - Standard build of the Lambda extension 17 | * `cargo lambda build --extension --target aarch64-unknown-linux-musl` - Cross-compile for ARM64 18 | * `cargo test` - Run tests 19 | * `make` or `make default` - Build and deploy layer 20 | * `make clean` - Clean build and cargo artifacts 21 | * `make zip` - Create deployment zip file 22 | * `make doc` - Generate documentation 23 | * `cross build --release --target x86_64-unknown-linux-musl` - Cross-compile for x86_64 24 | * `cross build --release --target aarch64-unknown-linux-musl` - Cross-compile for ARM64 25 | 26 | ## Code Style Guidelines 27 | 28 | * Follow standard Rust naming conventions: 29 | * `snake_case` for functions/variables 30 | * `SCREAMING_SNAKE_CASE` for constants 31 | * Group imports logically, with stdlib first, then external crates 32 | * Use doc comments with `///` for items and `//!` for modules 33 | * Use descriptive error messages in `expect()` and `unwrap_or_else()` 34 | * Prefer strong typing over raw strings/integers 35 | * Organize modules with clear hierarchical structure 36 | * Use Tokio async/await consistently for async operations 37 | * Make liberal use of Rust's type system for safety guarantees 38 | 39 | ## Architecture 40 | 41 | This project is a Lambda Runtime API Proxy extension written in Rust, designed to intercept and process Lambda function invocations. 42 | -------------------------------------------------------------------------------- /lambda/extensions/long-content/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aws-lambda-runtime-api-proxy-rs" 3 | version = "0.2.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | futures = "0.3.31" 10 | once_cell = "1.20.3" 11 | parking_lot = "0.12.3" 12 | tokio = { version = "1.44.0", features = ["full"] } 13 | tokio-util = { version = "0.7.13", features = ["io"] } 14 | hyper = { version = "0.14.28", features = ["client", "server", "runtime", "tcp", "stream", "http1"] } 15 | httprouter = "0.5.0" 16 | serde = { version = "1.0", features = ["derive"] } 17 | serde_json = "1.0.140" 18 | uuid = { version = "1.4", features = ["v4"] } 19 | chrono = "0.4" 20 | 21 | # AWS SDK 22 | aws-config = "1.5.18" 23 | aws-sdk-dynamodb = "1.67.0" 24 | aws-smithy-types = "1.2.13" 25 | 26 | [features] 27 | default = [] 28 | -------------------------------------------------------------------------------- /lambda/extensions/long-content/NEXT_STEPS.md: -------------------------------------------------------------------------------- 1 | # CI/CD Setup - Complete 2 | 3 | The CI/CD pipeline for building Lambda Extensions is now configured and ready to use. Here's what has been set up: 4 | 5 | ## Files Created/Modified 6 | 7 | 1. **GitHub Actions Workflow** 8 | - Created at repository root: `.github/workflows/lambda-extension-build.yml` 9 | - This workflow will notify when changes to the Lambda extension are detected 10 | - It serves as documentation/notification for builds 11 | 12 | 2.
**Root-level buildspec.yml** 13 | - Created at repository root: `buildspec.yml` 14 | - Contains all build instructions for both x86_64 and ARM64 extensions 15 | - Uses region and account-specific S3 bucket names 16 | 17 | 3. **README.md** 18 | - Updated with CI/CD information in the "CI/CD Pipeline" section 19 | 20 | ## How the Pipeline Works 21 | 22 | 1. When code is pushed to the repository, a GitHub webhook notifies CodeBuild 23 | 2. CodeBuild reads the buildspec.yml at the root of the repository 24 | 3. The buildspec.yml file: 25 | - Installs all necessary dependencies 26 | - Builds both ARM64 and x86_64 extensions using the Makefile 27 | - Creates a region and account-specific S3 bucket 28 | - Uploads the built extensions to this bucket 29 | 30 | 4. The GitHub Actions workflow provides a notification that a build should be triggered 31 | - It doesn't perform the actual build 32 | - It provides a link to monitor build status 33 | 34 | ## Testing the Pipeline 35 | 36 | To verify everything is working: 37 | 1. Make a small change to a file in `lambda/extensions/long-content` 38 | 2. Commit and push to GitHub 39 | 3. Monitor the GitHub Actions workflow execution 40 | 4. Check the build status in the CodeBuild console 41 | 5. After a successful build, verify the extensions are in the S3 bucket 42 | 43 | ## Using Built Extensions 44 | 45 | After a successful build, the extension ZIPs will be available at: 46 | ``` 47 | s3://step-functions-agent-artifacts-{region}-{account-id}/lambda-layers/extension-arm.zip 48 | s3://step-functions-agent-artifacts-{region}-{account-id}/lambda-layers/extension-x86.zip 49 | ``` 50 | 51 | Create Lambda layers from these ZIPs using: 52 | ```bash 53 | # For ARM64 54 | aws lambda publish-layer-version \ 55 | --layer-name lambda-runtime-api-proxy-arm \ 56 | --description "Lambda Runtime API Proxy Extension for ARM64" \ 57 | --license-info "MIT" \ 58 | --content S3Bucket=step-functions-agent-artifacts-{region}-{account-id},S3Key=lambda-layers/extension-arm.zip \ 59 | --compatible-runtimes provided provided.al2 nodejs14.x nodejs16.x nodejs18.x python3.9 python3.10 python3.11 java11 java17 \ 60 | --compatible-architectures arm64 61 | 62 | # For x86_64 63 | aws lambda publish-layer-version \ 64 | --layer-name lambda-runtime-api-proxy-x86 \ 65 | --description "Lambda Runtime API Proxy Extension for x86_64" \ 66 | --license-info "MIT" \ 67 | --content S3Bucket=step-functions-agent-artifacts-{region}-{account-id},S3Key=lambda-layers/extension-x86.zip \ 68 | --compatible-runtimes provided provided.al2 nodejs14.x nodejs16.x nodejs18.x python3.9 python3.10 python3.11 java11 java17 \ 69 | --compatible-architectures x86_64 70 | ``` -------------------------------------------------------------------------------- /lambda/extensions/long-content/buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | runtime-versions: 6 | rust: latest 7 | commands: 8 | # Install required dependencies 9 | - apt-get update && apt-get install -y zip cmake pkg-config libssl-dev musl-tools 10 | # Install AWS SAM CLI 11 | - pip install aws-sam-cli 12 | # Install cargo-lambda 13 | - pip install cargo-lambda 14 | # Setup cross-compilation for ARM64 15 | - rustup target add aarch64-unknown-linux-musl 16 | - rustup target add x86_64-unknown-linux-musl 17 | 18 | build: 19 | commands: 20 | # Navigate to the lambda extension directory 21 | - cd lambda/extensions/long-content 22 | # Build both extensions 23 | - make build 24 |
25 | post_build: 26 | commands: 27 | # Deploy the extensions as Lambda layers (if needed) 28 | # - make deploy 29 | 30 | # Get AWS account ID and region for S3 bucket name 31 | - export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 32 | - export AWS_REGION=$(aws configure get region) 33 | - export S3_BUCKET="step-functions-agent-artifacts-${AWS_REGION}-${AWS_ACCOUNT_ID}" 34 | 35 | # Create S3 bucket if it doesn't exist 36 | - aws s3api head-bucket --bucket ${S3_BUCKET} 2>/dev/null || aws s3 mb s3://${S3_BUCKET} --region ${AWS_REGION} 37 | 38 | # Upload the extension ZIP files to S3 39 | - aws s3 cp extension-arm.zip s3://${S3_BUCKET}/lambda-layers/ 40 | - aws s3 cp extension-x86.zip s3://${S3_BUCKET}/lambda-layers/ 41 | 42 | # Output the S3 URLs for the extension ZIPs 43 | - echo "ARM64 extension available at s3://${S3_BUCKET}/lambda-layers/extension-arm.zip" 44 | - echo "x86_64 extension available at s3://${S3_BUCKET}/lambda-layers/extension-x86.zip" 45 | 46 | artifacts: 47 | files: 48 | - lambda/extensions/long-content/extension-arm.zip 49 | - lambda/extensions/long-content/extension-x86.zip 50 | discard-paths: no 51 | 52 | cache: 53 | paths: 54 | - '/root/.cargo/registry/**/*' 55 | - '/root/.cargo/git/**/*' 56 | - 'lambda/extensions/long-content/target/**/*' -------------------------------------------------------------------------------- /lambda/extensions/long-content/images/diagram1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/extensions/long-content/images/diagram1.png -------------------------------------------------------------------------------- /lambda/extensions/long-content/images/diagram2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/extensions/long-content/images/diagram2.png -------------------------------------------------------------------------------- /lambda/extensions/long-content/opt/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # 4 | # Entrypoint for Lambda sandbox runtime. 5 | # This is a simplified version that only works for one architecture. 6 | # 7 | # This script, when deployed as a layer, must be named the same as crate::EXTENSION_NAME (main.rs) 8 | # 9 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 10 | # SPDX-License-Identifier: MIT-0 11 | # 12 | 13 | # Execute the extension binary 14 | args=("$@") 15 | exec /opt/lrap "${args[@]}" 16 | -------------------------------------------------------------------------------- /lambda/extensions/long-content/opt/wrapper: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Launch the application runtime with the Runtime API pointing to LRAP extension. 3 | # NOTE: This port must be the same as defined in crate::DEFAULT_PROXY_PORT. 
4 | 5 | export AWS_LAMBDA_RUNTIME_API="127.0.0.1:9009" 6 | exec "$@" 7 | 8 | -------------------------------------------------------------------------------- /lambda/extensions/long-content/samconfig.toml: -------------------------------------------------------------------------------- 1 | version = 0.1 2 | [default.deploy.parameters] 3 | stack_name = "long-content-extension-arm" 4 | resolve_s3 = true 5 | s3_prefix = "long-content-extension-arm" 6 | region = "us-west-2" 7 | capabilities = "CAPABILITY_IAM" 8 | parameter_overrides = "DeploymentStage=\"test\" AgentContextTableName=\"AgentContext\" MaxContentSize=\"5000\" CreateDynamoDBTable=\"false\"" 9 | image_repositories = [] 10 | -------------------------------------------------------------------------------- /lambda/extensions/long-content/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Lambda Runtime API Proxy Extension library 2 | 3 | pub mod env; 4 | pub mod route; 5 | pub mod sandbox; 6 | pub mod stats; 7 | pub mod transform; 8 | 9 | /// Name to register with the Lambda Extension API. 10 | pub const EXTENSION_NAME: &str = "lrap"; 11 | 12 | /// Default port to listen on, overridden by LRAP_LISTENER_PORT environment variable 13 | pub const DEFAULT_PROXY_PORT: u16 = 9009; 14 | 15 | /// Lambda Runtime API version 16 | pub static LAMBDA_RUNTIME_API_VERSION: &str = "2018-06-01"; 17 | -------------------------------------------------------------------------------- /lambda/extensions/long-content/src/stats.rs: -------------------------------------------------------------------------------- 1 | //! Hold global-state of timing metrics for Application processing event and LRAP extension latency 2 | //! 3 | use std::time::Instant; 4 | 5 | use once_cell::sync::OnceCell; 6 | use parking_lot::Mutex; 7 | 8 | static INIT_START: OnceCell<Instant> = OnceCell::new(); 9 | static APP_START: OnceCell<Instant> = OnceCell::new(); 10 | 11 | static EVENT_START: Mutex<Option<Instant>> = Mutex::new(None); 12 | 13 | pub fn init_start() { 14 | INIT_START.set(Instant::now()).unwrap(); 15 | } 16 | pub fn app_start() { 17 | APP_START.set(Instant::now()).unwrap(); 18 | } 19 | 20 | #[allow(dead_code)] 21 | pub fn get_next_event() { 22 | match *EVENT_START.lock() { 23 | None => { 24 | eprintln!( 25 | "[LRAP] LRAP init : {} us", 26 | APP_START 27 | .get() 28 | .unwrap() 29 | .duration_since(*INIT_START.get().unwrap()) 30 | .as_micros() 31 | ); 32 | eprintln!( 33 | "[LRAP] App init : {} us", 34 | APP_START.get().unwrap().elapsed().as_micros() 35 | ); 36 | } 37 | Some(event_start) => { 38 | eprintln!( 39 | "[LRAP] App run time : {} us", 40 | event_start.elapsed().as_micros() 41 | ); 42 | } 43 | } 44 | } 45 | 46 | #[allow(dead_code)] 47 | pub fn event_start() { 48 | EVENT_START.lock().replace(Instant::now()); 49 | } 50 | -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/index.py: -------------------------------------------------------------------------------- 1 | # This lambda function will be used as a tool EarthQuakeQuery for the AI Agent platform 2 | 3 | # Imports for Tool 4 | import requests 5 | import json 6 | 7 | # Imports for Lambda 8 | from aws_lambda_powertools import Logger 9 | from aws_lambda_powertools import Tracer 10 | from aws_lambda_powertools.utilities import parameters 11 | 12 | # Initialize the logger and tracer 13 | logger = Logger(level="INFO") 14 | tracer = Tracer() 15 | 16 | # Tool Functions 17 | def query_earthquakes(starttime, endtime): 18 | """ 19 | Function to retrieve
earthquake data from the USGS Earthquake Hazards Program API. 20 | 21 | :param starttime: The start time for the earthquake data in YYYY-MM-DD format. 22 | :param endtime: The end time for the earthquake data in YYYY-MM-DD format. 23 | :return: The result of the earthquake data retrieval as a dictionary. 24 | """ 25 | url = "https://earthquake.usgs.gov/fdsnws/event/1/query" 26 | params = { 27 | "format": "geojson", 28 | "starttime": starttime, 29 | "endtime": endtime, 30 | "minmagnitude": 4.5, 31 | "limit": 1000 32 | } 33 | 34 | try: 35 | response = requests.get(url, params=params) 36 | logger.info(f"Earthquake API response: {response}") 37 | response.raise_for_status() # Raises an error for HTTP errors 38 | return json.dumps(response.json(), indent=2) 39 | 40 | except requests.exceptions.RequestException as e: 41 | logger.error(f"Error retrieving earthquake data: {e}") 42 | return {"error": "An error occurred while retrieving earthquake data."} 43 | 44 | api_tool = { 45 | "function": query_earthquakes, 46 | "definition": { 47 | "name": "query_earthquakes", 48 | "description": "Retrieve earthquake data from the USGS Earthquake Hazards Program API and display the .", 49 | "parameters": { 50 | "type": "object", 51 | "properties": { 52 | "start_date": { 53 | "type": "string", 54 | "description": "The start date for the earthquake data in YYYY-MM-DD format." 55 | }, 56 | "end_date": { 57 | "type": "string", 58 | "description": "The end date for the earthquake data in YYYY-MM-DD format." 59 | } 60 | }, 61 | "required": [ 62 | "start_date", 63 | "end_date" 64 | ] 65 | } 66 | } 67 | } 68 | 69 | 70 | @tracer.capture_method 71 | def lambda_handler(event, context): 72 | # Get the tool name from the input event 73 | tool_use = event 74 | tool_name = tool_use['name'] 75 | tool_input = tool_use['input'] 76 | 77 | logger.info(f"Tool name: {tool_name}") 78 | match tool_name: 79 | case 'query_earthquakes': 80 | result = query_earthquakes( 81 | tool_input['start_date'], 82 | tool_input['end_date'] 83 | ) 84 | 85 | # Add more tools functions here as needed 86 | 87 | case _: 88 | result = json.dumps({ 89 | 'error': f"Unknown tool name: {tool_name}" 90 | }) 91 | 92 | return { 93 | "type": "tool_result", 94 | "name": tool_name, 95 | "tool_use_id": tool_use["id"], 96 | "content": result 97 | } -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/requirements.in: -------------------------------------------------------------------------------- 1 | requests 2 | aws-lambda-powertools 3 | aws_xray_sdk 4 | -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | aws-lambda-powertools==3.6.0 4 | # via -r requirements.in 5 | aws-xray-sdk==2.14.0 6 | # via -r requirements.in 7 | botocore==1.36.20 8 | # via aws-xray-sdk 9 | certifi==2025.1.31 10 | # via requests 11 | charset-normalizer==3.4.1 12 | # via requests 13 | idna==3.10 14 | # via requests 15 | jmespath==1.0.1 16 | # via 17 | # aws-lambda-powertools 18 | # botocore 19 | python-dateutil==2.9.0.post0 20 | # via botocore 21 | requests==2.32.3 22 | # via -r requirements.in 23 | six==1.17.0 24 | # via python-dateutil 25 | typing-extensions==4.12.2 26 | # via aws-lambda-powertools 27 | urllib3==2.3.0 28 | # via 29 | # 
botocore 30 | # requests 31 | wrapt==1.17.2 32 | # via aws-xray-sdk 33 | -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # Query interface to the USGS Earthquake Catalog API 7 | EarthQuakeQuery: 8 | Type: AWS::Serverless::Function 9 | Properties: 10 | CodeUri: . 11 | Handler: index.lambda_handler 12 | Runtime: python3.12 13 | Timeout: 90 14 | MemorySize: 128 15 | Environment: 16 | Variables: 17 | POWERTOOLS_SERVICE_NAME: EarthQuakeQuery 18 | Architectures: 19 | - arm64 20 | Policies: 21 | - SecretsManagerRead 22 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/tools/EarthQuakeQuery/tests/__init__.py -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/tests/requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest>=7.4.0 -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "start_date": "2024-01-01", 5 | "end_date": "2024-01-02" 6 | }, 7 | "name": "query_earthquakes", 8 | "type": "tool_use" 9 | } -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQuery/tests/test_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from index import lambda_handler 4 | 5 | @pytest.fixture 6 | def input_event(): 7 | return { 8 | "id": "uniquetooluseid", 9 | "input": { 10 | "start_date": "2024-01-01" 11 | }, 12 | "name": "EarthQuakeQuery", 13 | "type": "tool_use" 14 | } 15 | 16 | 17 | def test_lambda_handler(input_event): 18 | # Test the handler 19 | response = lambda_handler(input_event, None) 20 | 21 | # Assert response structure 22 | assert response["type"] == "tool_result" 23 | assert response["tool_use_id"] == "uniquetooluseid" 24 | assert "content" in response 25 | 26 | 27 | -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQueryTS/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "EarthQuakeQueryTS", 3 | "version": "1.0.0", 4 | "description": "Query interface to the USGS Earthquake Catalog API", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "build": "tsc", 8 | "watch": "tsc -w", 9 | "test": "tsx src/local-test.ts", 10 | "prebuild": "rm -rf dist/", 11 | "prestart": "npm run build" 12 | }, 13 | "keywords": [], 14 | "author": "", 15 | "license": "ISC", 16 | "dependencies": { 17 | "@types/aws-lambda": "^8.10.147", 18 | "@aws-lambda-powertools/logger": "^2.12.0", 19 | "@aws-lambda-powertools/parameters": "^2.12.0", 20 | "@aws-lambda-powertools/tracer": "2.13.1", 21 | "@aws-sdk/client-secrets-manager": "^3.741.0", 22 | "node-fetch": "2.6.7", 23 | 
"@types/node-fetch": "^2.6.4" 24 | }, 25 | "devDependencies": { 26 | "@types/jest": "^29.x", 27 | "@types/node": "22.10.6", 28 | "jest": "^29.x", 29 | "ts-jest": "^29.x", 30 | "typescript": "5.7.3", 31 | "tsx": "4.19.2", 32 | "dotenv": "^16.x" 33 | } 34 | } -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQueryTS/src/local-test.ts: -------------------------------------------------------------------------------- 1 | const { handler } = require('./index'); 2 | import { Context } from 'aws-lambda'; 3 | 4 | async function runTest() { 5 | try { 6 | console.log('Starting integration test...'); 7 | 8 | // Create a test event 9 | console.log('Creating test event...'); 10 | const testEvent = { 11 | "name": "query_earthquakes", 12 | "id": "unique_request_id", 13 | "input": { 14 | "start_date": "2024-01-01", 15 | "end_date": "2024-01-02" 16 | } 17 | }; 18 | 19 | // Create a mock context object 20 | const mockContext: Context = { 21 | callbackWaitsForEmptyEventLoop: true, 22 | functionName: 'BooksAPILambda', 23 | functionVersion: '1', 24 | invokedFunctionArn: 'arn:aws:lambda:local:000000000000:function:EarthQuakeQueryTS', 25 | memoryLimitInMB: '128', 26 | awsRequestId: 'local-test', 27 | logGroupName: '/aws/lambda/EarthQuakeQueryTS', 28 | logStreamName: 'local-test', 29 | getRemainingTimeInMillis: () => 30000, 30 | done: () => {}, 31 | fail: () => {}, 32 | succeed: () => {}, 33 | }; 34 | const mockCallback = () => null; // Simple null callback 35 | 36 | // Then process it with our handler 37 | console.log('Testing handler...'); 38 | const result = await handler(testEvent, mockContext, ); 39 | 40 | // Print results 41 | console.log('\nTest Results:'); 42 | console.log('------------------------'); 43 | console.log('result:', result); 44 | console.log('------------------------'); 45 | // if (result.statusCode === 200) { 46 | // console.log('\nExtracted Text:'); 47 | // console.log('------------------------'); 48 | // console.log(result.body.text); 49 | // console.log('------------------------'); 50 | // } else { 51 | // console.log('Error:', result.body); 52 | // } 53 | 54 | } catch (error) { 55 | console.error('Test failed:', error); 56 | } 57 | } 58 | 59 | runTest(); 60 | -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQueryTS/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # Query interface to the USGS Earthquake Catalog API 7 | EarthQuakeQueryTS: 8 | Type: AWS::Serverless::Function 9 | Properties: 10 | CodeUri: . 
11 | Handler: dist/index.handler 12 | Description: "Query interface to the USGS Earthquake Catalog API" 13 | Runtime: nodejs18.x 14 | Timeout: 90 15 | MemorySize: 128 16 | Environment: 17 | Variables: 18 | POWERTOOLS_SERVICE_NAME: EarthQuakeQueryTS 19 | Architectures: 20 | - arm64 21 | Policies: 22 | - SecretsManagerRead 23 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQueryTS/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/tools/EarthQuakeQueryTS/tests/__init__.py -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQueryTS/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "start_date": "2024-01-01", 5 | "end_date": "2024-01-02" 6 | }, 7 | "name": "query_earthquakes", 8 | "type": "tool_use" 9 | } -------------------------------------------------------------------------------- /lambda/tools/EarthQuakeQueryTS/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2022", 4 | "strict": true, 5 | "preserveConstEnums": true, 6 | "sourceMap": false, 7 | "module": "commonjs", 8 | "moduleResolution": "node", 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "isolatedModules": true, 13 | "outDir": "./dist", 14 | "rootDir": "./src" 15 | }, 16 | "include": [ 17 | "src/**/*" 18 | ], 19 | "exclude": [ 20 | "node_modules", 21 | "**/*.test.ts" 22 | ] 23 | } -------------------------------------------------------------------------------- /lambda/tools/MicrosoftGraphAPI/requirements.in: -------------------------------------------------------------------------------- 1 | aws-lambda-powertools 2 | aws_xray_sdk 3 | requests -------------------------------------------------------------------------------- /lambda/tools/MicrosoftGraphAPI/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | aws-lambda-powertools==3.6.0 4 | # via -r requirements.in 5 | aws-xray-sdk==2.14.0 6 | # via -r requirements.in 7 | botocore==1.36.24 8 | # via aws-xray-sdk 9 | certifi==2025.1.31 10 | # via requests 11 | charset-normalizer==3.4.1 12 | # via requests 13 | idna==3.10 14 | # via requests 15 | jmespath==1.0.1 16 | # via 17 | # aws-lambda-powertools 18 | # botocore 19 | python-dateutil==2.9.0.post0 20 | # via botocore 21 | requests==2.32.3 22 | # via -r requirements.in 23 | six==1.17.0 24 | # via python-dateutil 25 | typing-extensions==4.12.2 26 | # via aws-lambda-powertools 27 | urllib3==2.3.0 28 | # via 29 | # botocore 30 | # requests 31 | wrapt==1.17.2 32 | # via aws-xray-sdk 33 | -------------------------------------------------------------------------------- /lambda/tools/MicrosoftGraphAPI/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # Interface to the Microsoft Graph API of a specific tenant. 
7 | MicrosoftGraphAPI: 8 | Type: AWS::Serverless::Function 9 | Properties: 10 | CodeUri: . 11 | Handler: index.lambda_handler 12 | Runtime: python3.12 13 | Timeout: 90 14 | MemorySize: 128 15 | Environment: 16 | Variables: 17 | POWERTOOLS_SERVICE_NAME: MicrosoftGraphAPI 18 | Architectures: 19 | - arm64 20 | Policies: 21 | - SecretsManagerRead 22 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/MicrosoftGraphAPI/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/tools/MicrosoftGraphAPI/tests/__init__.py -------------------------------------------------------------------------------- /lambda/tools/MicrosoftGraphAPI/tests/requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest>=7.4.0 -------------------------------------------------------------------------------- /lambda/tools/MicrosoftGraphAPI/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "endpoint": "users", 5 | "method": "GET" 6 | }, 7 | "name": "MicrosoftGraphAPI", 8 | "type": "tool_use" 9 | } -------------------------------------------------------------------------------- /lambda/tools/MicrosoftGraphAPI/tests/test_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from index import lambda_handler 4 | 5 | @pytest.fixture 6 | def get_event(): 7 | return { 8 | "id": "uniquetooluseid", 9 | "input": { 10 | "endpoint": "users", 11 | "method": "GET" 12 | }, 13 | "name": "MicrosoftGraphAPI", 14 | "type": "tool_use" 15 | } 16 | 17 | @pytest.fixture 18 | def post_event(): 19 | return { 20 | "id": "uniquetooluseid", 21 | "input": { 22 | "endpoint": "me/sendMail", 23 | "method": "POST", 24 | "data": { 25 | "message": { 26 | "subject": "Test email", 27 | "body": { 28 | "contentType": "HTML", 29 | "content": "
This is a test email.
" 30 | }, 31 | "toRecipients": [ 32 | { 33 | "emailAddress": { 34 | "address": "test@example.com" 35 | } 36 | } 37 | ] 38 | }, 39 | "saveToSentItems": True 40 | } 41 | }, 42 | "name": "MicrosoftGraphAPI", 43 | "type": "tool_use" 44 | } 45 | 46 | @pytest.fixture 47 | def legacy_event(): 48 | return { 49 | "id": "uniquetooluseid", 50 | "input": { 51 | "query": "users" 52 | }, 53 | "name": "MicrosoftGraphAPI", 54 | "type": "tool_use" 55 | } 56 | 57 | 58 | def test_get_request(get_event): 59 | # Test the handler with GET request 60 | response = lambda_handler(get_event, None) 61 | 62 | # Assert response structure 63 | assert response["type"] == "tool_result" 64 | assert response["tool_use_id"] == "uniquetooluseid" 65 | assert "content" in response 66 | assert "Error" not in response["content"] 67 | 68 | def test_legacy_request(legacy_event): 69 | # Test the handler with legacy format 70 | response = lambda_handler(legacy_event, None) 71 | 72 | # Assert response structure 73 | assert response["type"] == "tool_result" 74 | assert response["tool_use_id"] == "uniquetooluseid" 75 | assert "content" in response 76 | assert "Error" not in response["content"] 77 | 78 | -------------------------------------------------------------------------------- /lambda/tools/SemanticSearchRust/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "SemanticSearchRust" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | serde_json = "1" 8 | 9 | lambda_runtime = "0.13.0" 10 | tokio = { version = "1", features = ["macros"] } 11 | serde = "1.0.217" 12 | anyhow = "1.0" 13 | 14 | qdrant-client = "1.13.0" 15 | reqwest = { version = "0.11.18", default-features = false, features = ["json", "rustls-tls"] } 16 | aws-sdk-secretsmanager = "1.63.0" 17 | aws-sdk-ssm = "1.63.0" 18 | aws-config = "1.5.16" 19 | -------------------------------------------------------------------------------- /lambda/tools/SemanticSearchRust/src/main.rs: -------------------------------------------------------------------------------- 1 | use lambda_runtime::{run, service_fn, tracing, Error}; 2 | 3 | mod event_handler; 4 | use event_handler::function_handler; 5 | 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<(), Error> { 9 | tracing::init_default_subscriber(); 10 | 11 | run(service_fn(function_handler)).await 12 | } 13 | -------------------------------------------------------------------------------- /lambda/tools/SemanticSearchRust/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # Semantic search using vector database (Qdrant) in Rust 7 | SemanticSearchRust: 8 | Type: AWS::Serverless::Function 9 | Metadata: 10 | BuildMethod: rust-cargolambda 11 | Properties: 12 | CodeUri: . 13 | Handler: bootstrap 14 | Runtime: provided.al2 15 | Timeout: 90 16 | MemorySize: 128 17 | Environment: 18 | Variables: 19 | POWERTOOLS_SERVICE_NAME: SemanticSearchRust 20 | Architectures: 21 | - arm64 22 | Policies: 23 | - SecretsManagerRead 24 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/SemanticSearchRust/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "search_query": "How many vacation days can I take?" 
5 | }, 6 | "name": "semantic_search_rust", 7 | "type": "tool_use" 8 | } -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md - WebScraperMemory Codebase Guide 2 | 3 | ## Purpose 4 | 5 | The WebScraperMemory Lambda function tool is part of an AI agent that learns and extracts information from websites. The Memory component is responsible for storing in, and retrieving from, a DynamoDB table the extraction scripts that the WebScraper tool performs. The tool reads and writes two types of records: 6 | 7 | * Site Schema - Describes the site functionality and the set of scripts that are already available to extract information from the site. 8 | * Extraction Script - Describes a script that can be used to extract a specific type of information from the site. 9 | 10 | The WebScraperMemory tool helps the AI Agent and its LLM quickly learn and adapt to new sites, and extract information from them efficiently once a successful script is created. 11 | 12 | ## Build/Test Commands 13 | 14 | - Build for production: `cargo lambda build --arm64 --release` 15 | - Build for development: `cargo lambda build --arm64` 16 | - Run tests: `cargo test` 17 | - Run single test: `cargo test test_event_handler` 18 | - Local invoke with SAM: `sam build && sam local invoke WebScraperMemory --event tests/test-event.json` 19 | 20 | ## Code Style Guidelines 21 | 22 | - **Formatting**: Use standard Rust formatting (rustfmt) 23 | - **Error Handling**: Use `anyhow::Result` for function results, with contextual error messages 24 | - **Types**: Define custom types with proper Serde derive macros (`Serialize`, `Deserialize`) 25 | - **Naming**: 26 | - Use PascalCase for types and structs 27 | - Use snake_case for variables and module names 28 | - Prefix internal functions with `pub(crate)` 29 | - **File Structure**: 30 | - `main.rs`: Entry point with minimal logic 31 | - `event_handler.rs`: Event parsing and core logic 32 | - **Testing**: Include unit tests in the same file as the implementation with the `#[cfg(test)]` attribute 33 | - **Imports**: Order imports by standard library, then external crates, then local modules 34 | - **Error Logging**: Use `tracing` crate for all logging (info, error, debug) -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "WebScraperMemory" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | serde_json = "1" 8 | lambda_runtime = "0.13.0" 9 | tokio = { version = "1", features = ["macros"] } 10 | serde = { version = "1.0.217", features = ["derive"] } 11 | anyhow = "1.0" 12 | aws-config = "1.5.17" 13 | aws-sdk-dynamodb = "1.66.0" 14 | url = "2.4.1" 15 | tracing = "0.1" 16 | async-trait = "0.1.74" 17 | 18 | -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/src/main.rs: -------------------------------------------------------------------------------- 1 | use lambda_runtime::{run, service_fn, tracing, Error}; 2 | 3 | mod event_handler; 4 | use event_handler::function_handler; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Error> { 8 | tracing::init_default_subscriber(); 9 | 10 | // Run the Lambda function handler 11 |
run(service_fn(function_handler)).await 12 | } 13 | -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # WebScraperMemory tool in Rust 7 | WebScraperMemory: 8 | Type: AWS::Serverless::Function 9 | Metadata: 10 | BuildMethod: rust-cargolambda 11 | Properties: 12 | CodeUri: . 13 | Handler: bootstrap 14 | Runtime: provided.al2023 15 | Timeout: 30 16 | MemorySize: 128 17 | Environment: 18 | Variables: 19 | POWERTOOLS_SERVICE_NAME: WebScraperMemory 20 | SCHEMAS_TABLE_NAME: WebScraperSchemas 21 | SCRIPTS_TABLE_NAME: WebScraperScripts 22 | Architectures: 23 | - arm64 24 | Policies: 25 | - AWSLambdaBasicExecutionRole 26 | - DynamoDBCrudPolicy: 27 | TableName: WebScraperSchemas 28 | - DynamoDBCrudPolicy: 29 | TableName: WebScraperScripts -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/tests/get-extraction-script-test.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "url": "https://www.weather.gov", 5 | "info_type": "weather_forecast" 6 | }, 7 | "name": "get_extraction_script", 8 | "type": "tool_use" 9 | } -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/tests/save-extraction-script-test.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "url": "https://www.weather.gov", 5 | "info_type": "weather_forecast", 6 | "script": { 7 | "url": "https://www.weather.gov", 8 | "actions": [ 9 | { 10 | "type": "type", 11 | "selector": "#inputstring", 12 | "text": "New York, NY" 13 | }, 14 | { 15 | "type": "click", 16 | "selector": "#btnSearch" 17 | }, 18 | { 19 | "type": "wait", 20 | "timeMs": 2000 21 | }, 22 | { 23 | "type": "waitForSelector", 24 | "selector": "#detailed-forecast" 25 | } 26 | ], 27 | "extractSelectors": { 28 | "containers": ["#detailed-forecast", ".forecast-label", ".forecast-text", ".temp"], 29 | "links": [".forecast-icon a"], 30 | "images": [".forecast-icon img"] 31 | }, 32 | "fullPageScreenshot": true 33 | } 34 | }, 35 | "name": "save_extraction_script", 36 | "type": "tool_use" 37 | } -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/tests/save-site-schema-test.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "url": "https://example.com", 5 | "info_types": ["product_ingredients", "product_reviews", "product_details"], 6 | "site_metadata": { 7 | "site_name": "Example Store", 8 | "site_type": "e-commerce", 9 | "product_page_pattern": "/product/.*" 10 | } 11 | }, 12 | "name": "save_site_schema", 13 | "type": "tool_use" 14 | } -------------------------------------------------------------------------------- /lambda/tools/WebScraperMemory/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "url": "https://example.com" 5 | }, 6 | "name": "get_site_schema", 7 | "type": "tool_use" 8 | } 
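The WebScraperMemory CLAUDE.md above covers local invocation with `sam local invoke`; once the stack is deployed, the same test events can be replayed against the live function. Below is a hedged sketch using boto3: the function name and region are assumptions for illustration (substitute whatever name your deployment produced), and the response shape follows the `tool_result` contract used throughout this repository.

```python
# Hedged sketch: replay one of the WebScraperMemory test events against a
# deployed function. FunctionName and region_name are assumptions.
import json

import boto3

lambda_client = boto3.client("lambda", region_name="us-west-2")

# One of the test events shipped with the tool (shown above).
with open("lambda/tools/WebScraperMemory/tests/get-extraction-script-test.json") as f:
    event = json.load(f)

response = lambda_client.invoke(
    FunctionName="WebScraperMemory",          # assumed deployed function name
    Payload=json.dumps(event).encode("utf-8"),
)

result = json.loads(response["Payload"].read())
# Tools in this repository respond with {"type": "tool_result", "tool_use_id": ..., "content": ...}
print(result.get("type"), result.get("content"))
```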
-------------------------------------------------------------------------------- /lambda/tools/books-recommender/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nyt-books", 3 | "version": "1.0.0", 4 | "description": "Tools for NYT books AI Agent", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "build": "tsc", 8 | "watch": "tsc -w", 9 | "test": "tsx src/local-test.ts", 10 | "prebuild": "rm -rf dist/", 11 | "prestart": "npm run build" 12 | }, 13 | "keywords": [], 14 | "author": "", 15 | "license": "ISC", 16 | "dependencies": { 17 | "@types/aws-lambda": "^8.10.147", 18 | "@aws-lambda-powertools/logger": "^2.12.0", 19 | "@aws-lambda-powertools/parameters": "^2.12.0", 20 | "@aws-lambda-powertools/tracer": "2.13.1", 21 | "@aws-sdk/client-secrets-manager": "^3.741.0", 22 | "node-fetch": "2.6.7", 23 | "@types/node-fetch": "^2.6.4" 24 | }, 25 | "devDependencies": { 26 | "@types/jest": "^29.x", 27 | "@types/node": "22.10.6", 28 | "jest": "^29.x", 29 | "ts-jest": "^29.x", 30 | "typescript": "5.7.3", 31 | "tsx": "4.19.2", 32 | "dotenv": "^16.x" 33 | } 34 | } -------------------------------------------------------------------------------- /lambda/tools/books-recommender/src/local-test.ts: -------------------------------------------------------------------------------- 1 | const { handler } = require('./index'); 2 | import { Context } from 'aws-lambda'; 3 | 4 | async function runTest() { 5 | try { 6 | console.log('Starting integration test...'); 7 | 8 | // Create a test event 9 | console.log('Creating test event...'); 10 | const testEvent = { 11 | "name": "get_nyt_books", 12 | "id": "unique_request_id", 13 | "input": { 14 | "genre": "hardcover-fiction" 15 | } 16 | }; 17 | 18 | // Create a mock context object 19 | const mockContext: Context = { 20 | callbackWaitsForEmptyEventLoop: true, 21 | functionName: 'BooksAPILambda', 22 | functionVersion: '1', 23 | invokedFunctionArn: 'arn:aws:lambda:local:000000000000:function:BooksAPILambda', 24 | memoryLimitInMB: '128', 25 | awsRequestId: 'local-test', 26 | logGroupName: '/aws/lambda/BooksAPILambda', 27 | logStreamName: 'local-test', 28 | getRemainingTimeInMillis: () => 30000, 29 | done: () => {}, 30 | fail: () => {}, 31 | succeed: () => {}, 32 | }; 33 | const mockCallback = () => null; // Simple null callback 34 | 35 | // Then process it with our handler 36 | console.log('Testing handler...'); 37 | const result = await handler(testEvent, mockContext, ); 38 | 39 | // Print results 40 | console.log('\nTest Results:'); 41 | console.log('------------------------'); 42 | console.log('result:', result); 43 | console.log('------------------------'); 44 | // if (result.statusCode === 200) { 45 | // console.log('\nExtracted Text:'); 46 | // console.log('------------------------'); 47 | // console.log(result.body.text); 48 | // console.log('------------------------'); 49 | // } else { 50 | // console.log('Error:', result.body); 51 | // } 52 | 53 | } catch (error) { 54 | console.error('Test failed:', error); 55 | } 56 | } 57 | 58 | runTest(); 59 | -------------------------------------------------------------------------------- /lambda/tools/books-recommender/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | 3 | AWSTemplateFormatVersion: '2010-09-09' 4 | Transform: AWS::Serverless-2016-10-31 5 | 6 | Resources: 7 | BooksToolLambda: 8 | Type: AWS::Serverless::Function 9 | Properties: 10 | CodeUri: . 
11 | Handler: dist/index.handler 12 | Description: NYT Books Tool Lambda Function 13 | Runtime: nodejs18.x 14 | Timeout: 90 15 | MemorySize: 128 16 | Environment: 17 | Variables: 18 | POWERTOOLS_SERVICE_NAME: ai-agents 19 | Architectures: 20 | - arm64 21 | Policies: 22 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/books-recommender/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "get_nyt_books", 3 | "id": "unique_request_id", 4 | "input": { 5 | "genre": "hardcover-fiction" 6 | } 7 | } -------------------------------------------------------------------------------- /lambda/tools/books-recommender/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2022", 4 | "strict": true, 5 | "preserveConstEnums": true, 6 | "sourceMap": false, 7 | "module": "commonjs", 8 | "moduleResolution": "node", 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "isolatedModules": true, 13 | "outDir": "./dist", 14 | "rootDir": "./src" 15 | }, 16 | "include": [ 17 | "src/**/*" 18 | ], 19 | "exclude": [ 20 | "node_modules", 21 | "**/*.test.ts" 22 | ] 23 | } -------------------------------------------------------------------------------- /lambda/tools/code-interpreter/README.md: -------------------------------------------------------------------------------- 1 | # ![Python Logo](https://cdn.simpleicons.org/python?size=48) Python Example: Code Interpreter Tools 2 | 3 | This directory contains the implementation of the tools for Code Interpreter AI Agent in **Python**, based on [E2B service](https://e2b.dev/). 4 | 5 | ## Folder structure 6 | 7 | ```txt 8 | code-interpreter/ 9 | ├── index.py 10 | ├── requirements.in 11 | ├── requirements.txt 12 | └── README.md 13 | ``` 14 | 15 | ## Tool list 16 | 17 | The tools are: 18 | 19 | * `execute_code`: Execute code and return the result. 20 | 21 | ## Input and output 22 | 23 | The Lambda function for the tools receive the input as a JSON object, and return the output as a JSON object. 24 | 25 | ```python 26 | def lambda_handler(event, context): 27 | # Get the tool name from the input event 28 | tool_use = event 29 | tool_name = tool_use.get('name') 30 | tool_input = tool_use.get('input') 31 | 32 | try: 33 | match tool_name: 34 | case "code_interpreter": 35 | code = tool_input.get('code') 36 | result = code_interpret(code) 37 | ... 38 | case _: 39 | result = f"Unknown tool: {tool_name}" 40 | ``` 41 | 42 | The tools return the output as a JSON object, with the result in the `content` field as a string. 43 | 44 | ```python 45 | ... 46 | logger.info("Code execution finished", extra={"result": result}) 47 | # Return the execution results 48 | return { 49 | "type": "tool_result", 50 | "name": tool_name, 51 | "tool_use_id": tool_use["id"], 52 | "content": result 53 | } 54 | 55 | except Exception as e: 56 | logger.exception("Error executing code") 57 | return { 58 | "type": "tool_result", 59 | "name": tool_name, 60 | "tool_use_id": tool_use["id"], 61 | "content": str(e) 62 | } 63 | ``` 64 | 65 | ## API Key 66 | 67 | Tools often need to make requests to external APIs, such as Google Maps API. This requires an API key. Although it is possible to use environment variables to store the API key, it is recommended to use a Secrets Manager to store the API key. 
The secrets are stored from the main CDK stack that reads the local various API keys from an .env file. 68 | 69 | The following code snippet shows how to initialize the API key. 70 | 71 | ```python 72 | # Global API key 73 | E2B_API_KEY = json.loads(parameters.get_secret("/ai-agent/E2B_API_KEY"))["E2B_API_KEY"] 74 | ``` 75 | -------------------------------------------------------------------------------- /lambda/tools/code-interpreter/requirements.in: -------------------------------------------------------------------------------- 1 | e2b_code_interpreter==1.0.0 # Needed for dynamic code execution 2 | aws-lambda-powertools>=2.30.2 # AWS Lambda Powertools for Python 3 | -------------------------------------------------------------------------------- /lambda/tools/code-interpreter/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile lambda/code-interpreter/requirements.in --output-file lambda/code-interpreter/requirements.txt 3 | anyio==4.7.0 4 | # via httpx 5 | attrs==24.3.0 6 | # via 7 | # e2b 8 | # e2b-code-interpreter 9 | aws-lambda-powertools==3.3.0 10 | # via -r lambda/code-interpreter/requirements.in 11 | certifi==2024.12.14 12 | # via 13 | # httpcore 14 | # httpx 15 | e2b==1.0.5 16 | # via e2b-code-interpreter 17 | e2b-code-interpreter==1.0.0 18 | # via -r lambda/code-interpreter/requirements.in 19 | h11==0.14.0 20 | # via httpcore 21 | httpcore==1.0.7 22 | # via 23 | # e2b 24 | # httpx 25 | httpx==0.27.2 26 | # via 27 | # e2b 28 | # e2b-code-interpreter 29 | idna==3.10 30 | # via 31 | # anyio 32 | # httpx 33 | jmespath==1.0.1 34 | # via aws-lambda-powertools 35 | packaging==24.2 36 | # via e2b 37 | protobuf==5.29.1 38 | # via e2b 39 | python-dateutil==2.9.0.post0 40 | # via e2b 41 | six==1.17.0 42 | # via python-dateutil 43 | sniffio==1.3.1 44 | # via 45 | # anyio 46 | # httpx 47 | typing-extensions==4.12.2 48 | # via 49 | # anyio 50 | # aws-lambda-powertools 51 | # e2b 52 | -------------------------------------------------------------------------------- /lambda/tools/db-interface/requirements.in: -------------------------------------------------------------------------------- 1 | pandas -------------------------------------------------------------------------------- /lambda/tools/db-interface/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile lambda/db-interface/requirements.in --output-file lambda/db-interface/requirements.txt 3 | numpy==2.2.0 4 | # via pandas 5 | pandas==2.2.3 6 | # via -r lambda/db-interface/requirements.in 7 | python-dateutil==2.9.0.post0 8 | # via pandas 9 | pytz==2024.2 10 | # via pandas 11 | six==1.17.0 12 | # via python-dateutil 13 | tzdata==2024.2 14 | # via pandas 15 | -------------------------------------------------------------------------------- /lambda/tools/google-maps/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "google-maps", 3 | "version": "1.0.0", 4 | "description": "Tools for google maps AI Agent", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "build": "tsc", 8 | "watch": "tsc -w", 9 | "test": "tsx src/local-test.ts", 10 | "prebuild": "rm -rf dist/", 11 | "prestart": "npm run build" 12 | }, 13 | "keywords": [], 14 | "author": "", 15 | "license": "ISC", 16 | "dependencies": { 17 | "@types/aws-lambda": 
"^8.10.147", 18 | "@aws-lambda-powertools/logger": "^2.12.0", 19 | "@aws-lambda-powertools/parameters": "^2.12.0", 20 | "@aws-lambda-powertools/tracer": "2.13.1", 21 | "@aws-sdk/client-secrets-manager": "^3.741.0", 22 | "node-fetch": "2.6.7", 23 | "@types/node-fetch": "^2.6.4" 24 | }, 25 | "devDependencies": { 26 | "@types/jest": "^29.x", 27 | "@types/node": "22.13.1", 28 | "jest": "^29.x", 29 | "ts-jest": "^29.x", 30 | "typescript": "5.7.3", 31 | "tsx": "4.19.2", 32 | "dotenv": "^16.x" 33 | } 34 | } -------------------------------------------------------------------------------- /lambda/tools/google-maps/src/local-test.ts: -------------------------------------------------------------------------------- 1 | import test from 'node:test'; 2 | import { handler } from './index'; 3 | import { Context } from 'aws-lambda'; 4 | 5 | async function runTest() { 6 | try { 7 | console.log('Starting integration test...'); 8 | 9 | // Create a test event 10 | console.log('Creating test event...'); 11 | const testEvent = { 12 | "id": "toolu_01VP3mpAtB5beEzV7HuAvYvU", 13 | "input": { 14 | "origin": "Narita International Airport, Narita, Chiba, Japan", 15 | "destination": "Shibuya Station, Tokyo, Japan", 16 | "travel_mode": "TRANSIT" 17 | }, 18 | "name": "maps_directions", 19 | "type": "tool_use" 20 | }; 21 | 22 | // Create a mock context object 23 | const mockContext: Context = { 24 | callbackWaitsForEmptyEventLoop: true, 25 | functionName: 'GoogleMapsLambda', 26 | functionVersion: '1', 27 | invokedFunctionArn: 'arn:aws:lambda:local:000000000000:function:GoogleMapsLambda', 28 | memoryLimitInMB: '128', 29 | awsRequestId: 'local-test', 30 | logGroupName: '/aws/lambda/GoogleMapsLambda', 31 | logStreamName: 'local-test', 32 | getRemainingTimeInMillis: () => 30000, 33 | done: () => {}, 34 | fail: () => {}, 35 | succeed: () => {}, 36 | }; 37 | const mockCallback = () => null; // Simple null callback 38 | 39 | // Then process it with our handler 40 | console.log('Testing handler...'); 41 | const result = await handler(testEvent, mockContext, mockCallback); 42 | 43 | // Print results 44 | console.log('\nTest Results:'); 45 | console.log('------------------------'); 46 | console.log('result:', result); 47 | console.log('------------------------'); 48 | // if (result.statusCode === 200) { 49 | // console.log('\nExtracted Text:'); 50 | // console.log('------------------------'); 51 | // console.log(result.body.text); 52 | // console.log('------------------------'); 53 | // } else { 54 | // console.log('Error:', result.body); 55 | // } 56 | 57 | } catch (error) { 58 | console.error('Test failed:', error); 59 | } 60 | } 61 | 62 | runTest(); 63 | -------------------------------------------------------------------------------- /lambda/tools/google-maps/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | GoogleMapsLambda: 7 | Type: AWS::Serverless::Function 8 | Properties: 9 | CodeUri: ./ 10 | Handler: dist/index.handler 11 | Description: GoogleMaps Tool Lambda Function 12 | Runtime: nodejs18.x 13 | Timeout: 90 14 | MemorySize: 128 15 | Environment: 16 | Variables: 17 | POWERTOOLS_SERVICE_NAME: ai-agents 18 | Architectures: 19 | - arm64 20 | Policies: 21 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/google-maps/tests/test-event.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "id": "toolu_01VP3mpAtB5beEzV7HuAvYvU", 3 | "input": { 4 | "origin": "Narita International Airport, Narita, Chiba, Japan", 5 | "destination": "Shibuya Station, Tokyo, Japan", 6 | "travel_mode": "TRANSIT" 7 | }, 8 | "name": "maps_directions", 9 | "type": "tool_use" 10 | } -------------------------------------------------------------------------------- /lambda/tools/google-maps/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2022", 4 | "strict": true, 5 | "preserveConstEnums": true, 6 | "sourceMap": false, 7 | "module": "commonjs", 8 | "moduleResolution": "node", 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "isolatedModules": true, 13 | "outDir": "./dist", 14 | "rootDir": "./src" 15 | }, 16 | "include": [ 17 | "src/**/*" 18 | ], 19 | "exclude": [ 20 | "node_modules", 21 | "**/*.test.ts" 22 | ] 23 | } -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/tools/graphql-interface/__init__.py -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/requirements.in: -------------------------------------------------------------------------------- 1 | gql>=3.5.0 2 | aiohttp>=3.9.0 3 | graphql-core>=3.2.3 4 | aws-lambda-powertools>=2.30.2 # AWS Lambda Powertools for Python 5 | aws_xray_sdk # AWS X-Ray SDK for Python for lambda tracing -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | aiohappyeyeballs==2.4.4 4 | # via aiohttp 5 | aiohttp==3.11.11 6 | # via -r requirements.in 7 | aiosignal==1.3.2 8 | # via aiohttp 9 | anyio==4.8.0 10 | # via gql 11 | attrs==25.1.0 12 | # via aiohttp 13 | aws-lambda-powertools==3.5.0 14 | # via -r requirements.in 15 | aws-xray-sdk==2.14.0 16 | # via -r requirements.in 17 | backoff==2.2.1 18 | # via gql 19 | botocore==1.36.9 20 | # via aws-xray-sdk 21 | frozenlist==1.5.0 22 | # via 23 | # aiohttp 24 | # aiosignal 25 | gql==3.5.0 26 | # via -r requirements.in 27 | graphql-core==3.2.6 28 | # via 29 | # -r requirements.in 30 | # gql 31 | idna==3.10 32 | # via 33 | # anyio 34 | # yarl 35 | jmespath==1.0.1 36 | # via 37 | # aws-lambda-powertools 38 | # botocore 39 | multidict==6.1.0 40 | # via 41 | # aiohttp 42 | # yarl 43 | propcache==0.2.1 44 | # via 45 | # aiohttp 46 | # yarl 47 | python-dateutil==2.9.0.post0 48 | # via botocore 49 | six==1.17.0 50 | # via python-dateutil 51 | sniffio==1.3.1 52 | # via anyio 53 | typing-extensions==4.12.2 54 | # via 55 | # anyio 56 | # aws-lambda-powertools 57 | urllib3==2.3.0 58 | # via botocore 59 | wrapt==1.17.2 60 | # via aws-xray-sdk 61 | yarl==1.18.3 62 | # via 63 | # aiohttp 64 | # gql 65 | -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/template.yaml: 
-------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 4 | 5 | Resources: 6 | # GraphQL Lambda Function Tool 7 | GraphQLToolLambda: 8 | Type: AWS::Serverless::Function 9 | Properties: 10 | CodeUri: . 11 | Handler: index.lambda_handler 12 | Runtime: python3.12 13 | Timeout: 90 14 | MemorySize: 128 15 | Environment: 16 | Variables: 17 | POWERTOOLS_SERVICE_NAME: graphql-tool 18 | Architectures: 19 | - arm64 20 | Policies: 21 | - SecretsManagerRead 22 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/tools/graphql-interface/tests/__init__.py -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/tests/requirements-test.txt: -------------------------------------------------------------------------------- 1 | pytest>=7.4.0 2 | -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/tests/test-query-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_tool_use_id", 3 | "input": { 4 | "graphql_query": "query test { organization { name } }" 5 | }, 6 | "name": "execute_graphql_query", 7 | "type": "tool_use" 8 | } -------------------------------------------------------------------------------- /lambda/tools/graphql-interface/tests/test_tool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from index import lambda_handler 4 | 5 | @pytest.fixture 6 | def generate_prompt_input_event(): 7 | return { 8 | "id": "uniquetooluseid", 9 | "input": { 10 | "description": "Can I ship wine from California to Washington?", 11 | }, 12 | "name": "generate_query_prompt", 13 | "type": "tool_use" 14 | } 15 | 16 | @pytest.fixture 17 | def execute_graphql_query_input_event(): 18 | return { 19 | "id": "uniquetooluseid", 20 | "input": { 21 | "graphql_query": "query test { organization { name } }" 22 | }, 23 | "name": "execute_graphql_query", 24 | "type": "tool_use" 25 | } 26 | 27 | def test_lambda_handler(generate_prompt_input_event, execute_graphql_query_input_event): 28 | # Test the handler 29 | response = lambda_handler(generate_prompt_input_event, None) 30 | 31 | # Assert response structure 32 | assert response["type"] == "tool_result" 33 | assert response["tool_use_id"] == "uniquetooluseid" 34 | assert "content" in response 35 | assert "Given" in response["content"] 36 | 37 | 38 | response = lambda_handler(execute_graphql_query_input_event, None) 39 | print(response) 40 | assert response["type"] == "tool_result" 41 | assert response["tool_use_id"] == "uniquetooluseid" 42 | assert "content" in response 43 | assert "organization" in response["content"] 44 | -------------------------------------------------------------------------------- /lambda/tools/image-analysis/requirements.in: -------------------------------------------------------------------------------- 1 | google-genai==0.6.0 2 | aws-lambda-powertools>=2.30.2 # AWS Lambda Powertools for Python 3 | aws_xray_sdk # AWS X-Ray SDK for Python for lambda tracing 
-------------------------------------------------------------------------------- /lambda/tools/image-analysis/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | annotated-types==0.7.0 4 | # via pydantic 5 | aws-lambda-powertools==3.5.0 6 | # via -r requirements.in 7 | aws-xray-sdk==2.14.0 8 | # via -r requirements.in 9 | botocore==1.36.17 10 | # via aws-xray-sdk 11 | cachetools==5.5.1 12 | # via google-auth 13 | certifi==2025.1.31 14 | # via requests 15 | charset-normalizer==3.4.1 16 | # via requests 17 | google-auth==2.38.0 18 | # via google-genai 19 | google-genai==0.6.0 20 | # via -r requirements.in 21 | idna==3.10 22 | # via requests 23 | jmespath==1.0.1 24 | # via 25 | # aws-lambda-powertools 26 | # botocore 27 | pillow==10.2.0 28 | # via google-genai 29 | pyasn1==0.6.1 30 | # via 31 | # pyasn1-modules 32 | # rsa 33 | pyasn1-modules==0.4.1 34 | # via google-auth 35 | pydantic==2.10.6 36 | # via google-genai 37 | pydantic-core==2.27.2 38 | # via pydantic 39 | python-dateutil==2.9.0.post0 40 | # via botocore 41 | requests==2.32.3 42 | # via google-genai 43 | rsa==4.9 44 | # via google-auth 45 | six==1.17.0 46 | # via python-dateutil 47 | typing-extensions==4.12.2 48 | # via 49 | # aws-lambda-powertools 50 | # pydantic 51 | # pydantic-core 52 | urllib3==2.3.0 53 | # via 54 | # botocore 55 | # requests 56 | websockets==14.2 57 | # via google-genai 58 | wrapt==1.17.2 59 | # via aws-xray-sdk 60 | -------------------------------------------------------------------------------- /lambda/tools/image-analysis/template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: SAM template for Image Analysis Lambda function using Gemini API 4 | 5 | Globals: 6 | Function: 7 | Timeout: 60 8 | MemorySize: 512 9 | Runtime: python3.11 10 | Architectures: 11 | - x86_64 12 | Environment: 13 | Variables: 14 | LOG_LEVEL: INFO 15 | POWERTOOLS_SERVICE_NAME: image-analysis-service 16 | 17 | Resources: 18 | ImageAnalysisFunction: 19 | Type: AWS::Serverless::Function 20 | Properties: 21 | CodeUri: ./ 22 | Handler: index.lambda_handler 23 | Description: Lambda function for analyzing images using Gemini API 24 | Policies: 25 | - S3ReadPolicy: 26 | BucketName: '*' # Allow reading from any S3 bucket. 
Adjust as needed for your security requirements 27 | - Statement: 28 | - Effect: Allow 29 | Action: 30 | - secretsmanager:GetSecretValue 31 | Resource: !Sub 'arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:/ai-agent/api-keys' 32 | Tags: 33 | Purpose: ImageAnalysis 34 | Service: AI-Agent 35 | 36 | Outputs: 37 | ImageAnalysisFunction: 38 | Description: Image Analysis Lambda Function ARN 39 | Value: !GetAtt ImageAnalysisFunction.Arn 40 | ImageAnalysisFunctionIamRole: 41 | Description: Implicit IAM Role created for Image Analysis function 42 | Value: !GetAtt ImageAnalysisFunctionRole.Arn -------------------------------------------------------------------------------- /lambda/tools/image-analysis/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "analyze_images", 3 | "id": "analyze_images_unique_id", 4 | "input": { 5 | "image_locations": [ 6 | { 7 | "bucket": "ai-agent-test-bucket-672915487120-us-west-2", 8 | "key": "uploads/tehini_image.jpg" 9 | } 10 | ], 11 | "query": "What products are shown in these images and what are their ingredients?" 12 | }, 13 | "type": "tool_use" 14 | } -------------------------------------------------------------------------------- /lambda/tools/local-agent/CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md - Local SQS Agent Guidelines 2 | 3 | ## Build & Test Commands 4 | ```bash 5 | # Build the project 6 | cargo build 7 | 8 | # Run the project 9 | cargo run 10 | 11 | # Run all tests 12 | cargo test 13 | 14 | # Run a specific test 15 | cargo test test_process_message 16 | 17 | # Run integration tests (requires AWS credentials) 18 | cargo test -- --ignored 19 | 20 | # Check code coverage 21 | cargo tarpaulin --out Html 22 | 23 | # Lint the code 24 | cargo clippy 25 | ``` 26 | 27 | ## Code Style Guidelines 28 | - **Imports**: Group standard library, external, and internal imports separately 29 | - **Error Handling**: Use `anyhow` for application errors with context via `.context()` or `anyhow!()` 30 | - **Naming**: Use snake_case for variables/functions, CamelCase for types/structs 31 | - **Types**: Always provide explicit types for struct fields and function returns 32 | - **Logging**: Use log crate levels (info, error, etc.) 
appropriately based on severity 33 | - **Comments**: Document public functions/structs with doc comments (///) 34 | - **Testing**: Write unit tests for all functions, mock external services 35 | - **Formatting**: Use rustfmt for consistent code style (max 100 chars per line) 36 | - **Config**: External configuration should be loaded from JSON files via config crate -------------------------------------------------------------------------------- /lambda/tools/local-agent/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "local_sfn_agent" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | aws-config = "1.5.17" 8 | aws-sdk-sqs = "1.60.0" 9 | tokio = { version = "1.28.0", features = ["full"] } 10 | serde = { version = "1.0.163", features = ["derive"] } 11 | serde_json = "1.0.96" 12 | config = "0.15.8" 13 | log = "0.4.17" 14 | env_logger = "0.11.6" 15 | futures = "0.3.28" 16 | anyhow = "1.0.71" 17 | aws-sdk-sfn = "1.64.0" 18 | tempfile = "3.8.0" 19 | 20 | [dev-dependencies] 21 | mockall = "0.13.1" 22 | tempfile = "3.8.0" 23 | aws-smithy-types = "1.2.13" 24 | test-log = "0.2.12" 25 | -------------------------------------------------------------------------------- /lambda/tools/local-agent/daemon_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "activity_arn": "", 3 | "app_path": "uv run script_executor.py", 4 | "poll_interval_ms": 5000, 5 | "worker_name": "local-agent-worker", 6 | "profile_name": "" 7 | } -------------------------------------------------------------------------------- /lambda/tools/local-agent/examples/New_Document_Button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/guyernest/step-functions-agent/a4d4b3d145934e995afa680581a4e246940dcb85/lambda/tools/local-agent/examples/New_Document_Button.png -------------------------------------------------------------------------------- /lambda/tools/local-agent/examples/notepad_windows_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Notepad Windows Automation Example", 3 | "description": "Creates and edits a document in Notepad on Windows", 4 | "abort_on_error": true, 5 | "actions": [ 6 | { 7 | "type": "launch", 8 | "app": "notepad", 9 | "wait": 1.5, 10 | "description": "Launch Notepad application" 11 | }, 12 | { 13 | "type": "wait", 14 | "seconds": 1.0, 15 | "description": "Wait for Notepad to open" 16 | }, 17 | { 18 | "type": "type", 19 | "text": "Hello, this is a document created with PyAutoGUI on Windows!\r\n\r\n", 20 | "interval": 0.05, 21 | "description": "Type the first line" 22 | }, 23 | { 24 | "type": "type", 25 | "text": "This script demonstrates how to automate Notepad on Windows.\r\n", 26 | "interval": 0.05, 27 | "description": "Type the second line" 28 | }, 29 | { 30 | "type": "type", 31 | "text": "Features demonstrated:\r\n", 32 | "interval": 0.05, 33 | "description": "Type the features header" 34 | }, 35 | { 36 | "type": "type", 37 | "text": "- Launching applications\r\n- Typing text\r\n- Using keyboard shortcuts\r\n- Saving files\r\n", 38 | "interval": 0.05, 39 | "description": "Type the features list" 40 | }, 41 | { 42 | "type": "hotkey", 43 | "keys": ["ctrl", "a"], 44 | "description": "Select all text" 45 | }, 46 | { 47 | "type": "wait", 48 | "seconds": 0.5, 49 | "description": "Wait briefly" 50 | }, 51 | { 52 | "type": "press", 53 | "key": 
"escape", 54 | "description": "Press escape to deselect text" 55 | }, 56 | { 57 | "type": "wait", 58 | "seconds": 0.5, 59 | "description": "Wait briefly" 60 | }, 61 | { 62 | "type": "hotkey", 63 | "keys": ["ctrl", "s"], 64 | "description": "Save the document" 65 | }, 66 | { 67 | "type": "wait", 68 | "seconds": 1.0, 69 | "description": "Wait for save dialog" 70 | }, 71 | { 72 | "type": "type", 73 | "text": "PyAutoGUI_Windows_Example.txt", 74 | "interval": 0.05, 75 | "description": "Type filename" 76 | }, 77 | { 78 | "type": "press", 79 | "key": "enter", 80 | "description": "Press enter to save" 81 | }, 82 | { 83 | "type": "wait", 84 | "seconds": 1.0, 85 | "description": "Wait for save to complete" 86 | }, 87 | { 88 | "type": "hotkey", 89 | "keys": ["alt", "f4"], 90 | "description": "Close Notepad" 91 | } 92 | ] 93 | } -------------------------------------------------------------------------------- /lambda/tools/local-agent/examples/textedit_mac_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "TextEdit Mac Automation Example with Image Detection", 3 | "description": "Creates and edits a document in TextEdit on macOS, using image detection to find buttons", 4 | "comment": "This example uses the standard PyAutoGUI pattern of clicking directly on images using the 'click' action with an 'image' parameter. This is equivalent to pyautogui.click('image.png') in Python code.", 5 | "abort_on_error": true, 6 | "actions": [ 7 | { 8 | "type": "launch", 9 | "app": "TextEdit", 10 | "wait": 3.0, 11 | "description": "Launch TextEdit application" 12 | }, 13 | { 14 | "type": "click", 15 | "image": "examples/New_Document_Button.png", 16 | "confidence": 0.9, 17 | "description": "Click on the New Document button" 18 | }, 19 | { 20 | "type": "wait", 21 | "seconds": 1.0, 22 | "description": "Wait for the new document to open" 23 | }, 24 | { 25 | "type": "type", 26 | "text": "Hello, this is a document created with PyAutoGUI!\n\n", 27 | "interval": 0.05, 28 | "description": "Type the first line" 29 | }, 30 | { 31 | "type": "type", 32 | "text": "This script demonstrates how to automate TextEdit on macOS.\n", 33 | "interval": 0.05, 34 | "description": "Type the second line" 35 | }, 36 | { 37 | "type": "type", 38 | "text": "Features demonstrated:\n", 39 | "interval": 0.05, 40 | "description": "Type the features header" 41 | }, 42 | { 43 | "type": "type", 44 | "text": "- Launching applications\n- Image recognition for UI elements\n- Typing text\n- Using keyboard shortcuts\n- Formatting text\n", 45 | "interval": 0.05, 46 | "description": "Type the features list" 47 | }, 48 | { 49 | "type": "hotkey", 50 | "keys": ["command", "a"], 51 | "description": "Select all text" 52 | }, 53 | { 54 | "type": "hotkey", 55 | "keys": ["command", "b"], 56 | "description": "Make text bold" 57 | }, 58 | { 59 | "type": "press", 60 | "key": "escape", 61 | "description": "Press escape to deselect text" 62 | }, 63 | { 64 | "type": "wait", 65 | "seconds": 1.0, 66 | "description": "Wait briefly" 67 | }, 68 | { 69 | "type": "hotkey", 70 | "keys": ["command", "s"], 71 | "description": "Save the document" 72 | }, 73 | { 74 | "type": "wait", 75 | "seconds": 2.0, 76 | "description": "Wait for save dialog" 77 | }, 78 | { 79 | "type": "type", 80 | "text": "PyAutoGUI_Example.txt", 81 | "interval": 0.05, 82 | "description": "Type filename" 83 | }, 84 | { 85 | "type": "press", 86 | "key": "return", 87 | "description": "Press return to save" 88 | }, 89 | { 90 | "type": "wait", 91 | "seconds": 
1.0, 92 | "description": "Wait for save to complete" 93 | }, 94 | { 95 | "type": "hotkey", 96 | "keys": ["command", "q"], 97 | "description": "Quit TextEdit" 98 | } 99 | ] 100 | } -------------------------------------------------------------------------------- /lambda/tools/local-agent/requirements.in: -------------------------------------------------------------------------------- 1 | pyautogui 2 | opencv-python -------------------------------------------------------------------------------- /lambda/tools/local-agent/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | mouseinfo==0.1.3 4 | # via pyautogui 5 | numpy==1.24.4 6 | # via opencv-python 7 | opencv-python==4.11.0.86 8 | # via -r requirements.in 9 | pillow==10.4.0 10 | # via pyscreeze 11 | pyautogui==0.9.54 12 | # via -r requirements.in 13 | pygetwindow==0.0.9 14 | # via pyautogui 15 | pymsgbox==1.0.9 16 | # via pyautogui 17 | pyobjc-core==11.0 18 | # via 19 | # pyautogui 20 | # pyobjc-framework-cocoa 21 | # pyobjc-framework-quartz 22 | pyobjc-framework-cocoa==10.3.2 23 | # via pyobjc-framework-quartz 24 | pyobjc-framework-quartz==10.3.2 25 | # via pyautogui 26 | pyperclip==1.9.0 27 | # via mouseinfo 28 | pyrect==0.2.0 29 | # via pygetwindow 30 | pyscreeze==1.0.1 31 | # via pyautogui 32 | pytweening==1.2.0 33 | # via pyautogui 34 | rubicon-objc==0.4.9 35 | # via mouseinfo 36 | -------------------------------------------------------------------------------- /lambda/tools/local-agent/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | fn main() -> Result<()> { 4 | // Call the main function from the library 5 | // We don't need #[tokio::main] here because the library function already has it 6 | local_sfn_agent::main() 7 | } 8 | -------------------------------------------------------------------------------- /lambda/tools/rust-clustering/.gitignore: -------------------------------------------------------------------------------- 1 | target -------------------------------------------------------------------------------- /lambda/tools/rust-clustering/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-clustering" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | serde_json = "1" 8 | 9 | lambda_runtime = "0.13.0" 10 | tokio = { version = "1", features = ["macros"] } 11 | serde = "1.0.217" 12 | anyhow = "1.0" 13 | 14 | hdbscan = "0.9.0" 15 | aws-config = "1.5.16" 16 | aws-sdk-s3 = "1.76.0" 17 | -------------------------------------------------------------------------------- /lambda/tools/rust-clustering/src/main.rs: -------------------------------------------------------------------------------- 1 | use lambda_runtime::{run, service_fn, tracing, Error}; 2 | 3 | mod event_handler; 4 | use event_handler::function_handler; 5 | 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<(), Error> { 9 | tracing::init_default_subscriber(); 10 | 11 | run(service_fn(function_handler)).await 12 | } 13 | -------------------------------------------------------------------------------- /lambda/tools/rust-clustering/template.yaml: -------------------------------------------------------------------------------- 1 | # template.yaml 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Transform: AWS::Serverless-2016-10-31 
4 | 5 | Resources: 6 | # Clusting using HDBScan in Rust 7 | ClusteringRust: 8 | Type: AWS::Serverless::Function 9 | Metadata: 10 | BuildMethod: rust-cargolambda 11 | Properties: 12 | CodeUri: . 13 | Handler: bootstrap 14 | Runtime: provided.al2 15 | Timeout: 90 16 | MemorySize: 128 17 | Environment: 18 | Variables: 19 | POWERTOOLS_SERVICE_NAME: ClusteringRust 20 | Architectures: 21 | - arm64 22 | Policies: 23 | - SecretsManagerRead 24 | - AWSLambdaBasicExecutionRole -------------------------------------------------------------------------------- /lambda/tools/rust-clustering/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "calculate_hdbscan_clusters_unique_id", 3 | "name": "calculate_hdbscan_clusters", 4 | "input": { 5 | "bucket": "yfinance-data-672915487120-us-west-2", 6 | "key": "stock_vectors/stock_data_20250107_214201.csv" 7 | } 8 | } -------------------------------------------------------------------------------- /lambda/tools/stock-analyzer/events/test_event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "toolu_01GzN3ATS4f3UZAgeV57UCC9", 3 | "input": { 4 | "bucket": "yfinance-data-672915487120-us-west-2", 5 | "key": "stock_vectors/stock_data_20250108_032729.csv" 6 | }, 7 | "name": "calculate_volatility", 8 | "type": "tool_use" 9 | } -------------------------------------------------------------------------------- /lambda/tools/stock-analyzer/src/test/java/tools/TestContext.java: -------------------------------------------------------------------------------- 1 | package tools; 2 | 3 | import com.amazonaws.services.lambda.runtime.Context; 4 | import com.amazonaws.services.lambda.runtime.CognitoIdentity; 5 | import com.amazonaws.services.lambda.runtime.ClientContext; 6 | import com.amazonaws.services.lambda.runtime.LambdaLogger; 7 | 8 | public class TestContext implements Context{ 9 | 10 | public TestContext() {} 11 | public String getAwsRequestId(){ 12 | return new String("495b12a8-xmpl-4eca-8168-160484189f99"); 13 | } 14 | public String getLogGroupName(){ 15 | return new String("/aws/lambda/test-tools-function"); 16 | } 17 | public String getLogStreamName(){ 18 | return new String("2020/02/26/[$LATEST]704f8dxmpla04097b9134246b8438f1a"); 19 | } 20 | public String getFunctionName(){ 21 | return new String("java-tools-function"); 22 | } 23 | public String getFunctionVersion(){ 24 | return new String("$LATEST"); 25 | } 26 | public String getInvokedFunctionArn(){ 27 | return new String("arn:aws:lambda:us-east-2:123456789012:function:java-tools-function"); 28 | } 29 | public CognitoIdentity getIdentity(){ 30 | return null; 31 | } 32 | public ClientContext getClientContext(){ 33 | return null; 34 | } 35 | public int getRemainingTimeInMillis(){ 36 | return 300000; 37 | } 38 | public int getMemoryLimitInMB(){ 39 | return 512; 40 | } 41 | public LambdaLogger getLogger(){ 42 | return new TestLogger(); 43 | } 44 | 45 | } -------------------------------------------------------------------------------- /lambda/tools/stock-analyzer/src/test/java/tools/TestLogger.java: -------------------------------------------------------------------------------- 1 | package tools; 2 | import org.slf4j.Logger; 3 | import org.slf4j.LoggerFactory; 4 | import com.amazonaws.services.lambda.runtime.LambdaLogger; 5 | 6 | public class TestLogger implements LambdaLogger { 7 | private static final Logger logger = LoggerFactory.getLogger(TestLogger.class); 8 | public void log(String message){ 9 | 
logger.info(message); 10 | } 11 | public void log(byte[] message){ 12 | logger.info(new String(message)); 13 | } 14 | } -------------------------------------------------------------------------------- /lambda/tools/stock-analyzer/template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: An AWS Lambda function used as a tool in AI agent application on step functions. 4 | Resources: 5 | StockAnalyzerFunction: 6 | Type: AWS::Serverless::Function 7 | Properties: 8 | Handler: tools.StockAnalyzerLambda::handleRequest 9 | Runtime: java17 10 | MemorySize: 512 11 | Timeout: 10 12 | CodeUri: target/stock-analyzer-lambda-1.0-SNAPSHOT.jar -------------------------------------------------------------------------------- /lambda/tools/web-research/Makefile: -------------------------------------------------------------------------------- 1 | build-WebResearchFunction: 2 | GOOS=linux GOARCH=arm64 go build -tags lambda.norpc -o bootstrap main.go 3 | cp ./bootstrap $(ARTIFACTS_DIR)/. -------------------------------------------------------------------------------- /lambda/tools/web-research/events/test_event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "unique_request_id", 3 | "name": "research_company", 4 | "input": { 5 | "company": "Apple", 6 | "topics": ["recent financial performance", "market position"] 7 | }, 8 | "type": "tool_use" 9 | } -------------------------------------------------------------------------------- /lambda/tools/web-research/go.mod: -------------------------------------------------------------------------------- 1 | module web-search 2 | 3 | go 1.23.4 4 | 5 | require ( 6 | github.com/aws/aws-lambda-go v1.47.0 7 | github.com/aws/aws-sdk-go v1.55.5 8 | github.com/aws/aws-sdk-go-v2/config v1.28.10 9 | github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.10 10 | github.com/sgaunet/perplexity-go v1.1.0 11 | github.com/stretchr/testify v1.9.0 12 | ) 13 | 14 | require ( 15 | github.com/aws/aws-sdk-go-v2 v1.32.8 // indirect 16 | github.com/aws/aws-sdk-go-v2/credentials v1.17.51 // indirect 17 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect 18 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect 19 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect 20 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect 21 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect 22 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect 23 | github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect 24 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect 25 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 // indirect 26 | github.com/aws/smithy-go v1.22.1 // indirect 27 | github.com/davecgh/go-spew v1.1.1 // indirect 28 | github.com/pmezard/go-difflib v1.0.0 // indirect 29 | github.com/stretchr/objx v0.5.2 // indirect 30 | gopkg.in/yaml.v3 v3.0.1 // indirect 31 | ) 32 | -------------------------------------------------------------------------------- /lambda/tools/web-research/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestHandler(t *testing.T) { 12 | // Test event for 
researching a company 13 | testEvent := ToolEvent{ 14 | ID: "research_company_unique_id", 15 | Name: "research_company", 16 | Input: json.RawMessage(`{ 17 | "company": "Apple", 18 | "topics": ["recent financial performance"] 19 | }`), 20 | Type: "tool_use", 21 | } 22 | 23 | // Call handler 24 | response, err := handler(context.Background(), testEvent) 25 | assert.NoError(t, err) 26 | 27 | // Verify response structure 28 | assert.Equal(t, "tool_result", response.Type) 29 | assert.Equal(t, "research_company_unique_id", response.ToolUseID) 30 | assert.NotEmpty(t, response.Content) 31 | 32 | // Parse the content 33 | var result ResearchResult 34 | err = json.Unmarshal([]byte(response.Content), &result) 35 | assert.NoError(t, err) 36 | 37 | // Basic content verification 38 | assert.Equal(t, "Apple", result.Company) 39 | assert.Contains(t, result.Information, "recent financial performance") 40 | assert.NotEmpty(t, result.Information["recent financial performance"]) 41 | assert.Contains(t, result.Information["recent financial performance"], "Apple") 42 | 43 | // Test with unknown tool 44 | unknownToolEvent := ToolEvent{ 45 | ID: "unknown_tool_id", 46 | Name: "unknown_tool", 47 | Input: json.RawMessage(`{ 48 | "company": "Apple" 49 | }`), 50 | Type: "tool_use", 51 | } 52 | 53 | response, err = handler(context.Background(), unknownToolEvent) 54 | assert.NoError(t, err) 55 | assert.Equal(t, "tool_result", response.Type) 56 | assert.Equal(t, "unknown_tool_id", response.ToolUseID) 57 | assert.Contains(t, response.Content, "Unknown tool") 58 | } 59 | 60 | func TestToolEventParsing(t *testing.T) { 61 | jsonData := `{ 62 | "id": "test-id", 63 | "name": "research_company", 64 | "input": { 65 | "company": "Apple", 66 | "topics": ["performance", "news"] 67 | }, 68 | "type": "tool_use" 69 | }` 70 | 71 | var event ToolEvent 72 | err := json.Unmarshal([]byte(jsonData), &event) 73 | assert.NoError(t, err) 74 | 75 | var input ResearchInput 76 | err = json.Unmarshal(event.Input, &input) 77 | assert.NoError(t, err) 78 | assert.Equal(t, "Apple", input.Company) 79 | assert.Equal(t, []string{"performance", "news"}, input.Topics) 80 | } 81 | -------------------------------------------------------------------------------- /lambda/tools/web-research/template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: > 4 | go-al2 5 | 6 | Sample SAM Template for go-al2 7 | 8 | # More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst 9 | Globals: 10 | Function: 11 | Timeout: 5 12 | 13 | Resources: 14 | WebResearchFunction: 15 | Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction 16 | Properties: 17 | CodeUri: . 
18 | Handler: bootstrap 19 | Runtime: provided.al2 20 | Tracing: Active # https://docs.aws.amazon.com/lambda/latest/dg/lambda-x-ray.html 21 | Events: 22 | CatchAll: 23 | Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api 24 | Properties: 25 | Path: /hello 26 | Method: GET 27 | Environment: # More info about Env Vars: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#environment-object 28 | Variables: 29 | PARAM1: VALUE 30 | Metadata: 31 | BuildMethod: makefile 32 | 33 | Outputs: 34 | # ServerlessRestApi is an implicit API created out of Events key under Serverless::Function 35 | # Find out more about other implicit resources you can reference within SAM 36 | # https://github.com/awslabs/serverless-application-model/blob/master/docs/internals/generated_resources.rst#api 37 | WebResearchAPI: 38 | Description: "API Gateway endpoint URL for Prod environment for First Function" 39 | Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/" 40 | WebResearchFunction: 41 | Description: "First Lambda Function ARN" 42 | Value: !GetAtt WebResearchFunction.Arn 43 | WebResearchFunctionIamRole: 44 | Description: "Implicit IAM Role created for Hello World function" 45 | Value: !GetAtt WebResearchFunctionRole.Arn -------------------------------------------------------------------------------- /lambda/tools/web-scraper/CLAUDE.md: -------------------------------------------------------------------------------- 1 | # Web Scraper Project Guidelines 2 | 3 | ## Purpose 4 | 5 | The purpose of this tool is to navigate the web and extract relevant information from various websites. The tool is designed to get instructions from an LLM on how to perform searches and extract data, and then execute those instructions. The tool can be used in an iterative mode, where the LLM can refine its instructions based on the results of previous searches. 
6 | 7 | ## Build Commands 8 | 9 | - `npm install` - Install dependencies 10 | - `npm run build` - Build the TypeScript code 11 | - `npm run clean` - Clean the dist directory 12 | - `sam build` - Build using SAM CLI 13 | - `sam local invoke WebScraperFunction --event tests/test-event.json` - Test locally with SAM 14 | 15 | ## Code Style Guidelines 16 | 17 | - Use TypeScript strict mode and strong typing 18 | - Use interfaces for input/output type definitions 19 | - Follow camelCase for variables and functions 20 | - Use async/await pattern for asynchronous operations 21 | - Always include proper error handling with try/catch blocks 22 | - Use structured logging with @aws-lambda-powertools/logger 23 | - Lambda handler should maintain Handler typing pattern 24 | 25 | ## Project Structure 26 | 27 | - Source code in `src/` directory 28 | - Tests in `tests/` directory 29 | - Build output in `dist/` directory 30 | - Chrome binary in Lambda layer 31 | 32 | ## Roadmap 33 | 34 | - Get instructions from LLM 35 | - Execute instructions 36 | - Return results to LLM 37 | - Iterate on instructions and results 38 | - Refine results based on LLM feedback 39 | -------------------------------------------------------------------------------- /lambda/tools/web-scraper/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web-scraper", 3 | "version": "1.0.0", 4 | "description": "Web scraper Lambda function using @sparticuz/chromium", 5 | "main": "index.js", 6 | "scripts": { 7 | "build": "tsc", 8 | "clean": "rm -rf dist/", 9 | "predeploy": "./setup.sh", 10 | "deploy": "sam build && sam deploy", 11 | "test": "tsc && node dist/local-test.js" 12 | }, 13 | "dependencies": { 14 | "puppeteer-core": "24.2.0", 15 | "@sparticuz/chromium": "132.0.0", 16 | "@aws-lambda-powertools/logger": "^2.12.0" 17 | }, 18 | "devDependencies": { 19 | "@types/aws-lambda": "^8.10.129", 20 | "@types/node": "^18.19.3", 21 | "typescript": "^5.3.3" 22 | } 23 | } -------------------------------------------------------------------------------- /lambda/tools/web-scraper/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Clean up any existing files 4 | rm -rf dist/ 5 | rm -rf layers/chromium/nodejs 6 | rm -f layers/chromium/chromium.zip 7 | 8 | # Install project dependencies and build TypeScript 9 | echo "Installing project dependencies..." 10 | npm install 11 | npm run build 12 | 13 | # Copy package.json and install production dependencies in dist 14 | echo "Setting up production dependencies..." 15 | cp package.json dist/ 16 | cd dist 17 | npm install --production 18 | rm package.json package-lock.json 19 | cd .. 20 | 21 | # Create the layer 22 | echo "Creating Chromium layer..." 23 | mkdir -p layers/chromium/nodejs 24 | 25 | # Install Chromium in the layer with the correct architecture 26 | cd layers/chromium/nodejs 27 | npm init -y 28 | 29 | # Install the chromium package specifically for ARM64 30 | echo "Installing Chromium for ARM64..." 31 | cat > package.json << EOL 32 | { 33 | "name": "chromium-layer", 34 | "version": "1.0.0", 35 | "dependencies": { 36 | "@sparticuz/chromium": "132.0.0" 37 | } 38 | } 39 | EOL 40 | 41 | # Install dependencies with architecture-specific flags 42 | # Sadly, this doesn't work because the chromium package is not available for ARM64 43 | npm install --arch=arm64 --platform=linux 44 | 45 | # Create the zip file with debug information 46 | cd .. 47 | echo "Creating layer zip file..." 
48 | echo "Contents of nodejs/node_modules/@sparticuz/chromium/bin:" 49 | ls -la nodejs/node_modules/@sparticuz/chromium/bin 50 | echo "File type of chromium binary:" 51 | file nodejs/node_modules/@sparticuz/chromium/bin/* 52 | 53 | # Create the zip file 54 | zip -r chromium.zip nodejs/ 55 | 56 | echo "Layer has been created at layers/chromium/chromium.zip" 57 | cd ../.. -------------------------------------------------------------------------------- /lambda/tools/web-scraper/template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Transform: AWS::Serverless-2016-10-31 3 | Description: SAM template for Web Scraper Lambda function using @sparticuz/chromium 4 | 5 | Globals: 6 | Function: 7 | Timeout: 300 8 | MemorySize: 2048 9 | Runtime: nodejs18.x 10 | Architectures: 11 | - x86_64 12 | 13 | Resources: 14 | WebScraperFunction: 15 | Type: AWS::Serverless::Function 16 | Properties: 17 | FunctionName: web-scraper 18 | CodeUri: dist/ 19 | Handler: index.handler 20 | Layers: 21 | - !Ref ChromiumLayer 22 | 23 | ChromiumLayer: 24 | Type: AWS::Serverless::LayerVersion 25 | Properties: 26 | LayerName: chromium-layer 27 | Description: Layer containing Chromium binary 28 | ContentUri: layers/chromium/chromium.zip 29 | CompatibleRuntimes: 30 | - nodejs18.x 31 | CompatibleArchitectures: 32 | - x86_64 33 | 34 | Outputs: 35 | WebScraperFunction: 36 | Description: Web Scraper Lambda Function ARN 37 | Value: !GetAtt WebScraperFunction.Arn -------------------------------------------------------------------------------- /lambda/tools/web-scraper/tests/bbc-news-article.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "bbc_article_1", 3 | "name": "web_scrape", 4 | "input": { 5 | "url": "https://www.bbc.com", 6 | "actions": [ 7 | { 8 | "type": "search", 9 | "searchInput": "input[name='q']", 10 | "searchButton": "button[type='submit']", 11 | "searchTerm": "climate change" 12 | }, 13 | { 14 | "type": "wait", 15 | "timeMs": 2000 16 | }, 17 | { 18 | "type": "clickAndWaitForSelector", 19 | "clickSelector": ".PromoContent a", 20 | "waitForSelector": "article" 21 | } 22 | ], 23 | "extractSelectors": { 24 | "containers": ["article h1", "article p", ".ArticleWrapper"], 25 | "images": ["article img"] 26 | }, 27 | "screenshotSelector": "article" 28 | } 29 | } -------------------------------------------------------------------------------- /lambda/tools/web-scraper/tests/bbc-sports-news.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "bbc_sports_1", 3 | "name": "web_scrape", 4 | "input": { 5 | "url": "https://www.bbc.com", 6 | "actions": [ 7 | { 8 | "type": "click", 9 | "selector": "a[href*='sport']", 10 | "waitForNavigation": true 11 | }, 12 | { 13 | "type": "wait", 14 | "timeMs": 1000 15 | } 16 | ], 17 | "extractSelectors": { 18 | "containers": [".gs-c-promo-heading", ".gs-c-promo-summary"], 19 | "links": [".gs-c-promo-heading a"], 20 | "images": [".gs-c-promo-image img"] 21 | }, 22 | "fullPageScreenshot": true 23 | } 24 | } -------------------------------------------------------------------------------- /lambda/tools/web-scraper/tests/navigation-example.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "nav_example_1", 3 | "name": "web_scrape", 4 | "input": { 5 | "url": "https://www.weather.gov", 6 | "actions": [ 7 | { 8 | "type": "type", 9 | "selector": "#inputstring", 10 | 
"text": "New York, NY" 11 | }, 12 | { 13 | "type": "click", 14 | "selector": "#btnSearch" 15 | }, 16 | { 17 | "type": "wait", 18 | "timeMs": 2000 19 | }, 20 | { 21 | "type": "waitForSelector", 22 | "selector": "#detailed-forecast" 23 | } 24 | ], 25 | "extractSelectors": { 26 | "containers": ["#detailed-forecast", ".forecast-label", ".forecast-text", ".temp"], 27 | "links": [".forecast-icon a"], 28 | "images": [".forecast-icon img"] 29 | }, 30 | "fullPageScreenshot": true 31 | } 32 | } -------------------------------------------------------------------------------- /lambda/tools/web-scraper/tests/test-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "scrape_1", 3 | "name": "web_scrape", 4 | "input": { 5 | "url": "https://example.com", 6 | "extractSelectors": { 7 | "containers": ["h1", "p"] 8 | } 9 | } 10 | } -------------------------------------------------------------------------------- /lambda/tools/web-scraper/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2020", 4 | "module": "commonjs", 5 | "strict": true, 6 | "esModuleInterop": true, 7 | "skipLibCheck": true, 8 | "forceConsistentCasingInFileNames": true, 9 | "outDir": "./dist", 10 | "rootDir": "./src" 11 | }, 12 | "include": ["src/**/*"], 13 | "exclude": ["node_modules"] 14 | } -------------------------------------------------------------------------------- /lambda/tools/yfinance/requirements.in: -------------------------------------------------------------------------------- 1 | yfinance==0.2.51 2 | pandas 3 | aws-lambda-powertools>=2.30.2 # AWS Lambda Powertools for Python 4 | -------------------------------------------------------------------------------- /lambda/tools/yfinance/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile lambda/tools/yfinance/requirements.in --output-file lambda/tools/yfinance/requirements.txt 3 | aws-lambda-powertools==3.4.0 4 | # via -r lambda/tools/yfinance/requirements.in 5 | beautifulsoup4==4.12.3 6 | # via yfinance 7 | certifi==2024.12.14 8 | # via requests 9 | charset-normalizer==3.4.1 10 | # via requests 11 | frozendict==2.4.6 12 | # via yfinance 13 | html5lib==1.1 14 | # via yfinance 15 | idna==3.10 16 | # via requests 17 | jmespath==1.0.1 18 | # via aws-lambda-powertools 19 | lxml==5.3.0 20 | # via yfinance 21 | multitasking==0.0.11 22 | # via yfinance 23 | numpy==2.2.1 24 | # via 25 | # pandas 26 | # yfinance 27 | pandas==2.2.3 28 | # via 29 | # -r lambda/tools/yfinance/requirements.in 30 | # yfinance 31 | peewee==3.17.8 32 | # via yfinance 33 | platformdirs==4.3.6 34 | # via yfinance 35 | python-dateutil==2.9.0.post0 36 | # via pandas 37 | pytz==2024.2 38 | # via 39 | # pandas 40 | # yfinance 41 | requests==2.32.3 42 | # via yfinance 43 | six==1.17.0 44 | # via 45 | # html5lib 46 | # python-dateutil 47 | soupsieve==2.6 48 | # via beautifulsoup4 49 | typing-extensions==4.12.2 50 | # via aws-lambda-powertools 51 | tzdata==2024.2 52 | # via pandas 53 | urllib3==2.3.0 54 | # via requests 55 | webencodings==0.5.1 56 | # via html5lib 57 | yfinance==0.2.51 58 | # via -r lambda/tools/yfinance/requirements.in 59 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = 
"step-functions-agent" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "anthropic==0.35.0", 9 | "e2b-code-interpreter==1.0.0", 10 | "google-genai>=0.6.0", 11 | "google-generativeai>=0.8.4", 12 | "grpcio>=1.70.0", 13 | "ipykernel>=6.29.5", 14 | "psutil>=6.1.0", 15 | "pytest>=8.3.4", 16 | "python-dotenv==1.0.1", 17 | "rich>=13.9.4", 18 | ] 19 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | e2b_code_interpreter==1.0.0 2 | anthropic==0.35.0 3 | openai 4 | python-dotenv==1.0.1 5 | ipykernel 6 | aws-cdk-lib 7 | constructs>=10.0.0,<11.0.0 8 | aws_cdk.aws_lambda_python_alpha 9 | aws_cdk.aws_lambda_go_alpha 10 | aws_cdk.aws_apprunner_alpha 11 | cdklabs.generative_ai_cdk_constructs==0.1.296 12 | cdk-monitoring-constructs 13 | aws-lambda-powertools 14 | boto3 -------------------------------------------------------------------------------- /step_functions_agent/agent_docs_stack.py: -------------------------------------------------------------------------------- 1 | # This stack deploys the documentation of the AI agents framework to S3. 2 | 3 | from aws_cdk import ( 4 | Stack, 5 | RemovalPolicy, 6 | aws_s3 as s3, 7 | aws_iam as iam, 8 | ) 9 | from aws_cdk.aws_s3_deployment import Source, BucketDeployment 10 | 11 | from constructs import Construct 12 | 13 | class AgentDocsStack(Stack): 14 | 15 | def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: 16 | super().__init__(scope, construct_id, **kwargs) 17 | 18 | 19 | # Createt the bucket that will host the documentation static pages 20 | docs_bucket = s3.Bucket(self, "AgentDocsBucket", 21 | removal_policy=RemovalPolicy.DESTROY, 22 | auto_delete_objects=True, 23 | website_index_document="index.html", 24 | public_read_access=True, 25 | block_public_access=s3.BlockPublicAccess( 26 | block_public_acls=False, 27 | block_public_policy=False, 28 | ignore_public_acls=False, 29 | restrict_public_buckets=False 30 | ) 31 | ) 32 | 33 | # Add bucket policy to allow public access to all files 34 | docs_bucket.add_to_resource_policy( 35 | iam.PolicyStatement( 36 | actions=["s3:GetObject"], 37 | resources=[docs_bucket.arn_for_objects("*")], 38 | principals=[iam.AnyPrincipal()] 39 | ) 40 | ) 41 | 42 | # Deploy website files from a local directory 43 | deployment = BucketDeployment(self, "DeployWebsiteContent", 44 | sources=[Source.asset("docs/build/")], 45 | destination_bucket=docs_bucket, 46 | ) -------------------------------------------------------------------------------- /ui/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY . 
/app 6 | 7 | RUN pip install --no-cache-dir -r requirements.txt 8 | 9 | EXPOSE 8080 10 | 11 | CMD ["python", "call_agent.py"] 12 | -------------------------------------------------------------------------------- /ui/apprunner.yaml: -------------------------------------------------------------------------------- 1 | version: 1.0 2 | runtime: python311 3 | build: 4 | commands: 5 | build: 6 | - pip3 install -r requirements.txt 7 | run: 8 | runtime-version: 3.11 9 | pre-run: 10 | - pip3 install -r requirements.txt 11 | command: python3 call_agent.py 12 | network: 13 | port: 8080 -------------------------------------------------------------------------------- /ui/requirements.in: -------------------------------------------------------------------------------- 1 | python-fasthtml 2 | claudette 3 | boto3 -------------------------------------------------------------------------------- /ui/requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in --output-file requirements.txt 3 | annotated-types==0.7.0 4 | # via pydantic 5 | anthropic==0.42.0 6 | # via claudette 7 | anyio==4.8.0 8 | # via 9 | # anthropic 10 | # httpx 11 | # starlette 12 | # watchfiles 13 | apsw==3.47.2.0 14 | # via apswutils 15 | apswutils==0.0.2 16 | # via fastlite 17 | beautifulsoup4==4.12.3 18 | # via 19 | # python-fasthtml 20 | # toolslm 21 | boto3==1.35.99 22 | # via -r requirements.in 23 | botocore==1.35.99 24 | # via 25 | # boto3 26 | # s3transfer 27 | certifi==2024.12.14 28 | # via 29 | # httpcore 30 | # httpx 31 | claudette==0.1.1 32 | # via -r requirements.in 33 | click==8.1.8 34 | # via uvicorn 35 | distro==1.9.0 36 | # via anthropic 37 | fastcore==1.7.28 38 | # via 39 | # apswutils 40 | # claudette 41 | # fastlite 42 | # llms-txt 43 | # msglm 44 | # python-fasthtml 45 | # toolslm 46 | fastlite==0.1.1 47 | # via python-fasthtml 48 | h11==0.14.0 49 | # via 50 | # httpcore 51 | # uvicorn 52 | html2text==2024.2.26 53 | # via toolslm 54 | httpcore==1.0.7 55 | # via httpx 56 | httptools==0.6.4 57 | # via uvicorn 58 | httpx==0.28.1 59 | # via 60 | # anthropic 61 | # llms-txt 62 | # python-fasthtml 63 | # toolslm 64 | idna==3.10 65 | # via 66 | # anyio 67 | # httpx 68 | itsdangerous==2.2.0 69 | # via python-fasthtml 70 | jiter==0.8.2 71 | # via anthropic 72 | jmespath==1.0.1 73 | # via 74 | # boto3 75 | # botocore 76 | llms-txt==0.0.4 77 | # via toolslm 78 | msglm==0.0.4 79 | # via claudette 80 | oauthlib==3.2.2 81 | # via python-fasthtml 82 | packaging==24.2 83 | # via fastcore 84 | pydantic==2.10.5 85 | # via anthropic 86 | pydantic-core==2.27.2 87 | # via pydantic 88 | python-dateutil==2.9.0.post0 89 | # via 90 | # botocore 91 | # python-fasthtml 92 | python-dotenv==1.0.1 93 | # via uvicorn 94 | python-fasthtml==0.12.0 95 | # via -r requirements.in 96 | python-multipart==0.0.20 97 | # via python-fasthtml 98 | pyyaml==6.0.2 99 | # via uvicorn 100 | s3transfer==0.10.4 101 | # via boto3 102 | six==1.17.0 103 | # via python-dateutil 104 | sniffio==1.3.1 105 | # via 106 | # anthropic 107 | # anyio 108 | soupsieve==2.6 109 | # via beautifulsoup4 110 | starlette==0.45.2 111 | # via python-fasthtml 112 | toolslm==0.1.0 113 | # via claudette 114 | typing-extensions==4.12.2 115 | # via 116 | # anthropic 117 | # anyio 118 | # pydantic 119 | # pydantic-core 120 | urllib3==2.3.0 121 | # via botocore 122 | uvicorn==0.34.0 123 | # via python-fasthtml 124 | uvloop==0.21.0 125 | # via uvicorn 126 | 
watchfiles==1.0.4 127 | # via uvicorn 128 | websockets==14.1 129 | # via uvicorn 130 | --------------------------------------------------------------------------------