├── .DS_Store ├── .gitignore ├── .gitmodules ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── deepseek-planner ├── .env.example ├── .gitignore ├── .python-version ├── README.md ├── pyproject.toml ├── src │ └── server.py └── uv.lock ├── dify_mcp_servers ├── README.md ├── cli.txt ├── dify_mcp_server.py └── weather.py ├── gamelift-mcp-server ├── .gitignore ├── .python-version ├── Dockerfile ├── README.md ├── main.py ├── pyproject.toml └── src │ └── gamelift_mcp_server.py ├── html_render_service ├── .DS_Store ├── .gitignore ├── .python-version ├── LICENSE ├── README.md ├── asset │ ├── case_3_1.png │ └── case_3_2.png ├── pyproject.toml ├── src │ └── server.py ├── uv.lock └── web │ ├── Dockerfile │ ├── app │ ├── __init__.py │ ├── extensions │ │ ├── __init__.py │ │ ├── checkbox.py │ │ ├── radio.py │ │ └── textbox.py │ └── static │ │ ├── app.js │ │ ├── base.html │ │ ├── sample-quiz-animation.gif │ │ ├── sample-quiz-md-file.PNG │ │ └── wrapper.html │ ├── docker-compose.yml │ ├── main.py │ └── requirements.txt ├── remote_computer_use ├── .env.example ├── .gitignore ├── .python-version ├── INSTALL.md ├── README.md ├── assets │ └── image1.png ├── docker │ ├── Dockerfile │ ├── README.md │ ├── docker-compose.yml │ ├── start_vnc.sh │ └── xstartup ├── pyproject.toml ├── server.py ├── server_claude.py ├── setup_vnc.sh ├── ssh_controller.py ├── test_connection.py ├── tools │ ├── base.py │ ├── bash.py │ ├── computer.py │ ├── edit.py │ └── tools_config.py ├── uv.lock └── vnc_controller.py ├── s3_upload_server ├── .python-version ├── README.md ├── pyproject.toml ├── src │ └── server.py ├── test_server.py └── uv.lock ├── streamble_mcp_server_demo ├── .gitignore ├── .python-version ├── README.md ├── main.py ├── pyproject.toml ├── setup.sh ├── src │ ├── auth.py │ ├── cognito_auth.py │ └── server.py └── uv.lock └── time_server ├── .python-version ├── README.md ├── hello.py ├── pyproject.toml ├── src └── server.py └── uv.lock /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-mcp-servers-samples/69c58ab6ee4ca135ebb71ba90f8753e1224497d5/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be added to the global gitignore or merged into this project gitignore. For a PyCharm 158 | # project, it is recommended to include the .idea directory in version control. 159 | # https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this project gitignore. For a PyCharm 161 | # project, it is recommended to include the .idea directory in version control. 162 | .idea/ 163 | 164 | # VS Code 165 | .vscode/ 166 | 167 | # macOS 168 | .DS_Store 169 | .DS_Store? 
170 | ._* 171 | .Spotlight-V100 172 | .Trashes 173 | ehthumbs.db 174 | Thumbs.db 175 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-mcp-servers-samples/69c58ab6ee4ca135ebb71ba90f8753e1224497d5/.gitmodules -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. 
As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
17 | 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Sample MCP Servers for AWS GCR 2 | 3 | ## Contributed by AWS GCR 4 | 5 | | No. | Name | Description | Author | Link | 6 | |------|------|------|------|------| 7 | | 1 | Remote Computer Use | Connect to an Ubuntu virtual desktop over MCP and use it as a computer-use sandbox | chuanxie@ | [Computer Use](remote_computer_use/README.md) | 8 | | 2 | Dify workflow mcp server demo | Implement a Dify workflow MCP server with the Python MCP SDK | lht@ | [Dify workflow mcp server demo](https://github.com/aws-samples/aws-mcp-servers-samples/blob/main/dify_mcp_servers/README.md) | 9 | | 3 | Deepseek planner | Use DeepSeek R1 on Bedrock for planning and coding | chuanxie@ | [deepseek-planner](deepseek-planner/README.md) | 10 | | 4 | Time Server | Let the agent know the current real-world time | chuanxie@ | [time-server](time_server/README.md) | 11 | | 5 | Html Render Service | Render Markdown or HTML content as web pages | chuanxie@ | [Html-Render-Service](html_render_service/README.md) | 12 | | 6 | GameLift MCP Server | Retrieve GameLift information for the current account over MCP | seanguo@ yuzp@ | [gamelift-mcp-server](gamelift-mcp-server/README.md) | 13 | | 7 | S3 Upload Server | MCP server that uploads files to S3 and returns a public access link | hcihang@ | [s3-upload-server](s3_upload_server/README.md) | 14 | 15 | ## Demo MCP on Amazon Bedrock 16 | Recommended Bedrock MCP demo: 17 | https://github.com/aws-samples/demo_mcp_on_amazon_bedrock 18 | -------------------------------------------------------------------------------- /deepseek-planner/.env.example: -------------------------------------------------------------------------------- 1 | # AWS Credentials for Amazon Bedrock 2 | AWS_ACCESS_KEY_ID=your_access_key_id 3 | AWS_SECRET_ACCESS_KEY=your_secret_access_key 4 | AWS_REGION=us-east-1 5 | 6 | # Optional: AWS Session Token (if using temporary credentials) 7 | # AWS_SESSION_TOKEN=your_session_token 8 | -------------------------------------------------------------------------------- /deepseek-planner/.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | venv/ 8 | ENV/ 9 | env/ 10 | .venv 11 | .env 12 | *.egg-info/ 13 | dist/ 14 | build/ 15 | 16 | # IDE files 17 | .vscode/ 18 | .idea/ 19 | *.swp 20 | *.swo 21 | 22 | # Logs 23 | *.log 24 | 25 | # OS files 26 | .DS_Store 27 | Thumbs.db 28 | -------------------------------------------------------------------------------- /deepseek-planner/.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /deepseek-planner/README.md: -------------------------------------------------------------------------------- 1 | # DeepSeek Planner MCP Server 2 | 3 | An MCP (Model Context Protocol) server that provides planning and coding assistance using the DeepSeek model hosted on Amazon Bedrock. 4 | 5 | ## Features 6 | 7 | - **Project Planning**: Generate detailed project plans based on requirements 8 | - **Code Generation**: Create code in various programming languages 9 | - **Code Review**: Get feedback on your code 10 | - **Code Explanation**: Understand complex code 11 | - **Code Refactoring**: Improve your code quality 12 | 13 | ## Prerequisites 14 | 15 | - Python 3.11 or higher (matching `.python-version` and `pyproject.toml`) 16 | - AWS account with access to Amazon Bedrock 17 | - AWS credentials with permissions to invoke the DeepSeek model 18 | 19 | ## Installation 20 | 21 | 1. 
Clone this repository 22 | 2. Create and activate a virtual environment: 23 | ``` 24 | uv sync 25 | source venv/bin/activate 26 | ``` 27 | 28 | ## Usage 29 | 30 | ### Setup 31 | 1. Add the DeepSeek Planner server configuration: 32 | ```json 33 | { 34 | "mcpServers": { 35 | "deepseek-planner": { 36 | "command": "uv", 37 | "args": [ 38 | "--directory", 39 | "/path/to/deepseek-planner/src", 40 | "run", 41 | "server.py" 42 | ], 43 | "env": { 44 | "AWS_ACCESS_KEY_ID": "your_access_key_id", 45 | "AWS_SECRET_ACCESS_KEY": "your_secret_access_key", 46 | "AWS_REGION": "us-east-1" 47 | } 48 | } 49 | } 50 | } 51 | ``` 52 | 53 | ### Available Tools 54 | 55 | 1. **generate_plan**: Create a detailed project plan based on requirements 56 | 2. **generate_code**: Generate code based on requirements 57 | 3. **review_code**: Review code and provide feedback 58 | 4. **explain_code**: Explain code in detail 59 | 5. **refactor_code**: Refactor code to improve quality 60 | 61 | ## License 62 | 63 | MIT 64 | -------------------------------------------------------------------------------- /deepseek-planner/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "deepseek-planner" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "boto3>=1.37.18", 9 | "mcp[cli]>=1.5.0", 10 | "python-dotenv>=1.0.1", 11 | ] 12 | -------------------------------------------------------------------------------- /deepseek-planner/src/server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import json 4 | import boto3 5 | from typing import Optional, Dict, Any, List 6 | from mcp.server.fastmcp import FastMCP, Context 7 | from botocore.client import Config 8 | # import dotenv 9 | # dotenv.load_dotenv() 10 | 11 | # Initialize FastMCP server 12 | mcp = FastMCP("deepseek-planner") 13 | custom_config = Config(connect_timeout=840, read_timeout=840) 14 | MAX_TOKENS = int(os.environ.get("MAX_TOKENS", 16000)) 15 | # Initialize AWS Bedrock client 16 | bedrock_runtime = boto3.client( 17 | service_name="bedrock-runtime", 18 | region_name=os.environ.get("AWS_REGION", "us-east-1"), 19 | aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"), 20 | aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"), 21 | aws_session_token=os.environ.get("AWS_SESSION_TOKEN"), # Optional 22 | config=custom_config, 23 | ) 24 | 25 | # DeepSeek model ID on Bedrock 26 | DEEPSEEK_MODEL_ID = "us.deepseek.r1-v1:0" 27 | 28 | def invoke_deepseek(messages: List[Dict[str, str]], 29 | temperature: float = 0.7, 30 | max_tokens: int = 2048 31 | ) -> str: 32 | """ 33 | Invoke the DeepSeek model via AWS Bedrock using the converse API 34 | """ 35 | try: 36 | # Prepare the request body 37 | body = { 38 | "modelId": DEEPSEEK_MODEL_ID, 39 | "messages": messages[1:], 40 | "system": messages[0]['content'], 41 | "inferenceConfig": { 42 | "maxTokens": max_tokens, 43 | "temperature": temperature, 44 | } 45 | } 46 | 47 | response = bedrock_runtime.converse( 48 | **body 49 | ) 50 | 51 | # Parse the response 52 | text = [content["text"] for content in response["output"]["message"]["content"] if "text" in content][0] 53 | return text 54 | 55 | except Exception as e: 56 | print(f"Error invoking DeepSeek model: {e}") 57 | raise 58 | 59 | @mcp.tool() 60 | async def generate_plan(requirements: str, 61 | context: Optional[str] = None, 62 | format: str = 
"markdown") -> str: 63 | """Generate a detailed project plan based on requirements. 64 | 65 | Args: 66 | requirements: Project requirements and goals 67 | context: Additional context or constraints (optional) 68 | format: Output format (markdown, json, or text) 69 | """ 70 | try: 71 | # Prepare messages for DeepSeek 72 | messages = [ 73 | { 74 | "role": "system", 75 | "content": [{"text":"You are an expert project planner. Your task is to create detailed, actionable project plans based on requirements. Be thorough, practical, and consider all aspects of project planning including timeline, resources, milestones, and potential challenges."}] 76 | }, 77 | { 78 | "role": "user", 79 | "content": [{"text":f"Please create a detailed project plan for the following requirements:\n\n{requirements}\n\n" + f'Additional context: {context}\n\n' if context else '' + f"Please provide the plan in {format} format."}] 80 | } 81 | ] 82 | 83 | # Invoke DeepSeek model 84 | response = invoke_deepseek( 85 | messages=messages, 86 | temperature=0.7, 87 | max_tokens=MAX_TOKENS, 88 | ) 89 | 90 | return response 91 | 92 | except Exception as e: 93 | return f"Error generating plan: {str(e)}" 94 | 95 | @mcp.tool() 96 | async def generate_code(language: str, 97 | task: str, 98 | context: Optional[str] = None, 99 | comments: bool = True) -> str: 100 | """Generate code based on requirements. 101 | 102 | Args: 103 | language: Programming language 104 | task: Description of what the code should do 105 | context: Additional context or existing code (optional) 106 | comments: Whether to include comments in the code 107 | """ 108 | try: 109 | # Prepare messages for DeepSeek 110 | messages = [ 111 | { 112 | "role": "system", 113 | "content": [{"text":"You are an expert programmer. Your task is to generate high-quality, efficient, and well-structured code based on requirements. Follow best practices for the specified programming language."}] 114 | }, 115 | { 116 | "role": "user", 117 | "content": [{"text":f"Please generate {language} code for the following task:\n\n{task}\n\n" + f'Additional context or existing code:\n```{language}\n{context}\n```\n\n' if context else '' + 'Please include detailed comments.' if comments else 'No need for extensive comments.'}] 118 | } 119 | ] 120 | 121 | # Invoke DeepSeek model 122 | response = invoke_deepseek( 123 | messages=messages, 124 | temperature=0.3, # Lower temperature for code generation 125 | max_tokens=MAX_TOKENS, 126 | ) 127 | 128 | return response 129 | 130 | except Exception as e: 131 | return f"Error generating code: {str(e)}" 132 | 133 | @mcp.tool() 134 | async def review_code(language: str, 135 | code: str, 136 | focus: Optional[List[str]] = None) -> str: 137 | """Review code and provide feedback. 138 | 139 | Args: 140 | language: Programming language 141 | code: Code to review 142 | focus: Areas to focus on (bugs, performance, security, style, architecture) 143 | """ 144 | try: 145 | # Prepare messages for DeepSeek 146 | messages = [ 147 | { 148 | "role": "system", 149 | "content": [{"text":"You are an expert code reviewer. Your task is to provide detailed, constructive feedback on code. 
Focus on identifying issues, suggesting improvements, and explaining your reasoning."}] 150 | }, 151 | { 152 | "role": "user", 153 | "content": [{"text":f"Please review the following {language} code:\n\n```{language}\n{code}\n```\n\n" + f'Please focus on these aspects: {", ".join(focus)}' if focus else 'Please provide a comprehensive review.'}] 154 | } 155 | ] 156 | 157 | # Invoke DeepSeek model 158 | response = invoke_deepseek( 159 | messages=messages, 160 | temperature=0.5, 161 | max_tokens=MAX_TOKENS, 162 | ) 163 | 164 | return response 165 | 166 | except Exception as e: 167 | return f"Error reviewing code: {str(e)}" 168 | 169 | @mcp.tool() 170 | async def explain_code(language: str, 171 | code: str, 172 | detail_level: str = "intermediate") -> str: 173 | """Explain code in detail. 174 | 175 | Args: 176 | language: Programming language 177 | code: Code to explain 178 | detail_level: Level of detail in the explanation (basic, intermediate, advanced) 179 | """ 180 | try: 181 | # Prepare messages for DeepSeek 182 | messages = [ 183 | { 184 | "role": "system", 185 | "content": [{"text":"You are an expert programmer and educator. Your task is to explain code clearly and accurately, adapting your explanation to the requested level of detail."}] 186 | }, 187 | { 188 | "role": "user", 189 | "content": [{"text":f"Please explain the following {language} code at a {detail_level} level of detail:\n\n```{language}\n{code}\n```"}] 190 | } 191 | ] 192 | 193 | # Invoke DeepSeek model 194 | response = invoke_deepseek( 195 | messages=messages, 196 | temperature=0.5, 197 | max_tokens=MAX_TOKENS, 198 | ) 199 | 200 | return response 201 | 202 | except Exception as e: 203 | return f"Error explaining code: {str(e)}" 204 | 205 | @mcp.tool() 206 | async def refactor_code(language: str, 207 | code: str, 208 | goals: List[str]) -> str: 209 | """Refactor code to improve quality. 210 | 211 | Args: 212 | language: Programming language 213 | code: Code to refactor 214 | goals: Refactoring goals (readability, performance, modularity, security, maintainability) 215 | """ 216 | try: 217 | # Prepare messages for DeepSeek 218 | messages = [ 219 | { 220 | "role": "system", 221 | "content": [{"text":"You are an expert programmer specializing in code refactoring. Your task is to improve code quality while maintaining functionality. Provide both the refactored code and an explanation of your changes."}] 222 | }, 223 | { 224 | "role": "user", 225 | "content": [{"text":f"Please refactor the following {language} code to improve {', '.join(goals)}:\n\n```{language}\n{code}\n```\n\nProvide the refactored code and explain your changes."}] 226 | } 227 | ] 228 | 229 | # Invoke DeepSeek model 230 | response = invoke_deepseek( 231 | messages=messages, 232 | temperature=0.4, 233 | max_tokens=MAX_TOKENS, 234 | ) 235 | 236 | return response 237 | 238 | except Exception as e: 239 | return f"Error refactoring code: {str(e)}" 240 | 241 | if __name__ == "__main__": 242 | # Check if AWS credentials are set 243 | if not os.environ.get("AWS_ACCESS_KEY_ID") or not os.environ.get("AWS_SECRET_ACCESS_KEY"): 244 | print("AWS credentials are not set. Please set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables.") 245 | exit(1) 246 | 247 | # Run the server 248 | mcp.run(transport='stdio') 249 | 250 | # messages = [ 251 | # { 252 | # "role": "system", 253 | # "content": [{"text":"You are an expert project planner. Your task is to create detailed, actionable project plans based on requirements. 
Be thorough, practical, and consider all aspects of project planning including timeline, resources, milestones, and potential challenges."}] 254 | # }, 255 | # { 256 | # "role": "user", 257 | # "content": [{"text":f"Please create a detailed project plan for the following requirements:\n\n帮我制作一份司美格鲁肽的介绍,包括特色功能,适用范围,发展历史,价格,图文并茂,需要制作成精美的 HTML保存到本地目录. "}] 258 | # } 259 | # ] 260 | 261 | # response = invoke_deepseek( 262 | # messages=messages, 263 | # temperature=0.5, 264 | # max_tokens=MAX_TOKENS, 265 | # ) 266 | 267 | # print(response) -------------------------------------------------------------------------------- /dify_mcp_servers/README.md: -------------------------------------------------------------------------------- 1 | # Dify MCP Servers Project 2 | 3 | An agent developed specifically for Dify, implementing an MCP server. You can integrate your Dify workflow or Dify chat workflow with MCP. 4 | 5 | ## Development Process 6 | 7 | 1. Log in to Dify, select the workflow you want to integrate, check the API address, and create a new API_KEY. 8 | 2. Test the interface using CLI, referring to the Workflow parameter settings. The parameters in the inputs are the workflow inputs: 9 | ```bash 10 | curl -X POST 'https://api.dify.ai/v1/workflows/run' \ 11 | --header 'Authorization: Bearer api-key' \ 12 | --header 'Content-Type: application/json' \ 13 | --data-raw '{ 14 | "inputs": {"ad_data": "你好,请介绍一下自己"}, 15 | "response_mode": "streaming", 16 | "user": "abc-123" 17 | }' 18 | ``` 19 | 3. Use Amazon Q CLI with the prompt: "The cli.txt file in the project contains a runnable HTTP API request example, please refer to this to generate an MCP server similar to weather.py" 20 | 4. weather.py is a sample provided by the MCP official website, so we can reference it to generate our own, or implement it with custom code. 21 | 22 | ## Setup Instructions 23 | 24 | ### Dify MCP Server 25 | 26 | 1. Install the required dependencies: 27 | ```bash 28 | curl -LsSf https://astral.sh/uv/install.sh | sh 29 | pip install -r dify_mcp_server/requirements.txt 30 | ``` 31 | 32 | 2. Run the server: 33 | ```bash 34 | uv init 35 | uv venv 36 | source .venv/bin/activate 37 | uv add "mcp[cli]" httpx 38 | uv run dify_mcp_server/dify_mcp_server.py 39 | ``` 40 | 41 | 3. Configure MCP Server: 42 | ```json 43 | "ad_delivery_data_analysis": { 44 | "command": "uv", 45 | "args": [ 46 | "--directory", 47 | "/Users/lht/Documents/GitHub/dify_mcp_server", 48 | "run", 49 | "dify_mcp_server.py" 50 | ], 51 | "description": "Analysis advertisement delivery data, get insights, and provide advice", 52 | "status": 1 53 | } 54 | ``` 55 | 56 | ## Usage 57 | 58 | To use an MCP server, you need to connect to it and use the tools and resources it provides. Refer to the documentation of each server for more information on how to use it. 
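A quick way to sanity-check the Dify side before wiring it into an MCP client is to call the workflow endpoint directly from Python in blocking mode, which is the mode `dify_mcp_server.py` uses for tool responses. This is a minimal sketch that mirrors the curl example in `cli.txt`; the input field name (`ad_data`) and the API key are placeholders you should replace with your own workflow's values:

```python
import asyncio
import httpx

DIFY_API_BASE = "https://api.dify.ai/v1"
API_KEY = "api-key"  # placeholder: copy the real key from the Dify console

async def run_workflow_once() -> None:
    payload = {
        "inputs": {"ad_data": "你好,请介绍一下自己"},  # your workflow's input fields
        "response_mode": "blocking",  # blocking returns the full result in a single response
        "user": "abc-123",
    }
    headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
    async with httpx.AsyncClient() as client:
        resp = await client.post(f"{DIFY_API_BASE}/workflows/run", headers=headers, json=payload, timeout=60.0)
        resp.raise_for_status()
        # The MCP tool reads data.outputs from this same response structure
        print(resp.json().get("data", {}).get("outputs", {}))

if __name__ == "__main__":
    asyncio.run(run_workflow_once())
```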
59 | -------------------------------------------------------------------------------- /dify_mcp_servers/cli.txt: -------------------------------------------------------------------------------- 1 | curl -X POST 'https://api.dify.ai/v1/workflows/run' \ 2 | --header 'Authorization: Bearer api-key' \ 3 | --header 'Content-Type: application/json' \ 4 | --data-raw '{ 5 | "inputs": {"ad_data": "你好,请介绍一下自己"}, 6 | "response_mode": "streaming", 7 | "user": "abc-123" 8 | }' -------------------------------------------------------------------------------- /dify_mcp_servers/dify_mcp_server.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Optional 2 | import httpx 3 | import json 4 | from mcp.server.fastmcp import FastMCP 5 | 6 | # Initialize FastMCP server 7 | mcp = FastMCP("dify") 8 | 9 | # Constants 10 | DIFY_API_BASE = "https://api.dify.ai/v1" # You can get it from dify console. 11 | DEFAULT_API_KEY = "API_KEY " # You can get the API_KEY from dify console. 12 | 13 | async def make_dify_request(endpoint: str, data: Dict[str, Any], api_key: str = DEFAULT_API_KEY, streaming: bool = True) -> Dict[str, Any]: 14 | """Make a request to the Dify API with proper error handling. 15 | 16 | Args: 17 | endpoint: API endpoint to call 18 | data: Request payload 19 | api_key: Dify API key 20 | streaming: Whether to use streaming response mode 21 | """ 22 | headers = { 23 | "Authorization": f"Bearer {api_key}", 24 | "Content-Type": "application/json" 25 | } 26 | 27 | # Set response mode based on streaming parameter 28 | if "response_mode" not in data: 29 | data["response_mode"] = "streaming" if streaming else "blocking" 30 | 31 | url = f"{DIFY_API_BASE}/{endpoint}" 32 | 33 | async with httpx.AsyncClient() as client: 34 | try: 35 | response = await client.post(url, headers=headers, json=data, timeout=60.0) 36 | response.raise_for_status() 37 | return response.json() 38 | except httpx.HTTPStatusError as e: 39 | return {"error": f"HTTP error: {e.response.status_code}", "details": e.response.text} 40 | except Exception as e: 41 | return {"error": f"Request failed: {str(e)}"} 42 | 43 | @mcp.tool() 44 | async def run_workflow(inputs: Dict[str, str], user_id: Optional[str] = "abc-123", api_key: Optional[str] = None) -> str: 45 | """Run a Dify workflow with the provided inputs. 46 | 47 | Args: 48 | inputs: Dictionary of input parameters for the workflow 49 | user_id: Optional user identifier for the request 50 | api_key: Optional API key to override the default 51 | """ 52 | data = { 53 | "inputs": inputs, 54 | "response_mode": "blocking", # Using blocking for MCP tool response 55 | "user": user_id 56 | } 57 | 58 | result = await make_dify_request("workflows/run", data, api_key or DEFAULT_API_KEY, streaming=False) 59 | 60 | if "error" in result: 61 | return f"Error: {result['error']}\n{result.get('details', '')}" 62 | 63 | # You shoud replace the output processing with your dify workflow input 64 | try: 65 | if "data" in result and "outputs" in result["data"] and "advice" in result["data"]["outputs"]: 66 | advice = result["data"]["outputs"]["advice"] 67 | return advice 68 | else: 69 | return "No advice found in the response." 70 | except Exception as e: 71 | return f"Failed to parse response: {str(e)}" 72 | 73 | @mcp.tool() 74 | async def chat_completion(message: str, conversation_id: Optional[str] = None, 75 | user_id: Optional[str] = None, api_key: Optional[str] = None) -> str: 76 | """Send a message to Dify chat completion API. 
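    Only the "answer" field of the Dify response is returned; request failures are returned as readable error strings.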
77 | 78 | Args: 79 | message: The user message to send 80 | conversation_id: Optional conversation ID for continuing a conversation 81 | user_id: Optional user identifier for the request 82 | api_key: Optional API key to override the default 83 | """ 84 | data = { 85 | "inputs": {}, 86 | "query": message, 87 | "response_mode": "blocking" # Using blocking for MCP tool response 88 | } 89 | 90 | if user_id: 91 | data["user"] = user_id 92 | 93 | if conversation_id: 94 | data["conversation_id"] = conversation_id 95 | 96 | result = await make_dify_request("chat-messages", data, api_key or DEFAULT_API_KEY, streaming=False) 97 | 98 | if "error" in result: 99 | return f"Error: {result['error']}\n{result.get('details', '')}" 100 | 101 | # Extract only the answer from the response 102 | try: 103 | answer = result.get("answer", "No answer provided") 104 | return answer 105 | except Exception as e: 106 | return f"Failed to parse response: {str(e)}" 107 | 108 | @mcp.tool() 109 | async def get_conversation_history(conversation_id: str, first_id: Optional[str] = None, 110 | limit: int = 20, api_key: Optional[str] = None) -> str: 111 | """Retrieve conversation history from Dify. 112 | 113 | Args: 114 | conversation_id: The ID of the conversation to retrieve 115 | first_id: Optional first message ID for pagination 116 | limit: Maximum number of messages to retrieve (default: 20) 117 | api_key: Optional API key to override the default 118 | """ 119 | endpoint = f"conversations/{conversation_id}/messages" 120 | params = {} 121 | 122 | if first_id: 123 | params["first_id"] = first_id 124 | 125 | if limit: 126 | params["limit"] = limit 127 | 128 | headers = { 129 | "Authorization": f"Bearer {api_key or DEFAULT_API_KEY}", 130 | "Content-Type": "application/json" 131 | } 132 | 133 | url = f"{DIFY_API_BASE}/{endpoint}" 134 | 135 | async with httpx.AsyncClient() as client: 136 | try: 137 | response = await client.get(url, headers=headers, params=params, timeout=30.0) 138 | response.raise_for_status() 139 | result = response.json() 140 | 141 | if "data" not in result: 142 | return "No conversation history found or error retrieving history." 
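        # Flatten each message into 'ROLE: content' lines so the history reads like a transcript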
143 | 144 | messages = result["data"] 145 | formatted_messages = [] 146 | 147 | for msg in messages: 148 | role = msg.get("role", "unknown") 149 | content = msg.get("content", "No content") 150 | formatted_messages.append(f"{role.upper()}: {content}") 151 | 152 | return "\n\n".join(formatted_messages) 153 | except Exception as e: 154 | return f"Failed to retrieve conversation history: {str(e)}" 155 | 156 | if __name__ == "__main__": 157 | # Initialize and run the server 158 | mcp.run(transport='stdio') 159 | -------------------------------------------------------------------------------- /dify_mcp_servers/weather.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | import httpx 3 | from mcp.server.fastmcp import FastMCP 4 | 5 | # Initialize FastMCP server 6 | mcp = FastMCP("weather") 7 | 8 | # Constants 9 | NWS_API_BASE = "https://api.weather.gov" 10 | USER_AGENT = "weather-app/1.0" 11 | 12 | async def make_nws_request(url: str) -> dict[str, Any] | None: 13 | """Make a request to the NWS API with proper error handling.""" 14 | headers = { 15 | "User-Agent": USER_AGENT, 16 | "Accept": "application/geo+json" 17 | } 18 | async with httpx.AsyncClient() as client: 19 | try: 20 | response = await client.get(url, headers=headers, timeout=30.0) 21 | response.raise_for_status() 22 | return response.json() 23 | except Exception: 24 | return None 25 | 26 | def format_alert(feature: dict) -> str: 27 | """Format an alert feature into a readable string.""" 28 | props = feature["properties"] 29 | return f""" 30 | Event: {props.get('event', 'Unknown')} 31 | Area: {props.get('areaDesc', 'Unknown')} 32 | Severity: {props.get('severity', 'Unknown')} 33 | Description: {props.get('description', 'No description available')} 34 | Instructions: {props.get('instruction', 'No specific instructions provided')} 35 | """ 36 | 37 | @mcp.tool() 38 | async def get_alerts(state: str) -> str: 39 | """Get weather alerts for a US state. 40 | 41 | Args: 42 | state: Two-letter US state code (e.g. CA, NY) 43 | """ 44 | url = f"{NWS_API_BASE}/alerts/active/area/{state}" 45 | data = await make_nws_request(url) 46 | 47 | if not data or "features" not in data: 48 | return "Unable to fetch alerts or no alerts found." 49 | 50 | if not data["features"]: 51 | return "No active alerts for this state." 52 | 53 | alerts = [format_alert(feature) for feature in data["features"]] 54 | return "\n---\n".join(alerts) 55 | 56 | @mcp.tool() 57 | async def get_forecast(latitude: float, longitude: float) -> str: 58 | """Get weather forecast for a location. 59 | 60 | Args: 61 | latitude: Latitude of the location 62 | longitude: Longitude of the location 63 | """ 64 | # First get the forecast grid endpoint 65 | points_url = f"{NWS_API_BASE}/points/{latitude},{longitude}" 66 | points_data = await make_nws_request(points_url) 67 | 68 | if not points_data: 69 | return "Unable to fetch forecast data for this location." 70 | 71 | # Get the forecast URL from the points response 72 | forecast_url = points_data["properties"]["forecast"] 73 | forecast_data = await make_nws_request(forecast_url) 74 | 75 | if not forecast_data: 76 | return "Unable to fetch detailed forecast." 
77 | 78 | # Format the periods into a readable forecast 79 | periods = forecast_data["properties"]["periods"] 80 | forecasts = [] 81 | for period in periods[:5]: # Only show next 5 periods 82 | forecast = f""" 83 | {period['name']}: 84 | Temperature: {period['temperature']}°{period['temperatureUnit']} 85 | Wind: {period['windSpeed']} {period['windDirection']} 86 | Forecast: {period['detailedForecast']} 87 | """ 88 | forecasts.append(forecast) 89 | 90 | return "\n---\n".join(forecasts) 91 | 92 | 93 | if __name__ == "__main__": 94 | # Initialize and run the server 95 | mcp.run(transport='stdio') -------------------------------------------------------------------------------- /gamelift-mcp-server/.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | *.egg-info/ 20 | .installed.cfg 21 | *.egg 22 | 23 | # Virtual Environment 24 | .env 25 | .venv 26 | env/ 27 | venv/ 28 | ENV/ 29 | 30 | # IDE 31 | .idea/ 32 | .vscode/ 33 | *.swp 34 | *.swo 35 | .DS_Store 36 | 37 | # Project specific 38 | uv.lock 39 | -------------------------------------------------------------------------------- /gamelift-mcp-server/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /gamelift-mcp-server/Dockerfile: -------------------------------------------------------------------------------- 1 | # 基于官方Python 3.12镜像 2 | FROM python:3.12-slim 3 | 4 | # Set environment variables 5 | ENV PYTHONDONTWRITEBYTECODE=1 6 | ENV PYTHONUNBUFFERED=1 7 | 8 | # 安装uv(推荐的Python包管理工具) 9 | RUN pip install --upgrade pip \ 10 | && pip install uv 11 | 12 | # 设置工作目录 13 | WORKDIR /app 14 | 15 | # 复制项目文件 16 | COPY . /app 17 | 18 | # 安装依赖 19 | RUN uv pip install --system --no-cache-dir . 20 | 21 | # 设置环境变量(可根据需要覆盖) 22 | ENV AWS_ACCESS_KEY_ID=xxxxx \ 23 | AWS_SECRET_ACCESS_KEY=xxxxxx \ 24 | AWS_REGION=us-east-1 25 | 26 | # 默认启动命令 27 | CMD ["uv", "run", "src/gamelift_mcp_server.py"] -------------------------------------------------------------------------------- /gamelift-mcp-server/README.md: -------------------------------------------------------------------------------- 1 | # GameLift MCP Server 2 | 3 | This project provides a simple MCP server for managing AWS GameLift fleets and container fleets. It exposes several API endpoints for querying fleet information, attributes, and echo testing. 4 | 5 | ## Features 6 | - Query GameLift fleets in a specific AWS region 7 | - Query GameLift container fleets in a specific AWS region 8 | - Get detailed attributes for a given fleet or container fleet 9 | - Simple echo endpoint for testing 10 | 11 | ## Requirements 12 | - Python 3.12+ 13 | - AWS credentials with GameLift permissions 14 | - The following Python packages: 15 | - boto3 16 | - httpx 17 | - mcp.server.fastmcp (custom or third-party) 18 | 19 | ## Environment Variables 20 | - `AWS_PROFILE`: Your AWS profile name (optional, if not set, will use AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY) 21 | - `AWS_ACCESS_KEY_ID`: Your AWS access key (required if AWS_PROFILE is not set) 22 | - `AWS_SECRET_ACCESS_KEY`: Your AWS secret key (required if AWS_PROFILE is not set) 23 | 24 | ## How to Run 25 | 1. 
Install dependencies: 26 | ```bash 27 | pip install boto3 httpx 28 | # Install mcp.server.fastmcp as required 29 | ``` 30 | 2. Set AWS credentials in your environment (choose one method): 31 | ```bash 32 | # Method 1: Using AWS Profile 33 | export AWS_PROFILE=your_profile_name 34 | 35 | # Method 2: Using Access Keys 36 | export AWS_ACCESS_KEY_ID=your_access_key 37 | export AWS_SECRET_ACCESS_KEY=your_secret_key 38 | ``` 39 | 3. Start the MCP server: 40 | ```bash 41 | python src/gamelift_mcp_server.py 42 | ``` 43 | 44 | ## API Endpoints (Tools) 45 | - `get_game_lift_fleets(region: str = 'us-east-1') -> str`: List all GameLift fleets in the specified region. 46 | - `get_gamelift_container_fleets(region: str = 'us-east-1') -> str`: List all GameLift container fleets in the specified region. 47 | - `get_fleet_attributes(fleet_id: str, region: str = 'us-east-1') -> str`: Get attributes for a specific GameLift fleet. 48 | - `get_container_fleet_attributes(fleet_id: str, region: str = 'us-east-1') -> str`: Get attributes for a specific GameLift container fleet. 49 | - `get_compute_auth_token(fleet_id: str, region: str = 'us-east-1', compute_name: str = '') -> str`: Get compute auth token for an ANYWHERE fleet. 50 | - `get_vpc_peering_connections(fleet_id: str, region: str = 'us-east-1') -> str`: Get VPC peering connections for a specific fleet. 51 | - `get_builds(region: str = 'us-east-1') -> str`: List all GameLift builds in the specified region. 52 | - `get_fleet_capacity(fleet_id_list: List[str], region: str = 'us-east-1') -> str`: Get capacity information for a list of fleets (not supported for ANYWHERE fleets). 53 | 54 | ## Config Mcp Server 55 | ``` 56 | { 57 | "mcpServers": { 58 | "gamelift_mcp_server": { 59 | "command": "uv", 60 | "args": [ 61 | "--directory", 62 | "/path/to/gamelift-mcp-server/src", 63 | "run", 64 | "gamelift_mcp_server.py" 65 | ], 66 | "env": { 67 | "AWS_ACCESS_KEY_ID": "xxxx", 68 | "AWS_SECRET_ACCESS_KEY": "xxxxx", 69 | "AWS_REGION": "us-east-1" 70 | } 71 | } 72 | } 73 | } 74 | ``` 75 | 76 | 77 | ## Notes 78 | - Make sure your AWS account has the necessary GameLift permissions. 79 | - The MCP server is designed for internal or development use. 80 | 81 | --- 82 | 83 | ## License 84 | 85 | This project is licensed under the MIT License. 86 | 87 | ``` 88 | MIT License 89 | 90 | Copyright (c) 2024 91 | 92 | Permission is hereby granted, free of charge, to any person obtaining a copy 93 | of this software and associated documentation files (the "Software"), to deal 94 | in the Software without restriction, including without limitation the rights 95 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 96 | copies of the Software, and to permit persons to whom the Software is 97 | furnished to do so, subject to the following conditions: 98 | 99 | The above copyright notice and this permission notice shall be included in all 100 | copies or substantial portions of the Software. 101 | 102 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 103 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 104 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 105 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 106 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 107 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 108 | SOFTWARE. 
109 | ``` 110 | -------------------------------------------------------------------------------- /gamelift-mcp-server/main.py: -------------------------------------------------------------------------------- 1 | def main(): 2 | print("Hello from gamelift-mcp-server!") 3 | 4 | 5 | if __name__ == "__main__": 6 | main() 7 | -------------------------------------------------------------------------------- /gamelift-mcp-server/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "gamelift-mcp-server" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "boto3>=1.38.18", 9 | "httpx>=0.28.1", 10 | "mcp[cli]>=1.9.0", 11 | ] 12 | -------------------------------------------------------------------------------- /gamelift-mcp-server/src/gamelift_mcp_server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | import httpx 3 | import boto3 4 | import os 5 | import logging 6 | from typing import List 7 | 8 | # create MCP server instance 9 | mcp = FastMCP("gamelift_mcp_server") 10 | 11 | logging.basicConfig(level=logging.INFO) 12 | logger = logging.getLogger("gamelift_mcp_server") 13 | 14 | def get_gamelift_client(region: str): 15 | """Get a GameLift client using either AWS_PROFILE or AWS credentials 16 | 17 | Args: 18 | region: AWS region name 19 | """ 20 | if os.environ.get("AWS_PROFILE"): 21 | session = boto3.Session(profile_name=os.environ.get("AWS_PROFILE")) 22 | return session.client('gamelift', region_name=region) 23 | else: 24 | return boto3.client('gamelift', region_name=region, 25 | aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"), 26 | aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")) 27 | 28 | # define a tool function, expose to client 29 | # @mcp.tool() 30 | # async def echo(message: str) -> str: 31 | # return f"Echo from MCP server: {message}" 32 | 33 | @mcp.tool() 34 | async def get_game_lift_fleets(region: str = os.environ.get("AWS_REGION")) -> str: 35 | """Get gamelift fleet list in specific region 36 | 37 | Args: 38 | region: AWS region name, e.g. us-east-1, if not provided, use us-east-1 as default 39 | """ 40 | client = get_gamelift_client(region) 41 | 42 | # 1. Get All Fleet ID 43 | fleet_ids = [] 44 | response = client.list_fleets() 45 | fleet_ids.extend(response.get('FleetIds', [])) 46 | 47 | # pagination 48 | next_token = response.get('NextToken') 49 | while next_token: 50 | response = client.list_fleets(NextToken=next_token) 51 | fleet_ids.extend(response.get('FleetIds', [])) 52 | next_token = response.get('NextToken') 53 | 54 | logger.info(f"Found {len(fleet_ids)} Fleet") 55 | 56 | # 2. batch call describe_fleet_attributes to get detailed information (API has a limit on the number of fleets per request, usually up to 100) 57 | def chunks(lst, n): 58 | for i in range(0, len(lst), n): 59 | yield lst[i:i + n] 60 | 61 | fleet_details = [] 62 | for chunk_ids in chunks(fleet_ids, 100): 63 | response = client.describe_fleet_attributes(FleetIds=chunk_ids) 64 | fleet_details.extend(response.get('FleetAttributes', [])) 65 | 66 | return fleet_details 67 | 68 | @mcp.tool() 69 | async def get_gamelift_container_fleets(region: str = os.environ.get("AWS_REGION")) -> str: 70 | """Get gamelift container fleet list in specific region 71 | 72 | Args: 73 | region: AWS region name, e.g. 
us-east-1, if not provided, use us-east-1 as default 74 | """ 75 | client = get_gamelift_client(region) 76 | 77 | response = client.list_container_fleets() 78 | return response.get('ContainerFleets', []) 79 | 80 | 81 | @mcp.tool() 82 | async def get_fleet_attributes(fleet_id: str, region: str = os.environ.get("AWS_REGION")) -> str: 83 | """Get fleet attributes by fleet id 84 | 85 | Args: 86 | fleet_id: Gamelift fleet id 87 | """ 88 | client = get_gamelift_client(region) 89 | 90 | response = client.describe_fleet_attributes(FleetIds=[fleet_id]) 91 | return response.get('FleetAttributes', []) 92 | 93 | 94 | @mcp.tool() 95 | async def get_container_fleet_attributes(fleet_id: str, region: str = os.environ.get("AWS_REGION")) -> str: 96 | """Get container fleet attributes by fleet id 97 | 98 | Args: 99 | fleet_id: Gamelift container fleet id 100 | """ 101 | client = get_gamelift_client(region) 102 | 103 | response = client.describe_container_fleet(FleetId=fleet_id) 104 | return response.get('ContainerFleet', []) 105 | 106 | 107 | @mcp.tool() 108 | async def get_compute_auth_token(fleet_id: str, region: str = os.environ.get("AWS_REGION"), compute_name: str = '') -> str: 109 | """Get compute auth token by fleet id and compute name 110 | 111 | Args: 112 | fleet_id: Gamelift fleet id 113 | compute_name: compute name 114 | """ 115 | client = get_gamelift_client(region) 116 | 117 | # 先获取fleet属性,判断是否为ANYWHERE Fleet 118 | attr_response = client.describe_fleet_attributes(FleetIds=[fleet_id]) 119 | attrs = attr_response.get('FleetAttributes', []) 120 | if not attrs or attrs[0].get('ComputeType') != 'ANYWHERE': 121 | raise Exception('Only ANYWHERE Fleets support compute auth token.') 122 | 123 | response = client.get_compute_auth_token(FleetId=fleet_id, ComputeName=compute_name) 124 | return response.get('AuthToken', '') 125 | 126 | 127 | @mcp.tool() 128 | async def get_vpc_peering_connections(fleet_id: str, region: str = os.environ.get("AWS_REGION")) -> str: 129 | """Get vpc peering connections by fleet id 130 | 131 | Args: 132 | fleet_id: Gamelift fleet id 133 | """ 134 | client = get_gamelift_client(region) 135 | 136 | connections = [] 137 | next_token = None 138 | while True: 139 | if next_token: 140 | response = client.describe_vpc_peering_connections(FleetId=fleet_id, NextToken=next_token) 141 | else: 142 | response = client.describe_vpc_peering_connections(FleetId=fleet_id) 143 | connections.extend(response.get('VpcPeeringConnections', [])) 144 | next_token = response.get('NextToken') 145 | if not next_token: 146 | break 147 | return connections 148 | 149 | 150 | @mcp.tool() 151 | async def get_builds(region: str = os.environ.get("AWS_REGION")) -> str: 152 | """Get builds by region 153 | 154 | Args: 155 | region: AWS region name, e.g. 
us-east-1, if not provided, use us-east-1 as default 156 | """ 157 | client = get_gamelift_client(region) 158 | 159 | builds = [] 160 | next_token = None 161 | while True: 162 | if next_token: 163 | response = client.list_builds(NextToken=next_token) 164 | else: 165 | response = client.list_builds() 166 | builds.extend(response.get('Builds', [])) 167 | next_token = response.get('NextToken') 168 | if not next_token: 169 | break 170 | return builds 171 | 172 | 173 | @mcp.tool() 174 | async def get_fleet_capacity(fleet_id_list: List[str], region: str = os.environ.get("AWS_REGION")) -> str: 175 | """Get fleet capacity by fleet id 176 | 177 | Args: 178 | fleet_id: Gamelift fleet id 179 | """ 180 | client = get_gamelift_client(region) 181 | 182 | # check fleet is not a ANYWHERE Fleet 183 | for fleet_id in fleet_id_list: 184 | attr_response = client.describe_fleet_attributes(FleetIds=[fleet_id]) 185 | attrs = attr_response.get('FleetAttributes', []) 186 | if not attrs or attrs[0].get('ComputeType') == 'ANYWHERE': 187 | raise Exception('ANYWHERE Fleets do not support fleet capacity.') 188 | 189 | builds = [] 190 | next_token = None 191 | while True: 192 | if next_token: 193 | response = client.describe_fleet_capacity(FleetIds=fleet_id_list, NextToken=next_token) 194 | else: 195 | response = client.describe_fleet_capacity(FleetIds=fleet_id_list) 196 | builds.extend(response.get('FleetCapacity', [])) 197 | next_token = response.get('NextToken') 198 | if not next_token: 199 | break 200 | return builds 201 | 202 | 203 | # start MCP Server 204 | if __name__ == "__main__": 205 | mcp.run() 206 | -------------------------------------------------------------------------------- /html_render_service/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-mcp-servers-samples/69c58ab6ee4ca135ebb71ba90f8753e1224497d5/html_render_service/.DS_Store -------------------------------------------------------------------------------- /html_render_service/.gitignore: -------------------------------------------------------------------------------- 1 | # Python-generated files 2 | __pycache__/ 3 | *.py[oc] 4 | build/ 5 | dist/ 6 | wheels/ 7 | *.egg-info 8 | 9 | # Virtual environments 10 | .venv 11 | data 12 | files -------------------------------------------------------------------------------- /html_render_service/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /html_render_service/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Osanda Deshan Nimalarathna 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /html_render_service/README.md: -------------------------------------------------------------------------------- 1 | ## Introduction 2 | An MCP server that renders HTML or Markdown content generated by an agent into a web page that can be opened directly in a browser. 3 | 4 | ## Deployment 5 | 6 | ### Step 1: Start the Flask web server with Docker Compose 7 | 8 | 1. Start the service: 9 | 10 | ```bash 11 | cd aws-mcp-servers-samples/html_render_service/web 12 | docker-compose up -d 13 | ``` 14 | 15 | 2. Verify that it started successfully: 16 | 17 | ```bash 18 | curl http://127.0.0.1:5006/ 19 | ``` 20 | 21 | If it returns: 22 | ``` 23 | { 24 | "message": "ok" 25 | } 26 | ``` 27 | the service is up and running. 28 | 29 | 30 | 3. Optional: stop the service: 31 | ```bash 32 | docker-compose down 33 | ``` 34 | 35 | ### Step 2: Add the following to your MCP client configuration: 36 | ```json 37 | {"mcpServers": 38 | { "html_render_service": 39 | { "command": "uv", 40 | "args": 41 | ["--directory","/path/to/html_render_service/src", 42 | "run", 43 | "server.py"] 44 | } 45 | } 46 | } 47 | ``` 48 | 49 | ## Example 50 | 51 | **Input** (a Chinese prompt asking for a 5-day Beijing-to-Shanghai high-speed-rail itinerary, rendered as a polished HTML page via the html render service): 52 | 53 | ``` 54 | 请帮我制定从北京到上海的高铁5日游计划(5月1日-5日),要求: 55 | - 交通:往返高铁选早上出发(5.1)和晚上返程(5.5) 56 | - 必去:迪士尼全天(推荐3个最值得玩的项目+看烟花) 57 | - 推荐:3个上海经典景点(含外滩夜景)和1个特色街区 58 | - 住宿:迪士尼住周边酒店,市区住地铁站附近 59 | - 附:每日大致花费预估和景点预约提醒 60 | 需要制作成精美的 HTML,并使用html render service上传html 61 | ``` 62 | 63 | - Find the link in the result 64 | ![](asset/case_3_1.png) 65 | 66 | - Open the link in a browser 67 | ![](asset/case_3_2.png) 68 | -------------------------------------------------------------------------------- /html_render_service/asset/case_3_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-mcp-servers-samples/69c58ab6ee4ca135ebb71ba90f8753e1224497d5/html_render_service/asset/case_3_1.png -------------------------------------------------------------------------------- /html_render_service/asset/case_3_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-mcp-servers-samples/69c58ab6ee4ca135ebb71ba90f8753e1224497d5/html_render_service/asset/case_3_2.png -------------------------------------------------------------------------------- /html_render_service/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "flask-webserver" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "mcp[cli]>=1.6.0", 9 | "requests>=2.32.3", 10 | ] 11 | -------------------------------------------------------------------------------- /html_render_service/src/server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Flask Web Service Server 4 | It is a web server tool that generates and renders web pages from Markdown and HTML files.
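The MCP tools below (render_markdown and render_html) upload content to the Flask endpoint configured via the `endpoint` environment variable (default http://127.0.0.1:5006) and return a URL for the rendered page.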
5 | """ 6 | import os 7 | import asyncio 8 | from dataclasses import dataclass 9 | from contextlib import asynccontextmanager 10 | from typing import AsyncIterator, Optional, Dict, Any, List 11 | import io 12 | import json 13 | import requests 14 | from mcp.server.fastmcp import FastMCP, Image, Context 15 | 16 | ENDPOINT = os.environ.get("endpoint","http://127.0.0.1:5006") 17 | 18 | @dataclass 19 | class AppContext: 20 | """Application context for lifespan management""" 21 | ready_status: bool 22 | 23 | @asynccontextmanager 24 | async def app_lifespan(server: FastMCP) -> AsyncIterator[AppContext]: 25 | """ 26 | Application lifespan management context manager. 27 | 28 | Args: 29 | server: FastMCP server instance 30 | 31 | Yields: 32 | AppContext: Application context with ready_status 33 | """ 34 | try: 35 | response = requests.get(f"{ENDPOINT}/") 36 | response.raise_for_status() 37 | yield AppContext(ready_status=True) 38 | 39 | except requests.exceptions.RequestException as e: 40 | raise ValueError(f"Error connecting to server: {e}") 41 | 42 | # Create MCP server 43 | mcp = FastMCP( 44 | "Flask Web Service Server", 45 | app_lifespan=app_lifespan, 46 | dependencies=['requests']) 47 | 48 | @mcp.tool() 49 | async def render_markdown(file_name:str, markdown_content: str) -> str: 50 | """ 51 | uploads markdown it to a server. 52 | 53 | Args: 54 | file_name: Name of the markdown file to be rendered 55 | markdown_content: Markdown content to be rendered 56 | 57 | Returns: 58 | URL string to access the rendered HTML page 59 | """ 60 | try: 61 | response = requests.post(f"{ENDPOINT}/upload_markdown", json={"file_name": file_name, "file_content": markdown_content}) 62 | response.raise_for_status() 63 | return response.json()['url'] 64 | except requests.exceptions.RequestException as e: 65 | raise ValueError(f"Error uploading HTML file: {e}") 66 | 67 | @mcp.tool() 68 | async def render_html(file_name:str, html_content: str) -> str: 69 | """ 70 | Upload the HTML content to a server. 71 | 72 | Args: 73 | file_name: Name of the HTML file to be rendered 74 | html_content: HTML content to be rendered 75 | 76 | Returns: 77 | URL string to access the rendered HTML page 78 | """ 79 | try: 80 | response = requests.post(f"{ENDPOINT}/upload_html", json={"file_name": file_name, "file_content": html_content}) 81 | response.raise_for_status() 82 | return response.json()['url'] 83 | except requests.exceptions.RequestException as e: 84 | raise ValueError(f"Error uploading HTML file: {e}") 85 | 86 | 87 | if __name__ == "__main__": 88 | # print(render_markdown("test.md","## abcd")) 89 | # print(render_html("test2.html","abcd2")) 90 | mcp.run() 91 | -------------------------------------------------------------------------------- /html_render_service/web/Dockerfile: -------------------------------------------------------------------------------- 1 | # 使用官方的 Python 镜像作为基础镜像 2 | FROM python:3.11-slim 3 | 4 | # 设置工作目录 5 | WORKDIR /app 6 | 7 | # 复制 requirements.txt 并安装依赖 8 | COPY requirements.txt . 9 | RUN pip install -r requirements.txt 10 | 11 | # 复制当前目录下的所有文件到工作目录 12 | COPY . . 
13 | 14 | # 设置容器启动时执行的命令 15 | CMD ["python", "main.py"] -------------------------------------------------------------------------------- /html_render_service/web/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-mcp-servers-samples/69c58ab6ee4ca135ebb71ba90f8753e1224497d5/html_render_service/web/app/__init__.py -------------------------------------------------------------------------------- /html_render_service/web/app/extensions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-mcp-servers-samples/69c58ab6ee4ca135ebb71ba90f8753e1224497d5/html_render_service/web/app/extensions/__init__.py -------------------------------------------------------------------------------- /html_render_service/web/app/extensions/checkbox.py: -------------------------------------------------------------------------------- 1 | # stolen from: https://github.com/FND/markdown-checklist/blob/master/markdown_checklist/extension.py 2 | import re 3 | 4 | from markdown.extensions import Extension 5 | from markdown.preprocessors import Preprocessor 6 | from markdown.postprocessors import Postprocessor 7 | 8 | 9 | def makeExtension(configs=None): 10 | if configs is None: 11 | return ChecklistExtension() 12 | else: 13 | return ChecklistExtension(configs=configs) 14 | 15 | 16 | class ChecklistExtension(Extension): 17 | 18 | def __init__(self, **kwargs): 19 | self.config = { 20 | "list_class": ["checklist", 21 | "class name to add to the list element"], 22 | "render_item": [render_item, "custom function to render items"] 23 | } 24 | super().__init__(**kwargs) 25 | 26 | def extendMarkdown(self, md, md_globals): 27 | list_class = self.getConfig("list_class") 28 | renderer = self.getConfig("render_item") 29 | postprocessor = ChecklistPostprocessor(list_class, renderer, md) 30 | md.postprocessors.add("checklist", postprocessor, ">raw_html") 31 | 32 | 33 | class ChecklistPostprocessor(Postprocessor): 34 | """ 35 | adds checklist class to list element 36 | """ 37 | 38 | list_pattern = re.compile(r"(