├── src ├── index.ts └── sysoperator │ ├── common │ ├── version.ts │ └── errors.ts │ └── operations │ ├── inventory.ts │ ├── ad_hoc.ts │ ├── terraform.README.md │ ├── playbooks.ts │ └── vault.ts ├── localstack ├── inventory.ini ├── tsconfig.json ├── config.js ├── run_sample_playbook.mjs ├── localstack_test.js ├── sample_playbook.yml ├── README.md ├── localstack_test.mjs ├── restore_original.mjs ├── mcp_localstack_patch.js └── test_mcp_integration.mjs ├── demos ├── aws-lamp-stack │ ├── .security_ids.yml │ ├── .vpc_ids.yml │ ├── roles │ │ ├── web │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ ├── main.yml │ │ │ │ └── deploy_app.yml │ │ ├── common │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── efs │ │ │ └── tasks │ │ │ │ └── main.yml │ │ └── db │ │ │ └── tasks │ │ │ └── main.yml │ ├── ansible.cfg │ ├── inventory │ │ ├── localstack_hosts.yml │ │ └── aws_hosts.yml │ ├── playbooks │ │ └── main.yml │ ├── cleanup_aws.sh │ ├── group_vars │ │ ├── aws.yml │ │ ├── localstack.yml │ │ └── all.yml │ ├── deploy_to_aws.sh │ ├── LOCALSTACK_COMPATIBILITY.md │ └── test_with_localstack.sh └── aws-terraform-lamp │ ├── ansible │ ├── roles │ │ ├── db_client │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ ├── db-credentials.cnf.j2 │ │ │ │ ├── logrotate-db.j2 │ │ │ │ ├── my.cnf.j2 │ │ │ │ ├── db-env.j2 │ │ │ │ └── cloudwatch-db.json.j2 │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── efs_client │ │ │ └── templates │ │ │ │ └── efs-selinux.te.j2 │ │ ├── common │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── sshd_config.j2 │ │ │ │ └── ntp.conf.j2 │ │ └── web │ │ │ ├── handlers │ │ │ └── main.yml │ │ │ └── templates │ │ │ └── info.php.j2 │ ├── ansible.cfg │ └── inventory │ │ └── localstack.yml │ ├── terraform │ ├── modules │ │ ├── storage │ │ │ ├── outputs.tf │ │ │ ├── variables.tf │ │ │ └── main.tf │ │ ├── loadbalancing │ │ │ ├── outputs.tf │ │ │ └── 
variables.tf │ │ ├── security │ │ │ ├── variables.tf │ │ │ └── outputs.tf │ │ ├── waf │ │ │ ├── variables.tf │ │ │ └── outputs.tf │ │ ├── dns │ │ │ ├── variables.tf │ │ │ ├── outputs.tf │ │ │ └── main.tf │ │ ├── compute │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── database │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── networking │ │ │ ├── variables.tf │ │ │ └── outputs.tf │ │ └── monitoring │ │ │ ├── variables.tf │ │ │ └── outputs.tf │ └── terraform.tfvars │ ├── LICENSE │ ├── .gitignore │ └── LOCALSTACK_COMPATIBILITY.md ├── examples ├── cline_mcp_settings.json ├── inventory.ini ├── ansible.cfg ├── templates │ ├── nginx.conf.j2 │ └── haproxy.cfg.j2 ├── example-script.js └── playbook.yml ├── tsconfig.json ├── .gitignore ├── .dockerignore ├── docker-compose.yml ├── package.json ├── LICENSE ├── aws_inventory.yml ├── docker-test.js ├── Dockerfile ├── cloudformation_example.yml ├── cloudformation_template.json ├── docker-build-run.sh ├── AWS_README.md ├── docker-README.md └── aws_example.yml /src/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import './sysoperator/index.js'; 3 | -------------------------------------------------------------------------------- /src/sysoperator/common/version.ts: -------------------------------------------------------------------------------- 1 | export const VERSION = '0.1.0'; 2 | -------------------------------------------------------------------------------- /localstack/inventory.ini: -------------------------------------------------------------------------------- 1 | [local] 2 | localhost ansible_connection=local 3 | 4 | [aws_local] 5 | localhost ansible_connection=local 6 | 7 | [aws_local:vars] 8 | ansible_python_interpreter=python3 9 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/.security_ids.yml: -------------------------------------------------------------------------------- 1 | alb_sg_id: 
sg-alb 2 | web_sg_id: sg-web 3 | db_sg_id: sg-db 4 | efs_sg_id: sg-efs 5 | ec2_role_name: lamp-stack-ec2-role 6 | ec2_instance_profile_name: lamp-stack-ec2-profile 7 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/.vpc_ids.yml: -------------------------------------------------------------------------------- 1 | vpc_id: vpc-12345678 2 | public_subnet_1_id: subnet-pub1 3 | public_subnet_2_id: subnet-pub2 4 | private_subnet_1_id: subnet-priv1 5 | private_subnet_2_id: subnet-priv2 6 | private_subnet_3_id: subnet-priv3 7 | private_subnet_4_id: subnet-priv4 8 | -------------------------------------------------------------------------------- /examples/cline_mcp_settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "sysoperator": { 4 | "command": "node", 5 | "args": ["/absolute/path/to/mcp-sysoperator/build/index.js"], 6 | "env": {}, 7 | "disabled": false, 8 | "autoApprove": [] 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/roles/web/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Web role handlers 3 | 4 | - name: Restart Apache 5 | service: 6 | name: httpd 7 | state: restarted 8 | when: environment != 'localstack' 9 | 10 | - name: Restart PHP-FPM 11 | service: 12 | name: php-fpm 13 | state: restarted 14 | when: environment != 'localstack' 15 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Common role handlers 3 | 4 | - name: Restart SSH 5 | service: 6 | name: sshd 7 | state: restarted 8 | when: environment != 'localstack' 9 | 10 | - name: Restart firewalld 11 | service: 12 | name: firewalld 13 | state: restarted 14 | 
when: environment != 'localstack' 15 | -------------------------------------------------------------------------------- /localstack/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "rootDir": "..", 5 | "outDir": "../build-localstack", 6 | "types": ["node"], 7 | "target": "ES2020", 8 | "lib": ["ES2020", "DOM"], 9 | "module": "NodeNext", 10 | "moduleResolution": "NodeNext" 11 | }, 12 | "include": [ 13 | "./**/*" 14 | ], 15 | "exclude": ["node_modules"] 16 | } 17 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "NodeNext", 5 | "moduleResolution": "NodeNext", 6 | "esModuleInterop": true, 7 | "outDir": "./build", 8 | "rootDir": "./src", 9 | "strict": true, 10 | "declaration": true, 11 | "skipLibCheck": true, 12 | "resolveJsonModule": true 13 | }, 14 | "include": ["src/**/*"], 15 | "exclude": ["node_modules", "build"] 16 | } 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependency directories 2 | node_modules/ 3 | npm-debug.log 4 | yarn-debug.log 5 | yarn-error.log 6 | 7 | # Build artifacts 8 | build/ 9 | dist/ 10 | *.tsbuildinfo 11 | 12 | # Environment variables 13 | .env 14 | 15 | # Logs 16 | logs 17 | *.log 18 | ansible.log 19 | 20 | # Operating System Files 21 | .DS_Store 22 | .DS_Store? 
23 | ._* 24 | .Spotlight-V100 25 | .Trashes 26 | ehthumbs.db 27 | Thumbs.db 28 | 29 | # Editor directories and files 30 | .idea 31 | .vscode 32 | *.swp 33 | *.swo 34 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Version control 2 | .git 3 | .gitignore 4 | 5 | # Node.js 6 | node_modules 7 | npm-debug.log 8 | 9 | # Build artifacts 10 | build 11 | 12 | # Docker files 13 | Dockerfile 14 | docker-compose.yml 15 | docker-README.md 16 | docker-build-run.sh 17 | .dockerignore 18 | 19 | # AWS example files 20 | aws_example.yml 21 | aws_inventory.yml 22 | cloudformation_example.yml 23 | cloudformation_template.json 24 | 25 | # AWS examples 26 | demos/aws-lamp-stack/ 27 | demos/aws-terraform-lamp/ 28 | 29 | # Examples 30 | examples/ 31 | 32 | # LocalStack 33 | localstack/ 34 | 35 | # Misc 36 | .DS_Store 37 | *.log 38 | *.swp 39 | *.swo 40 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # DB Client Role Handlers 3 | 4 | - name: restart cloudwatch agent 5 | service: 6 | name: amazon-cloudwatch-agent 7 | state: restarted 8 | enabled: yes 9 | become: true 10 | 11 | - name: reload systemd 12 | command: systemctl daemon-reload 13 | become: true 14 | 15 | - name: reload profile 16 | shell: source /etc/profile 17 | args: 18 | executable: /bin/bash 19 | become: true 20 | 21 | - name: reload cron 22 | service: 23 | name: "{{ 'cron' if ansible_os_family == 'Debian' else 'crond' }}" 24 | state: reloaded 25 | become: true 26 | -------------------------------------------------------------------------------- /examples/inventory.ini: -------------------------------------------------------------------------------- 1 | [webservers] 2 | web1.example.com 
ansible_host=192.168.1.101 3 | web2.example.com ansible_host=192.168.1.102 4 | 5 | [dbservers] 6 | db1.example.com ansible_host=192.168.1.201 7 | db2.example.com ansible_host=192.168.1.202 8 | 9 | [loadbalancers] 10 | lb1.example.com ansible_host=192.168.1.10 11 | 12 | [monitoring] 13 | monitor.example.com ansible_host=192.168.1.50 14 | 15 | [webservers:vars] 16 | http_port=80 17 | https_port=443 18 | ansible_user=webadmin 19 | 20 | [dbservers:vars] 21 | postgres_port=5432 22 | ansible_user=dbadmin 23 | 24 | [all:vars] 25 | ansible_ssh_private_key_file=~/.ssh/id_rsa 26 | ansible_python_interpreter=/usr/bin/python3 27 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | sysoperator-mcp: 5 | build: 6 | context: . 7 | dockerfile: Dockerfile 8 | image: sysoperator-mcp 9 | stdin_open: true # Equivalent to -i in docker run 10 | tty: false # We don't need a TTY for the MCP server 11 | volumes: 12 | # Uncomment and modify these volume mounts as needed 13 | # - ./playbooks:/playbooks 14 | # - ~/.aws:/root/.aws 15 | # - ./terraform:/terraform 16 | # environment: 17 | # Uncomment and set these environment variables as needed 18 | # - AWS_ACCESS_KEY_ID=your_key 19 | # - AWS_SECRET_ACCESS_KEY=your_secret 20 | # - AWS_DEFAULT_REGION=us-west-2 21 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory 3 | roles_path = roles 4 | library = library 5 | host_key_checking = False 6 | retry_files_enabled = False 7 | stdout_callback = yaml 8 | bin_ansible_callbacks = True 9 | deprecation_warnings = False 10 | command_warnings = False 11 | interpreter_python = auto_silent 12 | 13 | [ssh_connection] 14 | pipelining = True 15 | 
control_path = /tmp/ansible-ssh-%%h-%%p-%%r 16 | ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no 17 | 18 | [inventory] 19 | enable_plugins = host_list, script, auto, yaml, ini, toml 20 | 21 | [privilege_escalation] 22 | become = False 23 | become_method = sudo 24 | become_user = root 25 | become_ask_pass = False 26 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mcp-sysoperator", 3 | "version": "0.1.0", 4 | "description": "Model Context Protocol server for Infrastructure as Code operations (Ansible, Terraform)", 5 | "main": "build/index.js", 6 | "type": "module", 7 | "scripts": { 8 | "build": "tsc && node -e \"require('fs').chmodSync('build/index.js', '755')\"", 9 | "start": "node build/index.js", 10 | "dev": "tsc -w" 11 | }, 12 | "author": "", 13 | "license": "MIT", 14 | "dependencies": { 15 | "@modelcontextprotocol/sdk": "latest", 16 | "axios": "^1.6.2", 17 | "zod": "^3.22.4", 18 | "zod-to-json-schema": "^3.22.3" 19 | }, 20 | "devDependencies": { 21 | "@types/node": "^20.17.42", 22 | "typescript": "^5.3.2" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/efs_client/templates/efs-selinux.te.j2: -------------------------------------------------------------------------------- 1 | module efs 1.0; 2 | 3 | require { 4 | type httpd_t; 5 | type nfs_t; 6 | type unconfined_t; 7 | class file { read write getattr open create unlink rename setattr }; 8 | class dir { read write getattr open create rmdir search add_name remove_name }; 9 | } 10 | 11 | #============= httpd_t ============== 12 | allow httpd_t nfs_t:dir { read write getattr open create rmdir search add_name remove_name }; 13 | allow httpd_t nfs_t:file { read write getattr open create unlink rename setattr }; 14 | 15 | #============= 
unconfined_t ============== 16 | allow unconfined_t nfs_t:dir { read write getattr open create rmdir search add_name remove_name }; 17 | allow unconfined_t nfs_t:file { read write getattr open create unlink rename setattr }; 18 | -------------------------------------------------------------------------------- /examples/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = ./inventory.ini 3 | host_key_checking = False 4 | retry_files_enabled = False 5 | roles_path = ./roles 6 | log_path = ./ansible.log 7 | forks = 5 8 | timeout = 30 9 | gathering = smart 10 | display_skipped_hosts = True 11 | command_warnings = False 12 | 13 | [privilege_escalation] 14 | become = True 15 | become_method = sudo 16 | become_user = root 17 | become_ask_pass = False 18 | 19 | [ssh_connection] 20 | ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null 21 | pipelining = True 22 | control_path = /tmp/ansible-ssh-%%h-%%p-%%r 23 | 24 | [colors] 25 | highlight = white 26 | verbose = blue 27 | warn = bright purple 28 | error = red 29 | debug = dark gray 30 | deprecate = purple 31 | skip = cyan 32 | unreachable = red 33 | ok = green 34 | changed = yellow 35 | diff_add = green 36 | diff_remove = red 37 | diff_lines = cyan 38 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: db_client 4 | author: DevOps Team 5 | description: Installs and configures MySQL client and related tools for LAMP stack 6 | company: LAMP Stack AWS 7 | license: MIT 8 | min_ansible_version: 2.9 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - bionic 13 | - focal 14 | - jammy 15 | - name: Debian 16 | versions: 17 | - stretch 18 | - buster 19 | - bullseye 20 | - name: Amazon 21 | versions: 22 | - 2 23 
| - 2023 24 | - name: EL 25 | versions: 26 | - 7 27 | - 8 28 | - 9 29 | galaxy_tags: 30 | - database 31 | - mysql 32 | - mariadb 33 | - aws 34 | - rds 35 | - lamp 36 | - web 37 | - monitoring 38 | - backup 39 | 40 | dependencies: 41 | - role: common 42 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/storage/outputs.tf: -------------------------------------------------------------------------------- 1 | # Storage Module Outputs 2 | 3 | output "efs_id" { 4 | description = "ID of the EFS file system" 5 | value = aws_efs_file_system.main.id 6 | } 7 | 8 | output "efs_arn" { 9 | description = "ARN of the EFS file system" 10 | value = aws_efs_file_system.main.arn 11 | } 12 | 13 | output "efs_dns_name" { 14 | description = "DNS name of the EFS file system" 15 | value = aws_efs_file_system.main.dns_name 16 | } 17 | 18 | output "efs_mount_targets" { 19 | description = "Mount targets of the EFS file system" 20 | value = aws_efs_mount_target.main[*].id 21 | } 22 | 23 | output "efs_access_point_id" { 24 | description = "ID of the EFS access point" 25 | value = aws_efs_access_point.main.id 26 | } 27 | 28 | output "efs_access_point_arn" { 29 | description = "ARN of the EFS access point" 30 | value = aws_efs_access_point.main.arn 31 | } 32 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/inventory/localstack_hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # LocalStack Inventory for LAMP Stack Testing 3 | 4 | all: 5 | vars: 6 | ansible_connection: local 7 | domain_name: testerlab.com 8 | environment: localstack 9 | aws_cli_command: awslocal 10 | aws_region: us-east-1 11 | 12 | children: 13 | webservers: 14 | hosts: 15 | web1: 16 | ansible_host: localhost 17 | web2: 18 | ansible_host: localhost 19 | 20 | dbservers: 21 | hosts: 22 | db1: 23 | ansible_host: localhost 24 | db2: 25 | ansible_host: 
localhost 26 | 27 | loadbalancers: 28 | hosts: 29 | alb: 30 | ansible_host: localhost 31 | 32 | efs: 33 | hosts: 34 | efs1: 35 | ansible_host: localhost 36 | 37 | localhost: 38 | hosts: 39 | localhost: 40 | ansible_connection: local 41 | -------------------------------------------------------------------------------- /examples/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | server { 2 | listen {{ http_port }} default_server; 3 | listen [::]:{{ http_port }} default_server; 4 | 5 | {% if https_port is defined %} 6 | listen {{ https_port }} ssl; 7 | listen [::]:{{ https_port }} ssl; 8 | {% endif %} 9 | 10 | server_name {{ ansible_hostname }}; 11 | 12 | root {{ deploy_dir }}; 13 | index index.html index.htm; 14 | 15 | location / { 16 | try_files $uri $uri/ =404; 17 | } 18 | 19 | location /api { 20 | proxy_pass http://localhost:8000; 21 | proxy_http_version 1.1; 22 | proxy_set_header Upgrade $http_upgrade; 23 | proxy_set_header Connection 'upgrade'; 24 | proxy_set_header Host $host; 25 | proxy_cache_bypass $http_upgrade; 26 | } 27 | 28 | # Additional configuration 29 | access_log /var/log/nginx/{{ app_name }}-access.log; 30 | error_log /var/log/nginx/{{ app_name }}-error.log; 31 | } 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 jascha 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be 
included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/storage/variables.tf: -------------------------------------------------------------------------------- 1 | # Storage Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "vpc_id" { 14 | description = "ID of the VPC" 15 | type = string 16 | } 17 | 18 | variable "subnet_ids" { 19 | description = "List of subnet IDs for EFS mount targets" 20 | type = list(string) 21 | } 22 | 23 | variable "security_group_id" { 24 | description = "ID of the security group for EFS" 25 | type = string 26 | } 27 | 28 | variable "performance_mode" { 29 | description = "EFS performance mode" 30 | type = string 31 | default = "generalPurpose" 32 | } 33 | 34 | variable "throughput_mode" { 35 | description = "EFS throughput mode" 36 | type = string 37 | default = "bursting" 38 | } 39 | 40 | variable "tags" { 41 | description = "A map of tags to add to all resources" 42 | type = map(string) 43 | default = {} 44 | } 45 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/loadbalancing/outputs.tf: -------------------------------------------------------------------------------- 1 | # 
Load Balancing Module Outputs 2 | 3 | output "alb_id" { 4 | description = "ID of the ALB" 5 | value = aws_lb.main.id 6 | } 7 | 8 | output "alb_arn" { 9 | description = "ARN of the ALB" 10 | value = aws_lb.main.arn 11 | } 12 | 13 | output "alb_dns_name" { 14 | description = "DNS name of the ALB" 15 | value = aws_lb.main.dns_name 16 | } 17 | 18 | output "alb_zone_id" { 19 | description = "Zone ID of the ALB" 20 | value = aws_lb.main.zone_id 21 | } 22 | 23 | output "target_group_arn" { 24 | description = "ARN of the target group" 25 | value = aws_lb_target_group.main.arn 26 | } 27 | 28 | output "target_group_name" { 29 | description = "Name of the target group" 30 | value = aws_lb_target_group.main.name 31 | } 32 | 33 | output "http_listener_arn" { 34 | description = "ARN of the HTTP listener" 35 | value = aws_lb_listener.http.arn 36 | } 37 | 38 | output "https_listener_arn" { 39 | description = "ARN of the HTTPS listener" 40 | value = length(aws_lb_listener.https) > 0 ? aws_lb_listener.https[0].arn : null 41 | } 42 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 LAMP Stack on AWS Project 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/security/variables.tf: -------------------------------------------------------------------------------- 1 | # Security Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "vpc_id" { 14 | description = "ID of the VPC" 15 | type = string 16 | } 17 | 18 | variable "vpc_cidr" { 19 | description = "CIDR block of the VPC" 20 | type = string 21 | default = "10.0.0.0/16" 22 | } 23 | 24 | variable "public_subnet_ids" { 25 | description = "List of public subnet IDs" 26 | type = list(string) 27 | default = [] 28 | } 29 | 30 | variable "private_subnet_ids" { 31 | description = "List of private subnet IDs" 32 | type = list(string) 33 | default = [] 34 | } 35 | 36 | variable "database_subnet_ids" { 37 | description = "List of database subnet IDs" 38 | type = list(string) 39 | default = [] 40 | } 41 | 42 | variable "tags" { 43 | description = "A map of tags to add to all resources" 44 | type = map(string) 45 | default = {} 46 | } 47 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/waf/variables.tf: -------------------------------------------------------------------------------- 1 | # WAF Module 
Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "alb_arn" { 14 | description = "ARN of the ALB to associate with the WAF" 15 | type = string 16 | } 17 | 18 | variable "allowed_ips" { 19 | description = "List of allowed IP addresses in CIDR notation" 20 | type = list(string) 21 | default = [] 22 | } 23 | 24 | variable "blocked_ips" { 25 | description = "List of blocked IP addresses in CIDR notation" 26 | type = list(string) 27 | default = [] 28 | } 29 | 30 | variable "rate_limit" { 31 | description = "Rate limit for requests from a single IP" 32 | type = number 33 | default = 1000 34 | } 35 | 36 | variable "enable_logging" { 37 | description = "Whether to enable logging for the WAF" 38 | type = bool 39 | default = true 40 | } 41 | 42 | variable "tags" { 43 | description = "A map of tags to add to all resources" 44 | type = map(string) 45 | default = {} 46 | } 47 | -------------------------------------------------------------------------------- /examples/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | log /dev/log local0 3 | log /dev/log local1 notice 4 | chroot /var/lib/haproxy 5 | stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners 6 | stats timeout 30s 7 | user haproxy 8 | group haproxy 9 | daemon 10 | 11 | defaults 12 | log global 13 | mode http 14 | option httplog 15 | option dontlognull 16 | timeout connect 5000 17 | timeout client 50000 18 | timeout server 50000 19 | errorfile 400 /etc/haproxy/errors/400.http 20 | errorfile 403 /etc/haproxy/errors/403.http 21 | errorfile 408 /etc/haproxy/errors/408.http 22 | errorfile 500 /etc/haproxy/errors/500.http 23 | errorfile 502 /etc/haproxy/errors/502.http 24 | errorfile 503 /etc/haproxy/errors/503.http 25 | errorfile 504 
/etc/haproxy/errors/504.http 26 | 27 | frontend http_front 28 | bind *:80 29 | stats uri /haproxy?stats 30 | default_backend webservers 31 | 32 | backend webservers 33 | balance roundrobin 34 | option httpchk GET / 35 | http-check expect status 200 36 | {% for host in groups['webservers'] %} 37 | server {{ host }} {{ hostvars[host]['ansible_host'] }}:80 check 38 | {% endfor %} 39 | -------------------------------------------------------------------------------- /localstack/config.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Configuration for LocalStack integration 3 | * This file provides configuration options for using LocalStack with the MCP Ansible server 4 | */ 5 | 6 | module.exports = { 7 | /** 8 | * Whether to use LocalStack instead of real AWS 9 | * Set to true to use LocalStack, false to use real AWS 10 | */ 11 | useLocalStack: true, 12 | 13 | /** 14 | * LocalStack endpoint URL 15 | * Default: http://localhost:4566 16 | */ 17 | localStackEndpoint: 'http://localhost:4566', 18 | 19 | /** 20 | * AWS region to use with LocalStack 21 | * Default: us-east-1 22 | */ 23 | region: 'us-east-1', 24 | 25 | /** 26 | * Whether to verify SSL certificates when connecting to LocalStack 27 | * Default: false 28 | */ 29 | verifySSL: false, 30 | 31 | /** 32 | * Whether to log commands executed with LocalStack 33 | * Default: true 34 | */ 35 | logCommands: true, 36 | 37 | /** 38 | * Path to the awslocal CLI 39 | * Default: awslocal (assumes it's in PATH) 40 | */ 41 | awslocalPath: 'awslocal', 42 | 43 | /** 44 | * Path to the localstack CLI 45 | * Default: localstack (assumes it's in PATH) 46 | */ 47 | localstackPath: 'localstack' 48 | }; 49 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/playbooks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Main playbook 3 | # This playbook orchestrates the entire LAMP stack 
deployment 4 | 5 | - name: Set environment variable 6 | hosts: localhost 7 | gather_facts: false 8 | tasks: 9 | - name: Set environment fact 10 | set_fact: 11 | environment: "{{ lookup('env', 'ENVIRONMENT') | default('localstack', true) }}" 12 | register: env_result 13 | 14 | - name: Display environment 15 | debug: 16 | msg: "Deploying to {{ environment }} environment" 17 | 18 | # Infrastructure playbooks 19 | - import_playbook: vpc.yml 20 | - import_playbook: security.yml 21 | - import_playbook: efs.yml 22 | vars: 23 | any_errors_fatal: false 24 | - import_playbook: rds.yml 25 | vars: 26 | any_errors_fatal: false 27 | - import_playbook: ec2.yml 28 | vars: 29 | any_errors_fatal: false 30 | - import_playbook: loadbalancer.yml 31 | vars: 32 | any_errors_fatal: false 33 | - import_playbook: dns_ssl.yml 34 | vars: 35 | any_errors_fatal: false 36 | 37 | # Configuration playbooks 38 | - name: Configure web servers 39 | hosts: web_servers 40 | become: true 41 | roles: 42 | - common 43 | - efs 44 | - web 45 | - db 46 | vars: 47 | deploy_app: true 48 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/dns/variables.tf: -------------------------------------------------------------------------------- 1 | # DNS Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "domain_name" { 14 | description = "Domain name for the application" 15 | type = string 16 | default = "" 17 | } 18 | 19 | variable "subdomain" { 20 | description = "Subdomain for the application" 21 | type = string 22 | default = "" 23 | } 24 | 25 | variable "alb_dns_name" { 26 | description = "DNS name of the ALB" 27 | type = string 28 | } 29 | 30 | variable "alb_zone_id" { 31 | description = "Zone ID of the ALB" 32 | type = string 33 | } 34 | 35 | 
variable "create_www_record" { 36 | description = "Whether to create a www record" 37 | type = bool 38 | default = true 39 | } 40 | 41 | variable "create_certificate" { 42 | description = "Whether to create an ACM certificate" 43 | type = bool 44 | default = true 45 | } 46 | 47 | variable "tags" { 48 | description = "A map of tags to add to all resources" 49 | type = map(string) 50 | default = {} 51 | } 52 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/compute/outputs.tf: -------------------------------------------------------------------------------- 1 | # Compute Module Outputs 2 | 3 | output "launch_template_id" { 4 | description = "ID of the launch template" 5 | value = aws_launch_template.main.id 6 | } 7 | 8 | output "launch_template_arn" { 9 | description = "ARN of the launch template" 10 | value = aws_launch_template.main.arn 11 | } 12 | 13 | output "launch_template_latest_version" { 14 | description = "Latest version of the launch template" 15 | value = aws_launch_template.main.latest_version 16 | } 17 | 18 | output "autoscaling_group_id" { 19 | description = "ID of the Auto Scaling Group" 20 | value = aws_autoscaling_group.main.id 21 | } 22 | 23 | output "autoscaling_group_name" { 24 | description = "Name of the Auto Scaling Group" 25 | value = aws_autoscaling_group.main.name 26 | } 27 | 28 | output "autoscaling_group_arn" { 29 | description = "ARN of the Auto Scaling Group" 30 | value = aws_autoscaling_group.main.arn 31 | } 32 | 33 | output "scale_up_policy_arn" { 34 | description = "ARN of the scale up policy" 35 | value = aws_autoscaling_policy.scale_up.arn 36 | } 37 | 38 | output "scale_down_policy_arn" { 39 | description = "ARN of the scale down policy" 40 | value = aws_autoscaling_policy.scale_down.arn 41 | } 42 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/loadbalancing/variables.tf: 
-------------------------------------------------------------------------------- 1 | # Load Balancing Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "vpc_id" { 14 | description = "ID of the VPC" 15 | type = string 16 | } 17 | 18 | variable "subnet_ids" { 19 | description = "List of subnet IDs for the ALB" 20 | type = list(string) 21 | } 22 | 23 | variable "security_group_id" { 24 | description = "ID of the security group for the ALB" 25 | type = string 26 | } 27 | 28 | variable "health_check_path" { 29 | description = "Path for the ALB health check" 30 | type = string 31 | default = "/health.php" 32 | } 33 | 34 | variable "certificate_arn" { 35 | description = "ARN of the SSL certificate for HTTPS" 36 | type = string 37 | default = "" 38 | } 39 | 40 | variable "access_logs_bucket" { 41 | description = "S3 bucket for ALB access logs" 42 | type = string 43 | default = "" 44 | } 45 | 46 | variable "tags" { 47 | description = "A map of tags to add to all resources" 48 | type = map(string) 49 | default = {} 50 | } 51 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/inventory/aws_hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # AWS Inventory for LAMP Stack 3 | 4 | all: 5 | vars: 6 | ansible_user: ec2-user 7 | ansible_ssh_private_key_file: "{{ lookup('env', 'AWS_SSH_KEY_FILE') | default('~/.ssh/aws-key.pem') }}" 8 | ansible_ssh_common_args: "-o StrictHostKeyChecking=no" 9 | domain_name: testerlab.com 10 | environment: aws 11 | 12 | children: 13 | webservers: 14 | hosts: 15 | web1: 16 | ansible_host: "{{ hostvars['localhost']['web1_public_ip'] | default('') }}" 17 | web2: 18 | ansible_host: "{{ hostvars['localhost']['web2_public_ip'] | default('') }}" 19 | vars: 20 | 
ansible_user: ec2-user 21 | 22 | dbservers: 23 | hosts: 24 | db1: 25 | ansible_host: "{{ hostvars['localhost']['db1_endpoint'] | default('') }}" 26 | db2: 27 | ansible_host: "{{ hostvars['localhost']['db2_endpoint'] | default('') }}" 28 | 29 | loadbalancers: 30 | hosts: 31 | alb: 32 | ansible_host: "{{ hostvars['localhost']['alb_dns_name'] | default('') }}" 33 | 34 | efs: 35 | hosts: 36 | efs1: 37 | ansible_host: "{{ hostvars['localhost']['efs_dns_name'] | default('') }}" 38 | 39 | localhost: 40 | hosts: 41 | localhost: 42 | ansible_connection: local 43 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/cleanup_aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to clean up all AWS resources created by the LAMP stack deployment 3 | 4 | # Set environment variables 5 | export ENVIRONMENT=aws 6 | export AWS_REGION=${AWS_REGION:-us-east-1} 7 | 8 | # Check if AWS credentials are configured 9 | if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ]; then 10 | echo "AWS credentials not found. Please configure your AWS credentials." 11 | echo "You can set them as environment variables or configure the AWS CLI with 'aws configure'." 12 | exit 1 13 | fi 14 | 15 | # Create a directory for logs 16 | mkdir -p logs 17 | 18 | # Prompt for confirmation 19 | echo "WARNING: This script will delete all AWS resources created by the LAMP stack deployment." 20 | echo "This action cannot be undone." 21 | read -p "Are you sure you want to continue? (y/n) " -n 1 -r 22 | echo 23 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then 24 | echo "Cleanup cancelled." 25 | exit 0 26 | fi 27 | 28 | # Run the cleanup playbook 29 | echo "Cleaning up AWS resources..." 30 | ansible-playbook playbooks/cleanup.yml -v | tee logs/aws_cleanup.log 31 | 32 | # Check the result 33 | if [ ${PIPESTATUS[0]} -eq 0 ]; then 34 | echo "Cleanup completed successfully!" 
35 | echo "All AWS resources have been deleted." 36 | else 37 | echo "Cleanup failed or completed with warnings. Check logs/aws_cleanup.log for details." 38 | echo "Some resources may still exist in your AWS account." 39 | fi 40 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/database/outputs.tf: -------------------------------------------------------------------------------- 1 | # Database Module Outputs 2 | 3 | output "db_instance_id" { 4 | description = "ID of the RDS instance" 5 | value = aws_db_instance.main.id 6 | } 7 | 8 | output "db_instance_arn" { 9 | description = "ARN of the RDS instance" 10 | value = aws_db_instance.main.arn 11 | } 12 | 13 | output "db_endpoint" { 14 | description = "Endpoint of the RDS instance" 15 | value = aws_db_instance.main.endpoint 16 | } 17 | 18 | output "db_address" { 19 | description = "Address of the RDS instance" 20 | value = aws_db_instance.main.address 21 | } 22 | 23 | output "db_port" { 24 | description = "Port of the RDS instance" 25 | value = aws_db_instance.main.port 26 | } 27 | 28 | output "db_name" { 29 | description = "Name of the database" 30 | value = aws_db_instance.main.db_name 31 | } 32 | 33 | output "db_username" { 34 | description = "Username for the database" 35 | value = aws_db_instance.main.username 36 | } 37 | 38 | output "db_subnet_group_id" { 39 | description = "ID of the DB subnet group" 40 | value = aws_db_subnet_group.main.id 41 | } 42 | 43 | output "db_parameter_group_id" { 44 | description = "ID of the DB parameter group" 45 | value = aws_db_parameter_group.main.id 46 | } 47 | 48 | output "db_option_group_id" { 49 | description = "ID of the DB option group" 50 | value = aws_db_option_group.main.id 51 | } 52 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/networking/variables.tf: 
-------------------------------------------------------------------------------- 1 | # Networking Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "vpc_cidr" { 14 | description = "CIDR block for the VPC" 15 | type = string 16 | } 17 | 18 | variable "azs" { 19 | description = "List of availability zones to use" 20 | type = list(string) 21 | } 22 | 23 | variable "public_subnets" { 24 | description = "CIDR blocks for the public subnets" 25 | type = list(string) 26 | } 27 | 28 | variable "private_subnets" { 29 | description = "CIDR blocks for the private subnets" 30 | type = list(string) 31 | } 32 | 33 | variable "database_subnets" { 34 | description = "CIDR blocks for the database subnets" 35 | type = list(string) 36 | } 37 | 38 | variable "enable_nat_gateway" { 39 | description = "Whether to enable NAT Gateway" 40 | type = bool 41 | default = true 42 | } 43 | 44 | variable "single_nat_gateway" { 45 | description = "Whether to use a single NAT Gateway for all private subnets" 46 | type = bool 47 | default = false 48 | } 49 | 50 | variable "tags" { 51 | description = "A map of tags to add to all resources" 52 | type = map(string) 53 | default = {} 54 | } 55 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | **/.terraform/* 3 | *.tfstate 4 | *.tfstate.* 5 | *.tfplan 6 | *.tfvars.json 7 | crash.log 8 | crash.*.log 9 | override.tf 10 | override.tf.json 11 | *_override.tf 12 | *_override.tf.json 13 | .terraformrc 14 | terraform.rc 15 | .terraform.lock.hcl 16 | tfdestroyplan 17 | 18 | # Ansible files 19 | *.retry 20 | ansible/.ansible_cache/ 21 | ansible/terraform_outputs.json 22 | 23 | # Local environment 
files 24 | .env 25 | .env.* 26 | .envrc 27 | .direnv/ 28 | 29 | # OS specific files 30 | .DS_Store 31 | Thumbs.db 32 | *.swp 33 | *~ 34 | 35 | # IDE files 36 | .idea/ 37 | .vscode/ 38 | *.iml 39 | *.sublime-* 40 | .project 41 | .settings/ 42 | .classpath 43 | 44 | # Python files 45 | __pycache__/ 46 | *.py[cod] 47 | *$py.class 48 | *.so 49 | .Python 50 | env/ 51 | build/ 52 | develop-eggs/ 53 | dist/ 54 | downloads/ 55 | eggs/ 56 | .eggs/ 57 | lib/ 58 | lib64/ 59 | parts/ 60 | sdist/ 61 | var/ 62 | *.egg-info/ 63 | .installed.cfg 64 | *.egg 65 | 66 | # Logs 67 | logs/ 68 | *.log 69 | npm-debug.log* 70 | yarn-debug.log* 71 | yarn-error.log* 72 | 73 | # Dependency directories 74 | node_modules/ 75 | jspm_packages/ 76 | 77 | # Distribution directories 78 | dist/ 79 | build/ 80 | 81 | # Temporary files 82 | tmp/ 83 | temp/ 84 | 85 | # Sensitive files 86 | *.pem 87 | *.key 88 | *.crt 89 | *.p12 90 | *.pfx 91 | *.jks 92 | *.keystore 93 | *.truststore 94 | secrets.yml 95 | secrets.yaml 96 | secrets.json 97 | secrets.tfvars 98 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/group_vars/aws.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # AWS-specific variables 3 | 4 | # AWS CLI command 5 | aws_cli_command: "aws" 6 | 7 | # AWS credentials 8 | aws_profile: "default" 9 | 10 | # AWS region (override from all.yml if needed) 11 | aws_region: "us-east-1" 12 | 13 | # EC2 configuration 14 | ec2_instance_type: "t4g.small" 15 | ec2_ami_owner: "amazon" 16 | ec2_ami_name: "al2023-ami-*-arm64" 17 | 18 | # RDS configuration 19 | db_instance_class: "db.t4g.small" 20 | db_multi_az: true 21 | 22 | # Route 53 configuration 23 | route53_create_zone: false # Set to true if you need to create the zone 24 | route53_zone_id: "{{ lookup('env', 'ROUTE53_ZONE_ID') }}" 25 | 26 | # ACM configuration 27 | acm_create_certificate: true 28 | acm_validate_certificate: true 29 | 30 | # CloudWatch 
configuration 31 | cloudwatch_create_alarms: true 32 | cloudwatch_alarm_email: "admin@testerlab.com" 33 | 34 | # Backup configuration 35 | backup_enabled: true 36 | backup_retention_days: 7 37 | 38 | # Cost optimization 39 | use_spot_instances: false # Set to true to use spot instances for non-critical workloads 40 | reserved_instances: false # Set to true if you have reserved instances 41 | 42 | # Security configuration 43 | security_enable_waf: true 44 | security_enable_shield: false # AWS Shield Advanced is expensive, disabled by default 45 | 46 | # Tagging strategy 47 | aws_tags: 48 | Project: "LAMP Stack" 49 | Environment: "{{ environment }}" 50 | ManagedBy: "Ansible" 51 | Owner: "DevOps Team" 52 | CostCenter: "IT-123" 53 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Common role handlers 3 | # These handlers are used by the common role tasks 4 | 5 | - name: restart ntp service 6 | service: 7 | name: "{{ 'ntp' if ansible_os_family == 'Debian' else 'chronyd' }}" 8 | state: restarted 9 | listen: restart ntp service 10 | 11 | - name: restart ssh service 12 | service: 13 | name: "{{ 'ssh' if ansible_os_family == 'Debian' else 'sshd' }}" 14 | state: restarted 15 | listen: restart ssh service 16 | 17 | - name: restart firewall service 18 | service: 19 | name: "{{ 'ufw' if ansible_os_family == 'Debian' else 'firewalld' }}" 20 | state: restarted 21 | listen: restart firewall service 22 | 23 | - name: reload sysctl 24 | command: sysctl -p 25 | listen: reload sysctl 26 | 27 | - name: reload systemd 28 | systemd: 29 | daemon_reload: yes 30 | listen: reload systemd 31 | 32 | - name: restart logrotate 33 | service: 34 | name: logrotate 35 | state: restarted 36 | listen: restart logrotate 37 | ignore_errors: yes 38 | 39 | - name: update ca certificates 40 | command: 
update-ca-certificates 41 | listen: update ca certificates 42 | changed_when: true 43 | 44 | - name: restart network 45 | service: 46 | name: "{{ 'networking' if ansible_os_family == 'Debian' else 'network' }}" 47 | state: restarted 48 | listen: restart network 49 | 50 | - name: reboot system 51 | reboot: 52 | reboot_timeout: 600 53 | post_reboot_delay: 60 54 | test_command: uptime 55 | listen: reboot system 56 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/waf/outputs.tf: -------------------------------------------------------------------------------- 1 | # WAF Module Outputs 2 | 3 | output "web_acl_id" { 4 | description = "ID of the WAF Web ACL" 5 | value = aws_wafv2_web_acl.main.id 6 | } 7 | 8 | output "web_acl_arn" { 9 | description = "ARN of the WAF Web ACL" 10 | value = aws_wafv2_web_acl.main.arn 11 | } 12 | 13 | output "web_acl_name" { 14 | description = "Name of the WAF Web ACL" 15 | value = aws_wafv2_web_acl.main.name 16 | } 17 | 18 | output "allowed_ips_id" { 19 | description = "ID of the allowed IPs IP set" 20 | value = length(var.allowed_ips) > 0 ? aws_wafv2_ip_set.allowed_ips[0].id : null 21 | } 22 | 23 | output "allowed_ips_arn" { 24 | description = "ARN of the allowed IPs IP set" 25 | value = length(var.allowed_ips) > 0 ? aws_wafv2_ip_set.allowed_ips[0].arn : null 26 | } 27 | 28 | output "blocked_ips_id" { 29 | description = "ID of the blocked IPs IP set" 30 | value = length(var.blocked_ips) > 0 ? aws_wafv2_ip_set.blocked_ips[0].id : null 31 | } 32 | 33 | output "blocked_ips_arn" { 34 | description = "ARN of the blocked IPs IP set" 35 | value = length(var.blocked_ips) > 0 ? aws_wafv2_ip_set.blocked_ips[0].arn : null 36 | } 37 | 38 | output "log_group_name" { 39 | description = "Name of the CloudWatch log group for WAF" 40 | value = var.enable_logging ? 
aws_cloudwatch_log_group.waf[0].name : null 41 | } 42 | 43 | output "log_group_arn" { 44 | description = "ARN of the CloudWatch log group for WAF" 45 | value = var.enable_logging ? aws_cloudwatch_log_group.waf[0].arn : null 46 | } 47 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/networking/outputs.tf: -------------------------------------------------------------------------------- 1 | # Networking Module Outputs 2 | 3 | output "vpc_id" { 4 | description = "The ID of the VPC" 5 | value = aws_vpc.main.id 6 | } 7 | 8 | output "vpc_cidr_block" { 9 | description = "The CIDR block of the VPC" 10 | value = aws_vpc.main.cidr_block 11 | } 12 | 13 | output "public_subnet_ids" { 14 | description = "List of IDs of public subnets" 15 | value = aws_subnet.public[*].id 16 | } 17 | 18 | output "private_subnet_ids" { 19 | description = "List of IDs of private subnets" 20 | value = aws_subnet.private[*].id 21 | } 22 | 23 | output "database_subnet_ids" { 24 | description = "List of IDs of database subnets" 25 | value = aws_subnet.database[*].id 26 | } 27 | 28 | output "public_route_table_id" { 29 | description = "ID of the public route table" 30 | value = aws_route_table.public.id 31 | } 32 | 33 | output "private_route_table_ids" { 34 | description = "List of IDs of private route tables" 35 | value = aws_route_table.private[*].id 36 | } 37 | 38 | output "nat_gateway_ids" { 39 | description = "List of IDs of NAT gateways" 40 | value = aws_nat_gateway.main[*].id 41 | } 42 | 43 | output "internet_gateway_id" { 44 | description = "ID of the Internet Gateway" 45 | value = aws_internet_gateway.main.id 46 | } 47 | 48 | output "azs" { 49 | description = "List of availability zones used" 50 | value = var.azs 51 | } 52 | 53 | output "flow_log_id" { 54 | description = "ID of the VPC Flow Log" 55 | value = aws_flow_log.main.id 56 | } 57 | 
-------------------------------------------------------------------------------- /aws_inventory.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # AWS Dynamic Inventory for Ansible 3 | # This file configures Ansible to use AWS as a dynamic inventory source 4 | 5 | plugin: aws_ec2 6 | regions: 7 | - us-west-2 # Change to your preferred region 8 | keyed_groups: 9 | # Create groups based on instance tags 10 | - prefix: tag 11 | key: tags.Name 12 | # Create groups based on instance type 13 | - prefix: instance_type 14 | key: instance_type 15 | # Create groups based on VPC 16 | - prefix: vpc 17 | key: vpc_id 18 | # Create groups based on security groups 19 | - prefix: security_group 20 | key: security_groups 21 | # Create groups based on availability zone 22 | - prefix: az 23 | key: placement.availability_zone 24 | # Create groups based on state 25 | - prefix: instance_state 26 | key: instance_state.name 27 | 28 | # Configure which hostnames to use 29 | hostnames: 30 | - tag:Name 31 | - public_ip_address 32 | - private_ip_address 33 | - instance_id 34 | 35 | # Add custom host variables 36 | compose: 37 | # Set ansible_host to the public IP if available, otherwise private IP 38 | ansible_host: public_ip_address | default(private_ip_address) 39 | # Add other instance attributes as host variables 40 | instance_id: instance_id 41 | instance_type: instance_type 42 | region: placement.region 43 | availability_zone: placement.availability_zone 44 | state: instance_state.name 45 | platform: platform | default('linux') 46 | private_ip: private_ip_address 47 | public_ip: public_ip_address 48 | 49 | # Filter instances (optional) 50 | # filters: 51 | # instance-state-name: running 52 | # tag:Environment: production 53 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/dns/outputs.tf: 
-------------------------------------------------------------------------------- 1 | # DNS Module Outputs 2 | 3 | output "domain_name" { 4 | description = "Domain name for the application" 5 | value = var.domain_name 6 | } 7 | 8 | output "fqdn" { 9 | description = "Fully qualified domain name for the application" 10 | value = var.domain_name != "" ? (var.subdomain != "" ? "${var.subdomain}.${var.domain_name}" : var.domain_name) : null 11 | } 12 | 13 | output "certificate_arn" { 14 | description = "ARN of the ACM certificate" 15 | value = var.domain_name != "" && var.create_certificate ? aws_acm_certificate.main[0].arn : null 16 | } 17 | 18 | output "certificate_domain_validation_options" { 19 | description = "Domain validation options for the ACM certificate" 20 | value = var.domain_name != "" && var.create_certificate ? aws_acm_certificate.main[0].domain_validation_options : null 21 | } 22 | 23 | output "route53_record_name" { 24 | description = "Name of the Route53 record" 25 | value = var.domain_name != "" ? aws_route53_record.main[0].name : null 26 | } 27 | 28 | output "route53_record_fqdn" { 29 | description = "FQDN of the Route53 record" 30 | value = var.domain_name != "" ? aws_route53_record.main[0].fqdn : null 31 | } 32 | 33 | output "www_record_name" { 34 | description = "Name of the www Route53 record" 35 | value = var.domain_name != "" && var.create_www_record ? aws_route53_record.www[0].name : null 36 | } 37 | 38 | output "www_record_fqdn" { 39 | description = "FQDN of the www Route53 record" 40 | value = var.domain_name != "" && var.create_www_record ? 
aws_route53_record.www[0].fqdn : null 41 | } 42 | -------------------------------------------------------------------------------- /src/sysoperator/operations/inventory.ts: -------------------------------------------------------------------------------- 1 | import { AnsibleExecutionError } from '../common/errors.js'; 2 | import { ListInventoryOptions } from '../common/types.js'; 3 | import { execAsync, validateInventoryPath } from '../common/utils.js'; 4 | 5 | /** 6 | * Lists all hosts and groups in an Ansible inventory 7 | * @param options Options containing the inventory path 8 | * @returns JSON string representation of the inventory (formatted) 9 | * @throws AnsibleInventoryNotFoundError if the specified inventory doesn't exist 10 | * @throws AnsibleExecutionError if the inventory listing fails 11 | */ 12 | export async function listInventory(options: ListInventoryOptions): Promise { 13 | const inventoryPath = validateInventoryPath(options.inventory); 14 | 15 | // Build command 16 | let command = 'ansible-inventory'; 17 | 18 | // Add inventory if specified 19 | if (inventoryPath) { 20 | command += ` -i ${inventoryPath}`; 21 | } 22 | 23 | command += ' --list'; 24 | 25 | try { 26 | // Execute command 27 | const { stdout, stderr } = await execAsync(command); 28 | 29 | try { 30 | // Try to parse as JSON for better formatting 31 | const inventory = JSON.parse(stdout); 32 | return JSON.stringify(inventory, null, 2); 33 | } catch { 34 | // Fall back to raw output if can't parse as JSON 35 | return stdout || 'No inventory data returned'; 36 | } 37 | } catch (error) { 38 | // Handle exec error 39 | const execError = error as { stderr?: string; message: string }; 40 | throw new AnsibleExecutionError( 41 | `Error listing inventory: ${execError.message}`, 42 | execError.stderr 43 | ); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/templates/db-credentials.cnf.j2: 
-------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # MySQL credentials file for {{ inventory_hostname }} 3 | # This file is managed by Ansible - local changes will be overwritten 4 | # SECURITY WARNING: This file contains sensitive information and should be protected 5 | 6 | [client] 7 | host={{ db_host }} 8 | port={{ mysql_port | default('3306') }} 9 | user={{ db_user }} 10 | password={{ db_password }} 11 | database={{ db_name }} 12 | 13 | {% if db_ssl_enabled | default(true) | bool %} 14 | # SSL Configuration 15 | ssl=ON 16 | ssl-verify-server-cert={{ db_ssl_verify | default(true) | bool | lower }} 17 | ssl-ca={{ mysql_ssl_ca_path }} 18 | {% endif %} 19 | 20 | # Connection settings 21 | connect_timeout={{ db_connection_timeout | default(10) }} 22 | default-character-set={{ mysql_charset | default('utf8mb4') }} 23 | 24 | [mysql] 25 | # MySQL CLI client settings 26 | prompt="{{ db_name }}> " 27 | default-character-set={{ mysql_charset | default('utf8mb4') }} 28 | 29 | [mysqldump] 30 | # Dump settings 31 | host={{ db_host }} 32 | port={{ mysql_port | default('3306') }} 33 | user={{ db_user }} 34 | password={{ db_password }} 35 | default-character-set={{ mysql_charset | default('utf8mb4') }} 36 | 37 | {% if db_ssl_enabled | default(true) | bool %} 38 | # SSL Configuration 39 | ssl=ON 40 | ssl-verify-server-cert={{ db_ssl_verify | default(true) | bool | lower }} 41 | ssl-ca={{ mysql_ssl_ca_path }} 42 | {% endif %} 43 | 44 | # Environment information 45 | # Environment: {{ environment | default('production') }} 46 | # Region: {{ aws_region | default('us-east-1') }} 47 | # Instance: {{ ansible_hostname }} 48 | # Created: {{ ansible_date_time.iso8601 }} 49 | -------------------------------------------------------------------------------- /docker-test.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Test script for the SysOperator MCP 
server in Docker 5 | * 6 | * This script demonstrates how to interact with the SysOperator MCP server 7 | * running in a Docker container. It sends a request to list available tools 8 | * and then executes a simple ad-hoc command. 9 | * 10 | * Usage: 11 | * 1. Build and run the Docker container with stdin/stdout connected to this script: 12 | * docker run -i sysoperator-mcp < docker-test.js 13 | * 14 | * 2. Or use the helper script: 15 | * ./docker-build-run.sh --run | node docker-test.js 16 | */ 17 | 18 | // MCP protocol message format 19 | const formatMcpMessage = (method, params) => { 20 | const message = { 21 | jsonrpc: '2.0', 22 | id: Date.now().toString(), 23 | method, 24 | params 25 | }; 26 | 27 | return JSON.stringify(message); 28 | }; 29 | 30 | // Send a message to the MCP server 31 | const sendMessage = (message) => { 32 | console.log(message); 33 | }; 34 | 35 | // First, list the available tools 36 | sendMessage(formatMcpMessage('mcp.list_tools', {})); 37 | 38 | // Wait a moment before sending the next message 39 | setTimeout(() => { 40 | // Then, run a simple ad-hoc command (echo hello) 41 | // Note: This will only work if Ansible is installed in the container 42 | sendMessage(formatMcpMessage('mcp.call_tool', { 43 | name: 'run_ad_hoc', 44 | arguments: { 45 | pattern: 'localhost', 46 | module: 'shell', 47 | args: 'echo "Hello from Docker container!"' 48 | } 49 | })); 50 | }, 1000); 51 | 52 | // In a real application, you would also read and parse the responses 53 | // from the MCP server. This simple example just sends requests. 
54 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/web/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Web role handlers 3 | # These handlers are used by the web role tasks 4 | 5 | - name: restart apache 6 | service: 7 | name: "{{ apache_service_name }}" 8 | state: restarted 9 | listen: restart apache 10 | 11 | - name: reload apache 12 | service: 13 | name: "{{ apache_service_name }}" 14 | state: reloaded 15 | listen: reload apache 16 | 17 | - name: restart php-fpm 18 | service: 19 | name: "{{ php_fpm_service_name }}" 20 | state: restarted 21 | when: php_fpm_enabled | default(true) | bool 22 | listen: restart php-fpm 23 | 24 | - name: reload php-fpm 25 | service: 26 | name: "{{ php_fpm_service_name }}" 27 | state: reloaded 28 | when: php_fpm_enabled | default(true) | bool 29 | listen: reload php-fpm 30 | 31 | - name: restart cloudwatch agent 32 | service: 33 | name: amazon-cloudwatch-agent 34 | state: restarted 35 | listen: restart cloudwatch agent 36 | 37 | - name: restart services 38 | service: 39 | name: "{{ item }}" 40 | state: restarted 41 | with_items: 42 | - "{{ apache_service_name }}" 43 | - "{{ php_fpm_service_name | default(omit) }}" 44 | listen: restart services 45 | when: item is defined 46 | 47 | - name: clear php cache 48 | command: "php -r 'opcache_reset();'" 49 | changed_when: true 50 | ignore_errors: yes 51 | listen: clear php cache 52 | 53 | - name: validate apache config 54 | command: "{{ 'apachectl' if ansible_os_family == 'Debian' else 'httpd' }} -t" 55 | changed_when: false 56 | listen: validate apache config 57 | 58 | - name: validate php config 59 | command: php -l {{ php_ini_path }} 60 | changed_when: false 61 | listen: validate php config 62 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/security/outputs.tf: 
-------------------------------------------------------------------------------- 1 | # Security Module Outputs 2 | 3 | output "web_security_group_id" { 4 | description = "ID of the web server security group" 5 | value = aws_security_group.web.id 6 | } 7 | 8 | output "alb_security_group_id" { 9 | description = "ID of the ALB security group" 10 | value = aws_security_group.alb.id 11 | } 12 | 13 | output "db_security_group_id" { 14 | description = "ID of the database security group" 15 | value = aws_security_group.db.id 16 | } 17 | 18 | output "efs_security_group_id" { 19 | description = "ID of the EFS security group" 20 | value = aws_security_group.efs.id 21 | } 22 | 23 | output "web_server_role_arn" { 24 | description = "ARN of the web server IAM role" 25 | value = aws_iam_role.web_server.arn 26 | } 27 | 28 | output "web_server_role_name" { 29 | description = "Name of the web server IAM role" 30 | value = aws_iam_role.web_server.name 31 | } 32 | 33 | output "web_instance_profile_name" { 34 | description = "Name of the web server instance profile" 35 | value = aws_iam_instance_profile.web_server.name 36 | } 37 | 38 | output "web_instance_profile_arn" { 39 | description = "ARN of the web server instance profile" 40 | value = aws_iam_instance_profile.web_server.arn 41 | } 42 | 43 | output "public_nacl_id" { 44 | description = "ID of the public network ACL" 45 | value = aws_network_acl.public.id 46 | } 47 | 48 | output "private_nacl_id" { 49 | description = "ID of the private network ACL" 50 | value = aws_network_acl.private.id 51 | } 52 | 53 | output "database_nacl_id" { 54 | description = "ID of the database network ACL" 55 | value = aws_network_acl.database.id 56 | } 57 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/group_vars/localstack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # LocalStack-specific variables 3 | 4 | # AWS CLI command for LocalStack 5 | 
aws_cli_command: "awslocal" 6 | 7 | # AWS credentials (not needed for LocalStack) 8 | aws_profile: "" 9 | 10 | # AWS region (override from all.yml if needed) 11 | aws_region: "us-east-1" 12 | 13 | # LocalStack endpoint 14 | localstack_endpoint: "http://localhost:4566" 15 | 16 | # EC2 configuration - simplified for LocalStack 17 | ec2_instance_type: "t2.micro" # Instance type doesn't matter in LocalStack 18 | ec2_ami_id: "ami-12345678" # Dummy AMI ID for LocalStack 19 | ec2_key_name: "dummy-key" # Dummy key name for LocalStack 20 | 21 | # RDS configuration - simplified for LocalStack 22 | db_instance_class: "db.t2.micro" # Instance class doesn't matter in LocalStack 23 | db_multi_az: false # Multi-AZ not fully supported in LocalStack 24 | 25 | # Route 53 configuration 26 | route53_create_zone: true 27 | route53_zone_id: "" # Will be populated during creation 28 | 29 | # ACM configuration 30 | acm_create_certificate: true 31 | acm_validate_certificate: false # Skip validation in LocalStack 32 | 33 | # CloudWatch configuration 34 | cloudwatch_create_alarms: false # Skip CloudWatch alarms in LocalStack 35 | 36 | # Backup configuration 37 | backup_enabled: false # Skip backups in LocalStack 38 | 39 | # Security configuration 40 | security_enable_waf: false # Skip WAF in LocalStack 41 | security_enable_shield: false # Skip Shield in LocalStack 42 | 43 | # Tagging strategy 44 | aws_tags: 45 | Project: "LAMP Stack" 46 | Environment: "localstack" 47 | ManagedBy: "Ansible" 48 | Owner: "DevOps Team" 49 | 50 | # LocalStack testing 51 | localstack_test_mode: true 52 | localstack_skip_long_operations: true 53 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/monitoring/variables.tf: -------------------------------------------------------------------------------- 1 | # Monitoring Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | 
variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "region" { 14 | description = "AWS region" 15 | type = string 16 | } 17 | 18 | variable "asg_name" { 19 | description = "Name of the Auto Scaling Group" 20 | type = string 21 | } 22 | 23 | variable "db_instance_id" { 24 | description = "ID of the RDS instance" 25 | type = string 26 | } 27 | 28 | variable "alb_arn_suffix" { 29 | description = "ARN suffix of the ALB" 30 | type = string 31 | } 32 | 33 | variable "efs_id" { 34 | description = "ID of the EFS file system" 35 | type = string 36 | } 37 | 38 | variable "ec2_cpu_alarm_arn" { 39 | description = "ARN of the EC2 CPU alarm" 40 | type = string 41 | } 42 | 43 | variable "rds_cpu_alarm_arn" { 44 | description = "ARN of the RDS CPU alarm" 45 | type = string 46 | } 47 | 48 | variable "alb_5xx_alarm_arn" { 49 | description = "ARN of the ALB 5XX alarm" 50 | type = string 51 | } 52 | 53 | variable "alarm_actions" { 54 | description = "List of ARNs to notify when the alarm transitions to ALARM state" 55 | type = list(string) 56 | default = [] 57 | } 58 | 59 | variable "ok_actions" { 60 | description = "List of ARNs to notify when the alarm transitions to OK state" 61 | type = list(string) 62 | default = [] 63 | } 64 | 65 | variable "tags" { 66 | description = "A map of tags to add to all resources" 67 | type = map(string) 68 | default = {} 69 | } 70 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 1: Build the application 2 | FROM node:18 AS builder 3 | 4 | WORKDIR /app 5 | 6 | # Copy package files and install dependencies 7 | COPY package*.json ./ 8 | RUN npm ci 9 | 10 | # Copy source code 11 | COPY tsconfig.json ./ 12 | COPY src/ ./src/ 13 | 14 | # Build the TypeScript code 15 | RUN npm run build 16 | 17 | # Stage 2: Create the runtime image 18 | FROM 
node:18-slim 19 | 20 | # Install system dependencies 21 | RUN apt-get update && apt-get install -y \ 22 | python3 \ 23 | python3-pip \ 24 | curl \ 25 | unzip \ 26 | gnupg \ 27 | software-properties-common \ 28 | lsb-release \ 29 | && rm -rf /var/lib/apt/lists/* 30 | 31 | # Install Ansible 32 | RUN pip3 install ansible 33 | 34 | # Install AWS CLI 35 | RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" \ 36 | && unzip awscliv2.zip \ 37 | && ./aws/install \ 38 | && rm -rf aws awscliv2.zip 39 | 40 | # Install Terraform 41 | RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg \ 42 | && echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/hashicorp.list \ 43 | && apt-get update && apt-get install -y terraform \ 44 | && rm -rf /var/lib/apt/lists/* 45 | 46 | WORKDIR /app 47 | 48 | # Copy package files and install production dependencies 49 | COPY package*.json ./ 50 | RUN npm ci --only=production 51 | 52 | # Copy built application from the builder stage 53 | COPY --from=builder /app/build ./build 54 | 55 | # Set executable permissions for the entry point 56 | RUN chmod +x ./build/index.js 57 | 58 | # Set the entry point 59 | ENTRYPOINT ["node", "build/index.js"] 60 | -------------------------------------------------------------------------------- /cloudformation_example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example Ansible playbook for AWS CloudFormation 3 | # Note: This playbook requires valid AWS credentials to run 4 | 5 | - name: AWS CloudFormation Example 6 | hosts: localhost 7 | connection: local 8 | gather_facts: false 9 | vars: 10 | aws_region: us-west-2 11 | stack_name: example-stack 12 | template_file: cloudformation_template.json 13 | 14 | tasks: 15 | - name: Create CloudFormation stack 16 | 
amazon.aws.cloudformation: 17 | stack_name: "{{ stack_name }}" 18 | state: present 19 | region: "{{ aws_region }}" 20 | template_body: "{{ lookup('file', template_file) }}" 21 | template_parameters: 22 | KeyName: my-key-pair 23 | InstanceType: t2.micro 24 | tags: 25 | Stack: "{{ stack_name }}" 26 | Environment: Development 27 | register: cf_result 28 | tags: 29 | - create 30 | 31 | - name: Display CloudFormation stack result 32 | debug: 33 | var: cf_result 34 | tags: 35 | - create 36 | 37 | - name: Get CloudFormation stack information 38 | amazon.aws.cloudformation_info: 39 | stack_name: "{{ stack_name }}" 40 | region: "{{ aws_region }}" 41 | register: cf_info 42 | tags: 43 | - info 44 | 45 | - name: Display CloudFormation stack information 46 | debug: 47 | var: cf_info 48 | tags: 49 | - info 50 | 51 | - name: Delete CloudFormation stack 52 | amazon.aws.cloudformation: 53 | stack_name: "{{ stack_name }}" 54 | state: absent 55 | region: "{{ aws_region }}" 56 | register: cf_delete 57 | tags: 58 | - delete 59 | 60 | - name: Display CloudFormation stack deletion result 61 | debug: 62 | var: cf_delete 63 | tags: 64 | - delete 65 | -------------------------------------------------------------------------------- /src/sysoperator/operations/ad_hoc.ts: -------------------------------------------------------------------------------- 1 | import { AnsibleExecutionError } from '../common/errors.js'; 2 | import { RunAdHocOptions } from '../common/types.js'; 3 | import { execAsync, validateInventoryPath } from '../common/utils.js'; 4 | 5 | /** 6 | * Runs an Ansible ad-hoc command 7 | * @param options Options for running the ad-hoc command 8 | * @returns Standard output from ansible command 9 | * @throws AnsibleInventoryNotFoundError if the specified inventory doesn't exist 10 | * @throws AnsibleExecutionError if the command execution fails 11 | */ 12 | export async function runAdHoc(options: RunAdHocOptions): Promise { 13 | const inventoryPath = 
validateInventoryPath(options.inventory); 14 | 15 | // Build command 16 | let command = `ansible ${options.pattern}`; 17 | 18 | // Add module 19 | command += ` -m ${options.module}`; 20 | 21 | // Add module args if specified 22 | if (options.args) { 23 | command += ` -a "${options.args}"`; 24 | } 25 | 26 | // Add inventory if specified 27 | if (inventoryPath) { 28 | command += ` -i ${inventoryPath}`; 29 | } 30 | 31 | // Add become flag if needed 32 | if (options.become) { 33 | command += ' --become'; 34 | } 35 | 36 | // Add extra vars if specified 37 | if (options.extra_vars && Object.keys(options.extra_vars).length > 0) { 38 | const extraVarsJson = JSON.stringify(options.extra_vars); 39 | command += ` --extra-vars '${extraVarsJson}'`; 40 | } 41 | 42 | try { 43 | // Execute command 44 | const { stdout, stderr } = await execAsync(command); 45 | return stdout || 'Command executed successfully (no output)'; 46 | } catch (error) { 47 | // Handle exec error 48 | const execError = error as { stderr?: string; message: string }; 49 | throw new AnsibleExecutionError( 50 | `Error running ad-hoc command: ${execError.message}`, 51 | execError.stderr 52 | ); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/monitoring/outputs.tf: -------------------------------------------------------------------------------- 1 | # Monitoring Module Outputs 2 | 3 | output "dashboard_name" { 4 | description = "Name of the CloudWatch dashboard" 5 | value = aws_cloudwatch_dashboard.main.dashboard_name 6 | } 7 | 8 | output "dashboard_arn" { 9 | description = "ARN of the CloudWatch dashboard" 10 | value = aws_cloudwatch_dashboard.main.dashboard_arn 11 | } 12 | 13 | output "system_health_alarm_arn" { 14 | description = "ARN of the system health composite alarm" 15 | value = aws_cloudwatch_composite_alarm.system_health.arn 16 | } 17 | 18 | output "system_health_alarm_id" { 19 | description = "ID of the system 
health composite alarm" 20 | value = aws_cloudwatch_composite_alarm.system_health.id 21 | } 22 | 23 | output "app_log_group_name" { 24 | description = "Name of the application log group" 25 | value = aws_cloudwatch_log_group.app_logs.name 26 | } 27 | 28 | output "app_log_group_arn" { 29 | description = "ARN of the application log group" 30 | value = aws_cloudwatch_log_group.app_logs.arn 31 | } 32 | 33 | output "access_log_group_name" { 34 | description = "Name of the access log group" 35 | value = aws_cloudwatch_log_group.access_logs.name 36 | } 37 | 38 | output "access_log_group_arn" { 39 | description = "ARN of the access log group" 40 | value = aws_cloudwatch_log_group.access_logs.arn 41 | } 42 | 43 | output "error_log_group_name" { 44 | description = "Name of the error log group" 45 | value = aws_cloudwatch_log_group.error_logs.name 46 | } 47 | 48 | output "error_log_group_arn" { 49 | description = "ARN of the error log group" 50 | value = aws_cloudwatch_log_group.error_logs.arn 51 | } 52 | 53 | output "php_errors_alarm_arn" { 54 | description = "ARN of the PHP errors alarm" 55 | value = aws_cloudwatch_metric_alarm.php_errors.arn 56 | } 57 | 58 | output "php_errors_alarm_id" { 59 | description = "ID of the PHP errors alarm" 60 | value = aws_cloudwatch_metric_alarm.php_errors.id 61 | } 62 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/terraform.tfvars: -------------------------------------------------------------------------------- 1 | # Example terraform.tfvars file for LAMP Stack on AWS 2 | # Customize these values for your environment 3 | 4 | # General 5 | project_name = "lamp-stack" 6 | environment = "dev" 7 | region = "us-west-2" 8 | 9 | tags = { 10 | Owner = "DevOps" 11 | Project = "LAMP Stack" 12 | Environment = "Development" 13 | Terraform = "true" 14 | } 15 | 16 | # Networking 17 | vpc_cidr = "10.0.0.0/16" 18 | availability_zones = ["us-west-2a", "us-west-2b"] 19 | 
public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] 20 | private_subnet_cidrs = ["10.0.3.0/24", "10.0.4.0/24"] 21 | 22 | # Security 23 | allowed_ips = ["0.0.0.0/0"] # Restrict this to your IP in production 24 | 25 | # Database 26 | db_name = "lampdb" 27 | db_username = "admin" 28 | db_password = "YourStrongPasswordHere" # Change this to a secure password 29 | db_instance_class = "db.t3.micro" 30 | db_allocated_storage = 20 31 | db_engine = "mysql" 32 | db_engine_version = "8.0" 33 | db_multi_az = true 34 | db_backup_retention_period = 7 35 | 36 | # Compute 37 | instance_type = "t3.micro" 38 | key_name = "your-key-pair" # Change this to your key pair name 39 | asg_min_size = 2 40 | asg_max_size = 4 41 | asg_desired_capacity = 2 42 | efs_mount_point = "/var/www/html/shared" 43 | 44 | # Load Balancing 45 | health_check_path = "/health.php" 46 | certificate_arn = "" # Add your certificate ARN if you have one 47 | 48 | # DNS 49 | domain_name = "" # Add your domain name if you have one 50 | subdomain = "lamp" # Subdomain for the application 51 | create_www_record = true 52 | create_certificate = true 53 | 54 | # WAF 55 | waf_allowed_ips = [] # Add specific IPs to allow 56 | waf_blocked_ips = [] # Add specific IPs to block 57 | waf_rate_limit = 1000 58 | waf_enable_logging = true 59 | 60 | # Monitoring 61 | alarm_actions = [] # Add SNS topic ARNs for alarms 62 | ok_actions = [] # Add SNS topic ARNs for OK notifications 63 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/database/variables.tf: -------------------------------------------------------------------------------- 1 | # Database Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "vpc_id" { 14 | description = "ID of the VPC" 15 | type = string 16 
| } 17 | 18 | variable "subnet_ids" { 19 | description = "List of subnet IDs for the database" 20 | type = list(string) 21 | } 22 | 23 | variable "security_group_id" { 24 | description = "ID of the security group for the database" 25 | type = string 26 | } 27 | 28 | variable "db_name" { 29 | description = "Name of the database" 30 | type = string 31 | default = "lampdb" 32 | } 33 | 34 | variable "db_username" { 35 | description = "Username for the database" 36 | type = string 37 | default = "admin" 38 | } 39 | 40 | variable "db_password" { 41 | description = "Password for the database" 42 | type = string 43 | default = "" 44 | sensitive = true 45 | } 46 | 47 | variable "db_instance_class" { 48 | description = "Instance class for the RDS instance" 49 | type = string 50 | default = "db.t3.small" 51 | } 52 | 53 | variable "allocated_storage" { 54 | description = "Allocated storage for the RDS instance (in GB)" 55 | type = number 56 | default = 20 57 | } 58 | 59 | variable "multi_az" { 60 | description = "Whether to enable Multi-AZ for the RDS instance" 61 | type = bool 62 | default = true 63 | } 64 | 65 | variable "backup_retention_period" { 66 | description = "Backup retention period for the RDS instance (in days)" 67 | type = number 68 | default = 7 69 | } 70 | 71 | variable "skip_final_snapshot" { 72 | description = "Whether to skip the final snapshot when the RDS instance is deleted" 73 | type = bool 74 | default = false 75 | } 76 | 77 | variable "tags" { 78 | description = "A map of tags to add to all resources" 79 | type = map(string) 80 | default = {} 81 | } 82 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/templates/logrotate-db.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Log rotation configuration for database client logs 3 | 4 | /var/log/db-backup.log { 5 | daily 6 | rotate {{ db_log_rotation_count | 
default(14) }} 7 | missingok 8 | notifempty 9 | compress 10 | delaycompress 11 | create 0644 {{ log_user | default('root') }} {{ log_group | default('root') }} 12 | dateext 13 | dateformat -%Y%m%d 14 | sharedscripts 15 | postrotate 16 | systemctl reload rsyslog >/dev/null 2>&1 || true 17 | endscript 18 | } 19 | 20 | /var/log/db-monitor.log { 21 | daily 22 | rotate {{ db_log_rotation_count | default(14) }} 23 | missingok 24 | notifempty 25 | compress 26 | delaycompress 27 | create 0644 {{ log_user | default('root') }} {{ log_group | default('root') }} 28 | dateext 29 | dateformat -%Y%m%d 30 | sharedscripts 31 | postrotate 32 | systemctl reload rsyslog >/dev/null 2>&1 || true 33 | endscript 34 | } 35 | 36 | /var/log/php/db-errors.log { 37 | daily 38 | rotate {{ db_log_rotation_count | default(14) }} 39 | missingok 40 | notifempty 41 | compress 42 | delaycompress 43 | create 0644 {{ web_user | default('www-data') }} {{ web_group | default('www-data') }} 44 | dateext 45 | dateformat -%Y%m%d 46 | sharedscripts 47 | postrotate 48 | systemctl reload php-fpm >/dev/null 2>&1 || true 49 | systemctl reload apache2 >/dev/null 2>&1 || true 50 | endscript 51 | } 52 | 53 | /var/log/db-backup/* { 54 | daily 55 | rotate {{ db_log_rotation_count | default(14) }} 56 | missingok 57 | notifempty 58 | compress 59 | delaycompress 60 | create 0644 {{ log_user | default('root') }} {{ log_group | default('root') }} 61 | dateext 62 | dateformat -%Y%m%d 63 | } 64 | 65 | /var/log/db-monitor/* { 66 | daily 67 | rotate {{ db_log_rotation_count | default(14) }} 68 | missingok 69 | notifempty 70 | compress 71 | delaycompress 72 | create 0644 {{ log_user | default('root') }} {{ log_group | default('root') }} 73 | dateext 74 | dateformat -%Y%m%d 75 | } 76 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/roles/efs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # EFS role tasks 3 | # 
These tasks configure EFS mounting on the web servers 4 | 5 | - name: Install EFS utilities 6 | package: 7 | name: amazon-efs-utils 8 | state: present 9 | when: environment != 'localstack' 10 | 11 | - name: Create EFS mount point 12 | file: 13 | path: "{{ efs_mount_point }}" 14 | state: directory 15 | owner: apache 16 | group: apache 17 | mode: 0755 18 | when: environment != 'localstack' 19 | 20 | - name: Check if EFS is already mounted 21 | shell: mount | grep -q "{{ efs_mount_point }}" 22 | register: efs_mounted 23 | changed_when: false 24 | failed_when: false 25 | when: environment != 'localstack' 26 | 27 | - name: Add EFS mount to fstab 28 | lineinfile: 29 | path: /etc/fstab 30 | line: "{{ efs_id }}.efs.{{ aws_region }}.amazonaws.com:/ {{ efs_mount_point }} efs _netdev,tls,iam 0 0" 31 | state: present 32 | when: environment != 'localstack' 33 | 34 | - name: Mount EFS 35 | mount: 36 | path: "{{ efs_mount_point }}" 37 | src: "{{ efs_id }}.efs.{{ aws_region }}.amazonaws.com:/" 38 | fstype: efs 39 | opts: _netdev,tls,iam 40 | state: mounted 41 | when: environment != 'localstack' and efs_mounted.rc != 0 42 | 43 | - name: Set proper permissions for EFS mount point 44 | file: 45 | path: "{{ efs_mount_point }}" 46 | owner: apache 47 | group: apache 48 | mode: 0755 49 | state: directory 50 | when: environment != 'localstack' 51 | 52 | - name: Create shared directories on EFS 53 | file: 54 | path: "{{ efs_mount_point }}/{{ item }}" 55 | state: directory 56 | owner: apache 57 | group: apache 58 | mode: 0755 59 | loop: 60 | - uploads 61 | - cache 62 | - sessions 63 | - logs 64 | when: environment != 'localstack' 65 | 66 | - name: Create a test file on EFS 67 | copy: 68 | content: "This is a test file to verify EFS is working correctly." 
69 | dest: "{{ efs_mount_point }}/efs-test.txt" 70 | owner: apache 71 | group: apache 72 | mode: 0644 73 | when: environment != 'localstack' 74 | 75 | - name: Create a dummy file for LocalStack testing 76 | file: 77 | path: /tmp/efs-role-applied 78 | state: touch 79 | mode: 0644 80 | when: environment == 'localstack' 81 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Common variables for all environments 3 | 4 | # Project information 5 | project_name: "lamp-stack" 6 | domain_name: "testerlab.com" 7 | 8 | # Environment (will be overridden by inventory) 9 | environment: "{{ lookup('env', 'DEPLOY_ENV') | default('localstack', true) }}" 10 | 11 | # AWS region 12 | aws_region: "us-east-1" 13 | 14 | # Common tags 15 | common_tags: 16 | Project: "LAMP Stack" 17 | Environment: "{{ environment }}" 18 | ManagedBy: "Ansible" 19 | Owner: "DevOps Team" 20 | 21 | # VPC configuration 22 | vpc_cidr: "10.0.0.0/16" 23 | public_subnet_1_cidr: "10.0.1.0/24" 24 | public_subnet_2_cidr: "10.0.2.0/24" 25 | private_subnet_1_cidr: "10.0.3.0/24" 26 | private_subnet_2_cidr: "10.0.4.0/24" 27 | private_subnet_3_cidr: "10.0.5.0/24" 28 | private_subnet_4_cidr: "10.0.6.0/24" 29 | 30 | # EC2 configuration 31 | ec2_instance_type: "t4g.small" 32 | ec2_ami_owner: "amazon" 33 | ec2_ami_name: "al2023-ami-*-arm64" 34 | ec2_key_name: "aws-key" 35 | ec2_min_instances: 2 36 | ec2_max_instances: 6 37 | ec2_desired_instances: 2 38 | 39 | # EFS configuration 40 | efs_performance_mode: "generalPurpose" 41 | efs_throughput_mode: "bursting" 42 | efs_mount_point: "/var/www/html/shared" 43 | 44 | # RDS configuration 45 | db_instance_class: "db.t4g.small" 46 | db_engine: "aurora-mysql" 47 | db_engine_version: "8.0" 48 | db_name: "lampdb" 49 | db_username: "admin" 50 | db_password: "{{ lookup('env', 'DB_PASSWORD') | default('ChangeMe123!', true) 
}}" 51 | db_port: 3306 52 | db_allocated_storage: 20 53 | db_backup_retention_period: 7 54 | db_multi_az: true 55 | 56 | # ALB configuration 57 | alb_http_port: 80 58 | alb_https_port: 443 59 | alb_healthcheck_path: "/health.php" 60 | alb_healthcheck_interval: 30 61 | alb_healthcheck_timeout: 5 62 | alb_healthcheck_healthy_threshold: 2 63 | alb_healthcheck_unhealthy_threshold: 2 64 | 65 | # PHP configuration 66 | php_version: "8.1" 67 | php_packages: 68 | - php 69 | - php-cli 70 | - php-fpm 71 | - php-mysqlnd 72 | - php-zip 73 | - php-devel 74 | - php-gd 75 | - php-mcrypt 76 | - php-mbstring 77 | - php-curl 78 | - php-xml 79 | - php-pear 80 | - php-bcmath 81 | - php-json 82 | 83 | # Apache configuration 84 | apache_server_admin: "admin@testerlab.com" 85 | apache_server_name: "{{ domain_name }}" 86 | apache_document_root: "/var/www/html" 87 | 88 | # Application configuration 89 | app_name: "lamp-app" 90 | app_git_repo: "" 91 | app_version: "main" 92 | app_deploy_dir: "/var/www/html" 93 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/web/templates/info.php.j2: -------------------------------------------------------------------------------- 1 | 403 Forbidden'; 20 | echo '

Access to this resource is restricted in production environments.

'; 21 | exit; 22 | } 23 | 24 | // Display a warning banner 25 | echo '
'; 26 | echo '

Security Warning

'; 27 | echo '

This page displays sensitive information about your PHP configuration. It should not be accessible in production environments.

'; 28 | echo '

Current environment: ' . htmlspecialchars($environment) . '

'; 29 | echo '

Client IP: ' . htmlspecialchars($client_ip) . '

'; 30 | echo '
'; 31 | 32 | // Display server information 33 | echo '

Server Information

'; 34 | echo ''; 40 | 41 | // Display AWS environment information if available 42 | echo '

AWS Environment

'; 43 | echo ''; 48 | 49 | // Display PHP information 50 | phpinfo(); 51 | -------------------------------------------------------------------------------- /cloudformation_template.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Example CloudFormation template for a simple EC2 instance", 4 | "Parameters": { 5 | "KeyName": { 6 | "Description": "Name of an existing EC2 KeyPair to enable SSH access", 7 | "Type": "AWS::EC2::KeyPair::KeyName", 8 | "ConstraintDescription": "must be the name of an existing EC2 KeyPair." 9 | }, 10 | "InstanceType": { 11 | "Description": "EC2 instance type", 12 | "Type": "String", 13 | "Default": "t2.micro", 14 | "AllowedValues": ["t2.micro", "t2.small", "t2.medium"], 15 | "ConstraintDescription": "must be a valid EC2 instance type." 16 | } 17 | }, 18 | "Resources": { 19 | "EC2Instance": { 20 | "Type": "AWS::EC2::Instance", 21 | "Properties": { 22 | "InstanceType": { "Ref": "InstanceType" }, 23 | "SecurityGroups": [{ "Ref": "InstanceSecurityGroup" }], 24 | "KeyName": { "Ref": "KeyName" }, 25 | "ImageId": "ami-0c55b159cbfafe1f0", 26 | "Tags": [ 27 | { "Key": "Name", "Value": "ExampleInstance" }, 28 | { "Key": "Environment", "Value": "Development" } 29 | ] 30 | } 31 | }, 32 | "InstanceSecurityGroup": { 33 | "Type": "AWS::EC2::SecurityGroup", 34 | "Properties": { 35 | "GroupDescription": "Enable SSH access via port 22", 36 | "SecurityGroupIngress": [ 37 | { 38 | "IpProtocol": "tcp", 39 | "FromPort": "22", 40 | "ToPort": "22", 41 | "CidrIp": "0.0.0.0/0" 42 | }, 43 | { 44 | "IpProtocol": "tcp", 45 | "FromPort": "80", 46 | "ToPort": "80", 47 | "CidrIp": "0.0.0.0/0" 48 | } 49 | ] 50 | } 51 | } 52 | }, 53 | "Outputs": { 54 | "InstanceId": { 55 | "Description": "InstanceId of the newly created EC2 instance", 56 | "Value": { "Ref": "EC2Instance" } 57 | }, 58 | "AZ": { 59 | "Description": "Availability Zone of the newly created EC2 instance", 
60 | "Value": { "Fn::GetAtt": ["EC2Instance", "AvailabilityZone"] } 61 | }, 62 | "PublicDNS": { 63 | "Description": "Public DNSName of the newly created EC2 instance", 64 | "Value": { "Fn::GetAtt": ["EC2Instance", "PublicDnsName"] } 65 | }, 66 | "PublicIP": { 67 | "Description": "Public IP address of the newly created EC2 instance", 68 | "Value": { "Fn::GetAtt": ["EC2Instance", "PublicIp"] } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/deploy_to_aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to deploy the LAMP stack to AWS 3 | 4 | # Set environment variables 5 | export ENVIRONMENT=aws 6 | export AWS_REGION=${AWS_REGION:-us-east-1} 7 | 8 | # Check if AWS credentials are configured 9 | if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ]; then 10 | echo "AWS credentials not found. Please configure your AWS credentials." 11 | echo "You can set them as environment variables or configure the AWS CLI with 'aws configure'." 12 | exit 1 13 | fi 14 | 15 | # Create a directory for logs 16 | mkdir -p logs 17 | 18 | # Prompt for confirmation 19 | echo "This script will deploy a LAMP stack to AWS in region $AWS_REGION." 20 | echo "This will create real AWS resources that may incur costs." 21 | read -p "Are you sure you want to continue? (y/n) " -n 1 -r 22 | echo 23 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then 24 | echo "Deployment cancelled." 25 | exit 0 26 | fi 27 | 28 | # Prompt for domain name 29 | read -p "Enter your domain name (leave empty to skip DNS setup): " DOMAIN_NAME 30 | if [ ! -z "$DOMAIN_NAME" ]; then 31 | # Update the domain name in the group vars 32 | sed -i "s/domain_name: .*/domain_name: $DOMAIN_NAME/" group_vars/aws.yml 33 | echo "Domain name set to $DOMAIN_NAME" 34 | fi 35 | 36 | # Prompt for SSH key name 37 | read -p "Enter your EC2 key pair name (must exist in AWS): " KEY_NAME 38 | if [ ! 
-z "$KEY_NAME" ]; then 39 | # Update the key name in the group vars 40 | sed -i "s/ec2_key_name: .*/ec2_key_name: $KEY_NAME/" group_vars/aws.yml 41 | echo "EC2 key name set to $KEY_NAME" 42 | else 43 | echo "No key name provided. Using default from group_vars/aws.yml." 44 | fi 45 | 46 | # Run the main playbook 47 | echo "Running LAMP stack deployment to AWS..." 48 | ansible-playbook playbooks/main.yml -v | tee logs/aws_deployment.log 49 | 50 | # Check the result 51 | if [ ${PIPESTATUS[0]} -eq 0 ]; then 52 | echo "Deployment completed successfully!" 53 | 54 | # Display the resources created 55 | echo "Resources created:" 56 | echo "===================" 57 | 58 | # Load the ALB info 59 | if [ -f .alb_info.yml ]; then 60 | ALB_DNS=$(grep alb_dns_name .alb_info.yml | cut -d' ' -f2) 61 | echo "Load Balancer DNS: $ALB_DNS" 62 | echo "You can access your application at: http://$ALB_DNS" 63 | 64 | if [ ! -z "$DOMAIN_NAME" ]; then 65 | echo "Once DNS propagates, you can also access it at: https://$DOMAIN_NAME" 66 | fi 67 | fi 68 | 69 | echo "===================" 70 | echo "To clean up all resources, run: ./demos/aws-lamp-stack/cleanup_aws.sh" 71 | else 72 | echo "Deployment failed. Check logs/aws_deployment.log for details." 
73 | fi 74 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # DB Client Role Default Variables 3 | 4 | # MySQL client configuration 5 | mysql_client_packages: 6 | - mysql-client 7 | - python3-mysqldb 8 | - python3-pymysql 9 | - libmysqlclient-dev 10 | - mysql-common 11 | 12 | mysql_client_utilities: 13 | - percona-toolkit 14 | - mytop 15 | - innotop 16 | - mysqltuner 17 | 18 | mysql_credentials_dir: /etc/mysql/credentials 19 | mysql_credentials_file: "{{ mysql_credentials_dir }}/db-credentials.cnf" 20 | mysql_port: 3306 21 | mysql_charset: utf8mb4 22 | mysql_collation: utf8mb4_unicode_ci 23 | 24 | # Database connection settings 25 | db_host: "{{ rds_endpoint | default('localhost') }}" 26 | db_name: "{{ rds_db_name | default('lamp_db') }}" 27 | db_user: "{{ rds_master_username | default('lamp_user') }}" 28 | db_password: "{{ rds_master_password | default('changeme') }}" 29 | db_persistent_connection: false 30 | db_connection_timeout: 10 31 | db_max_connections: 100 32 | db_new_link: false 33 | db_display_errors: false 34 | db_log_errors: true 35 | db_error_log_file: /var/log/php/db-errors.log 36 | 37 | # SSL configuration 38 | db_ssl_enabled: true 39 | db_ssl_verify: true 40 | mysql_ssl_ca_path: /etc/mysql/ssl/rds-ca-2019-root.pem 41 | 42 | # Backup settings 43 | db_backup_enabled: true 44 | db_backup_dir: /var/backups/mysql 45 | db_backup_user: root 46 | db_backup_group: root 47 | db_backup_retention: 30 48 | db_backup_bucket: "" 49 | db_backup_prefix: db-backups 50 | db_backup_hour: 3 51 | db_backup_minute: 0 52 | db_backup_sns_topic: "" 53 | 54 | # Monitoring settings 55 | db_monitoring_enabled: true 56 | db_alert_threshold_connections: 100 57 | db_alert_threshold_slow_queries: 10 58 | db_alert_threshold_replication_lag: 300 59 | db_monitor_sns_topic: "" 60 | 
db_alarm_sns_topic: "" 61 | 62 | # Performance settings 63 | db_query_cache_size: 20M 64 | db_query_cache_limit: 1M 65 | db_query_cache_type: 1 66 | db_query_cache_min_res_unit: 4K 67 | db_slow_query_log: true 68 | db_slow_query_log_file: /var/log/mysql/mysql-slow.log 69 | db_long_query_time: 2 70 | 71 | # CloudWatch settings 72 | cloudwatch_log_group: "{{ aws_resource_prefix | default('lamp') }}-logs" 73 | cloudwatch_log_retention_days: 30 74 | instance_id: "{{ ansible_ec2_instance_id | default(inventory_hostname) }}" 75 | 76 | # Web server settings 77 | web_user: www-data 78 | web_group: www-data 79 | web_root: /var/www/html 80 | app_include_path: /var/www/includes 81 | app_db_prefix: "" 82 | app_db_debug: false 83 | 84 | # Environment settings 85 | db_env_file: /etc/db-env 86 | environment: "{{ env | default('production') }}" 87 | aws_region: "{{ aws_region | default('us-east-1') }}" 88 | 89 | # Log settings 90 | log_user: root 91 | log_group: root 92 | logrotate_db_template: logrotate-db.j2 93 | -------------------------------------------------------------------------------- /localstack/run_sample_playbook.mjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // Script to run the sample playbook with LocalStack 4 | // This script demonstrates how to run Ansible playbooks with LocalStack 5 | 6 | import { execSync } from 'child_process'; 7 | import { fileURLToPath } from 'url'; 8 | import path from 'path'; 9 | import fs from 'fs'; 10 | 11 | // Get current file directory (equivalent to __dirname in CommonJS) 12 | const __filename = fileURLToPath(import.meta.url); 13 | const __dirname = path.dirname(__filename); 14 | 15 | // Helper function to execute shell commands 16 | function runCommand(command) { 17 | console.log(`Executing: ${command}`); 18 | try { 19 | const output = execSync(command, { encoding: 'utf8' }); 20 | console.log(output); 21 | return output; 22 | } catch (error) { 23 | 
console.error(`Error executing command: ${error.message}`); 24 | if (error.stderr) console.error(error.stderr); 25 | throw error; 26 | } 27 | } 28 | 29 | // Check if LocalStack is running 30 | function checkLocalStackRunning() { 31 | try { 32 | runCommand('awslocal s3 ls'); 33 | console.log("LocalStack is running!"); 34 | return true; 35 | } catch (error) { 36 | console.error("LocalStack is not running. Please start LocalStack with 'localstack start'."); 37 | return false; 38 | } 39 | } 40 | 41 | // Run the sample playbook 42 | async function runSamplePlaybook() { 43 | try { 44 | // Check if LocalStack is running 45 | console.log("Checking if LocalStack is running..."); 46 | if (!checkLocalStackRunning()) { 47 | return; 48 | } 49 | 50 | // Get paths to playbook and inventory 51 | const playbookPath = path.join(__dirname, 'sample_playbook.yml'); 52 | const inventoryPath = path.join(__dirname, 'inventory.ini'); 53 | 54 | // Verify files exist 55 | if (!fs.existsSync(playbookPath)) { 56 | console.error(`Playbook not found: ${playbookPath}`); 57 | return; 58 | } 59 | 60 | if (!fs.existsSync(inventoryPath)) { 61 | console.error(`Inventory not found: ${inventoryPath}`); 62 | return; 63 | } 64 | 65 | // Run the playbook 66 | console.log("\nRunning sample playbook with ansible-playbook..."); 67 | runCommand(`ansible-playbook ${playbookPath} -i ${inventoryPath}`); 68 | 69 | console.log("\nPlaybook execution completed successfully!"); 70 | console.log("\nThis demonstrates how to use Ansible with LocalStack for testing AWS operations locally."); 71 | console.log("You can use this approach to test your AWS playbooks before running them against real AWS infrastructure."); 72 | } catch (error) { 73 | console.error("Failed to run sample playbook:", error); 74 | } 75 | } 76 | 77 | // Run the sample playbook 78 | runSamplePlaybook(); 79 | -------------------------------------------------------------------------------- 
/demos/aws-terraform-lamp/terraform/modules/compute/variables.tf: -------------------------------------------------------------------------------- 1 | # Compute Module Variables 2 | 3 | variable "project_name" { 4 | description = "Name of the project" 5 | type = string 6 | } 7 | 8 | variable "environment" { 9 | description = "Environment (dev, staging, prod)" 10 | type = string 11 | } 12 | 13 | variable "vpc_id" { 14 | description = "ID of the VPC" 15 | type = string 16 | } 17 | 18 | variable "subnet_ids" { 19 | description = "List of subnet IDs for the Auto Scaling Group" 20 | type = list(string) 21 | } 22 | 23 | variable "security_group_id" { 24 | description = "ID of the security group for the EC2 instances" 25 | type = string 26 | } 27 | 28 | variable "iam_instance_profile" { 29 | description = "Name of the IAM instance profile for the EC2 instances" 30 | type = string 31 | } 32 | 33 | variable "instance_type" { 34 | description = "Instance type for the EC2 instances" 35 | type = string 36 | default = "t3.micro" 37 | } 38 | 39 | variable "key_name" { 40 | description = "Name of the key pair for SSH access" 41 | type = string 42 | default = "" 43 | } 44 | 45 | variable "min_size" { 46 | description = "Minimum size of the Auto Scaling Group" 47 | type = number 48 | default = 2 49 | } 50 | 51 | variable "max_size" { 52 | description = "Maximum size of the Auto Scaling Group" 53 | type = number 54 | default = 4 55 | } 56 | 57 | variable "desired_capacity" { 58 | description = "Desired capacity of the Auto Scaling Group" 59 | type = number 60 | default = 2 61 | } 62 | 63 | variable "target_group_arns" { 64 | description = "List of target group ARNs for the Auto Scaling Group" 65 | type = list(string) 66 | } 67 | 68 | variable "efs_id" { 69 | description = "ID of the EFS file system" 70 | type = string 71 | } 72 | 73 | variable "efs_mount_point" { 74 | description = "Mount point for the EFS file system" 75 | type = string 76 | default = "/var/www/html/shared" 77 | } 
78 | 79 | variable "db_endpoint" { 80 | description = "Endpoint of the RDS instance" 81 | type = string 82 | } 83 | 84 | variable "db_name" { 85 | description = "Name of the database" 86 | type = string 87 | } 88 | 89 | variable "db_username" { 90 | description = "Username for the database" 91 | type = string 92 | } 93 | 94 | variable "db_password" { 95 | description = "Password for the database" 96 | type = string 97 | sensitive = true 98 | } 99 | 100 | variable "tags" { 101 | description = "A map of tags to add to all resources" 102 | type = map(string) 103 | default = {} 104 | } 105 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/dns/main.tf: -------------------------------------------------------------------------------- 1 | # DNS Module 2 | # Creates Route53 records for the ALB 3 | 4 | # Get the hosted zone 5 | data "aws_route53_zone" "main" { 6 | count = var.domain_name != "" ? 1 : 0 7 | name = var.domain_name 8 | } 9 | 10 | # Create A record for the ALB 11 | resource "aws_route53_record" "main" { 12 | count = var.domain_name != "" ? 1 : 0 13 | zone_id = data.aws_route53_zone.main[0].zone_id 14 | name = var.subdomain != "" ? "${var.subdomain}.${var.domain_name}" : var.domain_name 15 | type = "A" 16 | 17 | alias { 18 | name = var.alb_dns_name 19 | zone_id = var.alb_zone_id 20 | evaluate_target_health = true 21 | } 22 | } 23 | 24 | # Create CNAME record for www 25 | resource "aws_route53_record" "www" { 26 | count = var.domain_name != "" && var.create_www_record ? 1 : 0 27 | zone_id = data.aws_route53_zone.main[0].zone_id 28 | name = "www.${var.subdomain != "" ? "${var.subdomain}." : ""}${var.domain_name}" 29 | type = "CNAME" 30 | ttl = 300 31 | records = [var.subdomain != "" ? "${var.subdomain}.${var.domain_name}" : var.domain_name] 32 | } 33 | 34 | # Create ACM certificate 35 | resource "aws_acm_certificate" "main" { 36 | count = var.domain_name != "" && var.create_certificate ? 
1 : 0 37 | domain_name = var.subdomain != "" ? "${var.subdomain}.${var.domain_name}" : var.domain_name 38 | subject_alternative_names = var.create_www_record ? ["www.${var.subdomain != "" ? "${var.subdomain}." : ""}${var.domain_name}"] : [] 39 | validation_method = "DNS" 40 | 41 | lifecycle { 42 | create_before_destroy = true 43 | } 44 | 45 | tags = merge( 46 | var.tags, 47 | { 48 | Name = "${var.project_name}-${var.environment}-certificate" 49 | } 50 | ) 51 | } 52 | 53 | # Create DNS validation records for the certificate 54 | resource "aws_route53_record" "cert_validation" { 55 | for_each = var.domain_name != "" && var.create_certificate ? { 56 | for dvo in aws_acm_certificate.main[0].domain_validation_options : dvo.domain_name => { 57 | name = dvo.resource_record_name 58 | record = dvo.resource_record_value 59 | type = dvo.resource_record_type 60 | } 61 | } : {} 62 | 63 | zone_id = data.aws_route53_zone.main[0].zone_id 64 | name = each.value.name 65 | type = each.value.type 66 | records = [each.value.record] 67 | ttl = 60 68 | } 69 | 70 | # Validate the certificate 71 | resource "aws_acm_certificate_validation" "main" { 72 | count = var.domain_name != "" && var.create_certificate ? 
1 : 0 73 | certificate_arn = aws_acm_certificate.main[0].arn 74 | validation_record_fqdns = [for record in aws_route53_record.cert_validation : record.fqdn] 75 | } 76 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Web server role tasks 3 | # These tasks configure Apache and PHP for the web servers 4 | 5 | - name: Install Apache and PHP packages 6 | package: 7 | name: 8 | - httpd 9 | - mod_ssl 10 | - "{{ php_packages }}" 11 | state: present 12 | when: environment != 'localstack' 13 | 14 | - name: Configure Apache 15 | template: 16 | src: httpd.conf.j2 17 | dest: /etc/httpd/conf/httpd.conf 18 | owner: root 19 | group: root 20 | mode: 0644 21 | notify: Restart Apache 22 | when: environment != 'localstack' 23 | 24 | - name: Configure Apache SSL 25 | template: 26 | src: ssl.conf.j2 27 | dest: /etc/httpd/conf.d/ssl.conf 28 | owner: root 29 | group: root 30 | mode: 0644 31 | notify: Restart Apache 32 | when: environment != 'localstack' 33 | 34 | - name: Configure PHP 35 | template: 36 | src: php.ini.j2 37 | dest: /etc/php.ini 38 | owner: root 39 | group: root 40 | mode: 0644 41 | notify: Restart Apache 42 | when: environment != 'localstack' 43 | 44 | - name: Create web document root 45 | file: 46 | path: "{{ apache_document_root }}" 47 | state: directory 48 | owner: apache 49 | group: apache 50 | mode: 0755 51 | when: environment != 'localstack' 52 | 53 | - name: Create index.php file 54 | template: 55 | src: index.php.j2 56 | dest: "{{ apache_document_root }}/index.php" 57 | owner: apache 58 | group: apache 59 | mode: 0644 60 | when: environment != 'localstack' 61 | 62 | - name: Create health check file 63 | template: 64 | src: health.php.j2 65 | dest: "{{ apache_document_root }}/health.php" 66 | owner: apache 67 | group: apache 68 | mode: 0644 69 | when: environment != 'localstack' 70 | 71 | - 
name: Create info.php file 72 | template: 73 | src: info.php.j2 74 | dest: "{{ apache_document_root }}/info.php" 75 | owner: apache 76 | group: apache 77 | mode: 0644 78 | when: environment != 'localstack' 79 | 80 | - name: Enable and start Apache 81 | service: 82 | name: httpd 83 | state: started 84 | enabled: yes 85 | when: environment != 'localstack' 86 | 87 | - name: Configure firewall for Apache 88 | firewalld: 89 | service: "{{ item }}" 90 | permanent: yes 91 | state: enabled 92 | loop: 93 | - http 94 | - https 95 | notify: Restart firewalld 96 | when: environment != 'localstack' 97 | 98 | - name: Deploy application if requested 99 | include_tasks: deploy_app.yml 100 | when: deploy_app | default(false) 101 | 102 | - name: Create a dummy file for LocalStack testing 103 | file: 104 | path: /tmp/web-role-applied 105 | state: touch 106 | mode: 0644 107 | when: environment == 'localstack' 108 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/common/templates/sshd_config.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # SSH server configuration for {{ inventory_hostname }} 3 | # This file is managed by Ansible - local changes will be overwritten 4 | 5 | # Basic SSH server configuration 6 | Port {{ ssh_port | default(22) }} 7 | Protocol 2 8 | HostKey /etc/ssh/ssh_host_rsa_key 9 | HostKey /etc/ssh/ssh_host_ecdsa_key 10 | HostKey /etc/ssh/ssh_host_ed25519_key 11 | 12 | # Ciphers and keying 13 | KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256 14 | Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr 15 | MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com 16 | 17 | # Logging 18 | SyslogFacility AUTH 19 | LogLevel VERBOSE 20 | 21 | # Authentication 22 | LoginGraceTime 30s 23 | PermitRootLogin {{ 
ssh_permit_root_login | default('no') }} 24 | StrictModes yes 25 | MaxAuthTries 3 26 | MaxSessions 10 27 | 28 | # Password authentication 29 | PasswordAuthentication {{ ssh_password_authentication | default('no') }} 30 | PermitEmptyPasswords no 31 | ChallengeResponseAuthentication no 32 | 33 | # Public key authentication 34 | PubkeyAuthentication yes 35 | AuthorizedKeysFile .ssh/authorized_keys 36 | 37 | # Other authentication methods 38 | HostbasedAuthentication no 39 | IgnoreRhosts yes 40 | IgnoreUserKnownHosts no 41 | 42 | # Forwarding 43 | AllowAgentForwarding yes 44 | AllowTcpForwarding yes 45 | GatewayPorts no 46 | X11Forwarding no 47 | X11DisplayOffset 10 48 | X11UseLocalhost yes 49 | PermitTTY yes 50 | PrintMotd no 51 | PrintLastLog yes 52 | TCPKeepAlive yes 53 | 54 | # Security 55 | UsePAM yes 56 | UseDNS no 57 | PermitUserEnvironment no 58 | Compression delayed 59 | ClientAliveInterval 300 60 | ClientAliveCountMax 3 61 | 62 | # SFTP configuration 63 | Subsystem sftp internal-sftp 64 | 65 | # Allow only specific users/groups 66 | {% if ssh_allowed_users is defined and ssh_allowed_users | length > 0 %} 67 | AllowUsers {{ ssh_allowed_users | join(' ') }} 68 | {% endif %} 69 | 70 | {% if ssh_allowed_groups is defined and ssh_allowed_groups | length > 0 %} 71 | AllowGroups {{ ssh_allowed_groups | join(' ') }} 72 | {% endif %} 73 | 74 | # Deny specific users/groups 75 | {% if ssh_denied_users is defined and ssh_denied_users | length > 0 %} 76 | DenyUsers {{ ssh_denied_users | join(' ') }} 77 | {% endif %} 78 | 79 | {% if ssh_denied_groups is defined and ssh_denied_groups | length > 0 %} 80 | DenyGroups {{ ssh_denied_groups | join(' ') }} 81 | {% endif %} 82 | 83 | # Match blocks for specific configurations 84 | {% if ssh_match_blocks is defined %} 85 | {% for match_block in ssh_match_blocks %} 86 | Match {{ match_block.criteria }} 87 | {% for option, value in match_block.options.items() %} 88 | {{ option }} {{ value }} 89 | {% endfor %} 90 | 91 | {% endfor %} 
92 | {% endif %} 93 | 94 | # Additional custom SSH configuration 95 | {% if ssh_custom_config is defined %} 96 | {{ ssh_custom_config }} 97 | {% endif %} 98 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # MySQL client configuration for {{ inventory_hostname }} 3 | # This file is managed by Ansible - local changes will be overwritten 4 | 5 | [client] 6 | host = {{ db_host }} 7 | port = {{ mysql_port | default('3306') }} 8 | user = {{ db_user }} 9 | password = {{ db_password }} 10 | default-character-set = {{ mysql_charset | default('utf8mb4') }} 11 | 12 | {% if db_ssl_enabled | default(true) | bool %} 13 | # SSL Configuration 14 | ssl = ON 15 | ssl-verify-server-cert = {{ db_ssl_verify | default(true) | bool | lower }} 16 | ssl-ca = {{ mysql_ssl_ca_path }} 17 | {% endif %} 18 | 19 | # Connection settings 20 | connect_timeout = {{ db_connection_timeout | default(10) }} 21 | max_allowed_packet = 16M 22 | 23 | [mysql] 24 | # MySQL CLI client settings 25 | prompt = "{{ db_name }}> " 26 | no-auto-rehash 27 | show-warnings 28 | sigint-ignore 29 | auto-vertical-output = {{ (db_auto_vertical_output | default(false)) | bool | lower }} 30 | pager = "less -SFX" 31 | default-character-set = {{ mysql_charset | default('utf8mb4') }} 32 | 33 | # History settings 34 | histignore = "DROP*:CREATE*:ALTER*:TRUNCATE*:DELETE*" 35 | histignore-re = "^COMMIT$|^ROLLBACK$" 36 | select-limit = 1000 37 | 38 | {% if db_ssl_enabled | default(true) | bool %} 39 | # SSL Configuration 40 | ssl = ON 41 | ssl-verify-server-cert = {{ db_ssl_verify | default(true) | bool | lower }} 42 | ssl-ca = {{ mysql_ssl_ca_path }} 43 | {% endif %} 44 | 45 | [mysqldump] 46 | # Dump settings 47 | quick 48 | quote-names 49 | max_allowed_packet = 64M 50 | default-character-set = {{ mysql_charset | 
default('utf8mb4') }} 51 | 52 | {% if db_ssl_enabled | default(true) | bool %} 53 | # SSL Configuration 54 | ssl = ON 55 | ssl-verify-server-cert = {{ db_ssl_verify | default(true) | bool | lower }} 56 | ssl-ca = {{ mysql_ssl_ca_path }} 57 | {% endif %} 58 | 59 | # Additional options 60 | single-transaction 61 | skip-lock-tables 62 | add-drop-table 63 | add-locks 64 | create-options 65 | disable-keys 66 | extended-insert 67 | skip-add-locks 68 | skip-comments 69 | skip-dump-date 70 | 71 | [mysqlimport] 72 | # Import settings 73 | local 74 | default-character-set = {{ mysql_charset | default('utf8mb4') }} 75 | 76 | {% if db_ssl_enabled | default(true) | bool %} 77 | # SSL Configuration 78 | ssl = ON 79 | ssl-verify-server-cert = {{ db_ssl_verify | default(true) | bool | lower }} 80 | ssl-ca = {{ mysql_ssl_ca_path }} 81 | {% endif %} 82 | 83 | [mysqlcheck] 84 | # Check settings 85 | auto-repair = true 86 | check-upgrade = true 87 | optimize = true 88 | default-character-set = {{ mysql_charset | default('utf8mb4') }} 89 | 90 | {% if db_ssl_enabled | default(true) | bool %} 91 | # SSL Configuration 92 | ssl = ON 93 | ssl-verify-server-cert = {{ db_ssl_verify | default(true) | bool | lower }} 94 | ssl-ca = {{ mysql_ssl_ca_path }} 95 | {% endif %} 96 | 97 | [myisamchk] 98 | # MyISAM check settings 99 | key_buffer_size = 128M 100 | sort_buffer_size = 128M 101 | read_buffer = 2M 102 | write_buffer = 2M 103 | 104 | [mysqlhotcopy] 105 | interactive-timeout 106 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/templates/db-env.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Database environment variables for {{ inventory_hostname }} 3 | # This file is managed by Ansible - local changes will be overwritten 4 | 5 | # Database connection settings 6 | DB_HOST="{{ db_host }}" 7 | DB_PORT="{{ mysql_port | default('3306') }}" 8 | 
DB_NAME="{{ db_name }}" 9 | DB_USER="{{ db_user }}" 10 | DB_PASSWORD="{{ db_password }}" 11 | DB_CHARSET="{{ mysql_charset | default('utf8mb4') }}" 12 | DB_COLLATION="{{ mysql_collation | default('utf8mb4_unicode_ci') }}" 13 | 14 | # SSL configuration 15 | DB_SSL_ENABLED="{{ db_ssl_enabled | default(true) | bool | lower }}" 16 | {% if db_ssl_enabled | default(true) | bool %} 17 | DB_SSL_CA="{{ mysql_ssl_ca_path }}" 18 | DB_SSL_VERIFY="{{ db_ssl_verify | default(true) | bool | lower }}" 19 | {% endif %} 20 | 21 | # Connection options 22 | DB_PERSISTENT="{{ db_persistent_connection | default(false) | bool | lower }}" 23 | DB_TIMEOUT="{{ db_connection_timeout | default(10) }}" 24 | DB_MAX_CONNECTIONS="{{ db_max_connections | default(100) }}" 25 | 26 | # Performance settings 27 | DB_QUERY_CACHE_SIZE="{{ db_query_cache_size | default('20M') }}" 28 | DB_QUERY_CACHE_LIMIT="{{ db_query_cache_limit | default('1M') }}" 29 | DB_QUERY_CACHE_TYPE="{{ db_query_cache_type | default('1') }}" 30 | DB_QUERY_CACHE_MIN_RES_UNIT="{{ db_query_cache_min_res_unit | default('4K') }}" 31 | 32 | # Logging settings 33 | DB_LOG_ERRORS="{{ db_log_errors | default(true) | bool | lower }}" 34 | DB_ERROR_LOG_FILE="{{ db_error_log_file | default('/var/log/php/db-errors.log') }}" 35 | DB_SLOW_QUERY_LOG="{{ db_slow_query_log | default(true) | bool | lower }}" 36 | DB_SLOW_QUERY_LOG_FILE="{{ db_slow_query_log_file | default('/var/log/mysql/mysql-slow.log') }}" 37 | DB_LONG_QUERY_TIME="{{ db_long_query_time | default(2) }}" 38 | 39 | # Backup settings 40 | DB_BACKUP_ENABLED="{{ db_backup_enabled | default(true) | bool | lower }}" 41 | DB_BACKUP_DIR="{{ db_backup_dir }}" 42 | DB_BACKUP_RETENTION="{{ db_backup_retention | default(30) }}" 43 | DB_BACKUP_BUCKET="{{ db_backup_bucket | default('') }}" 44 | DB_BACKUP_PREFIX="{{ db_backup_prefix | default('db-backups') }}" 45 | DB_BACKUP_HOUR="{{ db_backup_hour | default('3') }}" 46 | DB_BACKUP_MINUTE="{{ db_backup_minute | default('0') }}" 47 | 48 | # 
Monitoring settings 49 | DB_MONITORING_ENABLED="{{ db_monitoring_enabled | default(true) | bool | lower }}" 50 | DB_ALERT_THRESHOLD_CONNECTIONS="{{ db_alert_threshold_connections | default(100) }}" 51 | DB_ALERT_THRESHOLD_SLOW_QUERIES="{{ db_alert_threshold_slow_queries | default(10) }}" 52 | DB_ALERT_THRESHOLD_REPLICATION_LAG="{{ db_alert_threshold_replication_lag | default(300) }}" 53 | 54 | # AWS settings 55 | AWS_REGION="{{ aws_region }}" 56 | {% if db_backup_sns_topic is defined %} 57 | DB_BACKUP_SNS_TOPIC="{{ db_backup_sns_topic }}" 58 | {% endif %} 59 | {% if db_monitor_sns_topic is defined %} 60 | DB_MONITOR_SNS_TOPIC="{{ db_monitor_sns_topic }}" 61 | {% endif %} 62 | {% if db_alarm_sns_topic is defined %} 63 | DB_ALARM_SNS_TOPIC="{{ db_alarm_sns_topic }}" 64 | {% endif %} 65 | 66 | # Application settings 67 | APP_DB_PREFIX="{{ app_db_prefix | default('') }}" 68 | APP_DB_DEBUG="{{ app_db_debug | default(false) | bool | lower }}" 69 | APP_DB_DISPLAY_ERRORS="{{ db_display_errors | default(false) | bool | lower }}" 70 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/LOCALSTACK_COMPATIBILITY.md: -------------------------------------------------------------------------------- 1 | # LocalStack Compatibility Improvements 2 | 3 | This document outlines the changes made to improve compatibility with LocalStack for testing the LAMP stack deployment. 4 | 5 | ## Overview 6 | 7 | LocalStack is a cloud service emulator that runs in a single container on your laptop or in your CI environment. It provides an easy-to-use test/mocking framework for developing cloud applications. However, it has some limitations compared to the actual AWS services, especially for complex operations or services that are not fully implemented. 
8 | 9 | ## Changes Made 10 | 11 | We've added error handling to several playbooks to ensure they can run successfully with LocalStack, even when certain AWS services are not fully implemented or behave differently than in the actual AWS environment. 12 | 13 | ### Main Playbook (`main.yml`) 14 | 15 | - Added `any_errors_fatal: false` to the imported playbooks to ensure that errors in individual playbooks don't cause the entire deployment to fail 16 | 17 | ### 1. EFS Playbook (`efs.yml`) 18 | 19 | - Used direct `awslocal` CLI commands for EFS operations in LocalStack mode instead of MCP tools 20 | - Added separate tasks for AWS and LocalStack environments 21 | - Replaced the "Wait for EFS mount targets to be available" task with a simulation task for LocalStack 22 | - Replaced the "Create EFS access point for web servers" task with a simulation task for LocalStack 23 | - This approach avoids the long wait times and potential failures when LocalStack doesn't properly implement the state transitions for EFS resources, while using the native LocalStack CLI commands that are known to work correctly 24 | 25 | ### 2. RDS Playbook (`rds.yml`) 26 | 27 | - Added `ignore_errors: "{{ environment == 'localstack' }}"` to the "Wait for DB cluster to be available" task 28 | 29 | ### 3. EC2 Playbook (`ec2.yml`) 30 | 31 | - Added `ignore_errors: "{{ environment == 'localstack' }}"` to the "Wait for instances to be running in ASG" task 32 | 33 | ### 4. Load Balancer Playbook (`loadbalancer.yml`) 34 | 35 | - Added `ignore_errors: "{{ environment == 'localstack' }}"` to the "Wait for ALB to be active" task 36 | 37 | ### 5. DNS and SSL Playbook (`dns_ssl.yml`) 38 | 39 | - Added `ignore_errors: "{{ environment == 'localstack' }}"` to the "Wait for certificate validation" task 40 | 41 | ## Testing 42 | 43 | These changes allow the playbooks to continue execution even when certain operations fail or timeout in LocalStack, which is expected for some services that are not fully implemented. 
The playbooks will still create the necessary resources and configurations for testing purposes. 44 | 45 | To test the LAMP stack deployment with LocalStack: 46 | 47 | 1. Ensure LocalStack is running: 48 | ``` 49 | docker run --rm -it -p 4566:4566 -p 4571:4571 localstack/localstack 50 | ``` 51 | 52 | 2. Run the test script: 53 | ``` 54 | ./test_with_localstack.sh 55 | ``` 56 | 57 | ## Notes 58 | 59 | - The `localstack_skip_long_operations` variable in `group_vars/localstack.yml` is used to skip certain long-running operations that are not necessary for testing. 60 | - The `ignore_errors` parameter is conditionally set to only ignore errors when running in the LocalStack environment, ensuring that errors are still caught when deploying to actual AWS. 61 | -------------------------------------------------------------------------------- /src/sysoperator/common/errors.ts: -------------------------------------------------------------------------------- 1 | import { ErrorCode } from '@modelcontextprotocol/sdk/types.js'; 2 | 3 | export class AnsibleError extends Error { 4 | constructor(message: string) { 5 | super(message); 6 | this.name = 'AnsibleError'; 7 | } 8 | } 9 | 10 | export class AnsibleExecutionError extends AnsibleError { 11 | readonly stderr?: string; 12 | 13 | constructor(message: string, stderr?: string) { 14 | super(message); 15 | this.name = 'AnsibleExecutionError'; 16 | this.stderr = stderr; 17 | } 18 | } 19 | 20 | export class AnsiblePlaybookNotFoundError extends AnsibleError { 21 | constructor(path: string) { 22 | super(`Playbook not found: ${path}`); 23 | this.name = 'AnsiblePlaybookNotFoundError'; 24 | } 25 | } 26 | 27 | export class AnsibleInventoryNotFoundError extends AnsibleError { 28 | constructor(path: string) { 29 | super(`Inventory not found: ${path}`); 30 | this.name = 'AnsibleInventoryNotFoundError'; 31 | } 32 | } 33 | 34 | export class AnsibleNotInstalledError extends AnsibleError { 35 | constructor() { 36 | super('Ansible is not installed 
or not found in PATH. Please install Ansible first.'); 37 | this.name = 'AnsibleNotInstalledError'; 38 | } 39 | } 40 | 41 | export class AwsCredentialsError extends AnsibleError { 42 | constructor(message = 'AWS credentials are not configured or are invalid') { 43 | super(message); 44 | this.name = 'AwsCredentialsError'; 45 | } 46 | } 47 | 48 | export class AwsCliNotInstalledError extends AnsibleError { 49 | constructor() { 50 | super('AWS CLI is not installed or not found in PATH. Please install AWS CLI first.'); 51 | this.name = 'AwsCliNotInstalledError'; 52 | } 53 | } 54 | 55 | export class AwsModuleNotFoundError extends AnsibleError { 56 | constructor(moduleName: string) { 57 | super(`AWS module not found: ${moduleName}. Make sure amazon.aws collection is installed.`); 58 | this.name = 'AwsModuleNotFoundError'; 59 | } 60 | } 61 | 62 | export class TerraformNotInstalledError extends AnsibleError { 63 | constructor() { 64 | super('Terraform is not installed or not found in PATH. Please install Terraform first.'); 65 | this.name = 'TerraformNotInstalledError'; 66 | } 67 | } 68 | 69 | export class TflocalNotInstalledError extends AnsibleError { 70 | constructor() { 71 | super('tflocal is not installed or not found in PATH. 
Please install tflocal first.'); 72 | this.name = 'TflocalNotInstalledError'; 73 | } 74 | } 75 | 76 | export function isAnsibleError(error: unknown): error is AnsibleError { 77 | return error instanceof AnsibleError; 78 | } 79 | 80 | export function formatAnsibleError(error: AnsibleError): string { 81 | let message = `Ansible Error: ${error.message}`; 82 | 83 | if (error instanceof AnsibleExecutionError && error.stderr) { 84 | message = `Execution Error: ${error.message}\nDetails: ${error.stderr}`; 85 | } else if (error instanceof AnsiblePlaybookNotFoundError) { 86 | message = `Playbook Not Found: ${error.message}`; 87 | } else if (error instanceof AnsibleInventoryNotFoundError) { 88 | message = `Inventory Not Found: ${error.message}`; 89 | } else if (error instanceof AnsibleNotInstalledError) { 90 | message = `Ansible Not Installed: ${error.message}`; 91 | } 92 | 93 | return message; 94 | } 95 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | # Inventory settings 3 | inventory = inventory/aws_ec2.yml 4 | inventory_plugins = ~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory 5 | 6 | # General settings 7 | remote_user = ec2-user 8 | host_key_checking = False 9 | retry_files_enabled = False 10 | roles_path = roles 11 | library = library 12 | module_utils = module_utils 13 | callback_whitelist = profile_tasks, timer 14 | stdout_callback = yaml 15 | bin_ansible_callbacks = True 16 | nocows = 1 17 | force_color = 1 18 | deprecation_warnings = False 19 | command_warnings = False 20 | system_warnings = False 21 | interpreter_python = auto_silent 22 | 23 | # Performance settings 24 | forks = 20 25 | gathering = smart 26 | fact_caching = jsonfile 27 | fact_caching_connection = /tmp/ansible_fact_cache 28 | fact_caching_timeout = 7200 29 | internal_poll_interval = 0.001 30 | 
callback_enabled = true 31 | 32 | # SSH settings 33 | timeout = 30 34 | ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ServerAliveInterval=30 -o ServerAliveCountMax=10 35 | pipelining = True 36 | control_path = /tmp/ansible-ssh-%%h-%%p-%%r 37 | 38 | # Privilege escalation settings 39 | become = True 40 | become_method = sudo 41 | become_user = root 42 | become_ask_pass = False 43 | 44 | # Error handling 45 | any_errors_fatal = False 46 | max_fail_percentage = 0 47 | error_on_undefined_vars = True 48 | display_skipped_hosts = False 49 | display_args_to_stdout = False 50 | show_custom_stats = True 51 | 52 | # Logging settings 53 | log_path = /tmp/ansible.log 54 | verbosity = 0 55 | no_target_syslog = False 56 | 57 | # Jinja2 settings 58 | jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n 59 | 60 | # Vault settings 61 | vault_password_file = ~/.ansible/vault_password 62 | 63 | [inventory] 64 | # Inventory settings 65 | enable_plugins = aws_ec2, host_list, script, yaml, ini, auto 66 | unparsed_is_failed = True 67 | cache = True 68 | cache_plugin = jsonfile 69 | cache_connection = /tmp/ansible_inventory_cache 70 | cache_timeout = 3600 71 | 72 | [privilege_escalation] 73 | # Privilege escalation settings 74 | become = True 75 | become_method = sudo 76 | become_user = root 77 | become_ask_pass = False 78 | 79 | [ssh_connection] 80 | # SSH connection settings 81 | ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ServerAliveInterval=30 -o ServerAliveCountMax=10 82 | control_path = /tmp/ansible-ssh-%%h-%%p-%%r 83 | pipelining = True 84 | retries = 3 85 | timeout = 30 86 | 87 | [persistent_connection] 88 | # Persistent connection settings 89 | connect_timeout = 30 90 | connect_retries = 3 91 | connect_interval = 1 92 | 93 | [accelerate] 94 | # Accelerate settings 95 | accelerate_port = 5099 96 | accelerate_timeout = 30 97 | accelerate_connect_timeout = 5.0 98 | 99 | [selinux] 100 | # SELinux settings 101 | special_context_filesystems = 
nfs,vboxsf,fuse,ramfs,9p,vfat 102 | 103 | [colors] 104 | # Color settings 105 | highlight = white 106 | verbose = blue 107 | warn = bright purple 108 | error = red 109 | debug = dark gray 110 | deprecate = purple 111 | skip = cyan 112 | unreachable = red 113 | ok = green 114 | changed = yellow 115 | diff_add = green 116 | diff_remove = red 117 | diff_lines = cyan 118 | 119 | [diff] 120 | # Diff settings 121 | always = False 122 | context = 3 123 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/terraform/modules/storage/main.tf: -------------------------------------------------------------------------------- 1 | # Storage Module 2 | # Creates EFS file system and mount targets 3 | 4 | # EFS File System 5 | resource "aws_efs_file_system" "main" { 6 | creation_token = "${var.project_name}-${var.environment}-efs" 7 | 8 | performance_mode = var.performance_mode 9 | throughput_mode = var.throughput_mode 10 | encrypted = true 11 | 12 | lifecycle_policy { 13 | transition_to_ia = "AFTER_30_DAYS" 14 | } 15 | 16 | tags = merge( 17 | var.tags, 18 | { 19 | Name = "${var.project_name}-${var.environment}-efs" 20 | } 21 | ) 22 | } 23 | 24 | # EFS Mount Targets (one per subnet) 25 | resource "aws_efs_mount_target" "main" { 26 | count = length(var.subnet_ids) 27 | file_system_id = aws_efs_file_system.main.id 28 | subnet_id = var.subnet_ids[count.index] 29 | security_groups = [var.security_group_id] 30 | } 31 | 32 | # EFS Access Point 33 | resource "aws_efs_access_point" "main" { 34 | file_system_id = aws_efs_file_system.main.id 35 | 36 | posix_user { 37 | gid = 48 # Apache user GID 38 | uid = 48 # Apache user UID 39 | } 40 | 41 | root_directory { 42 | path = "/var/www/html/shared" 43 | creation_info { 44 | owner_gid = 48 45 | owner_uid = 48 46 | permissions = "0755" 47 | } 48 | } 49 | 50 | tags = merge( 51 | var.tags, 52 | { 53 | Name = "${var.project_name}-${var.environment}-efs-ap" 54 | } 55 | ) 56 | } 57 | 58 | # EFS 
Backup Policy 59 | resource "aws_efs_backup_policy" "main" { 60 | file_system_id = aws_efs_file_system.main.id 61 | 62 | backup_policy { 63 | status = "ENABLED" 64 | } 65 | } 66 | 67 | # CloudWatch Alarms for EFS 68 | resource "aws_cloudwatch_metric_alarm" "efs_burst_credit_balance" { 69 | alarm_name = "${var.project_name}-${var.environment}-efs-burst-credit-balance" 70 | comparison_operator = "LessThanThreshold" 71 | evaluation_periods = 1 72 | metric_name = "BurstCreditBalance" 73 | namespace = "AWS/EFS" 74 | period = 300 75 | statistic = "Average" 76 | threshold = 1000000000 # 1GB of burst credits 77 | alarm_description = "This alarm monitors EFS burst credit balance" 78 | 79 | dimensions = { 80 | FileSystemId = aws_efs_file_system.main.id 81 | } 82 | 83 | alarm_actions = [] # Add SNS topic ARN here if needed 84 | ok_actions = [] # Add SNS topic ARN here if needed 85 | 86 | tags = var.tags 87 | } 88 | 89 | resource "aws_cloudwatch_metric_alarm" "efs_percent_io_limit" { 90 | alarm_name = "${var.project_name}-${var.environment}-efs-percent-io-limit" 91 | comparison_operator = "GreaterThanThreshold" 92 | evaluation_periods = 3 93 | metric_name = "PercentIOLimit" 94 | namespace = "AWS/EFS" 95 | period = 300 96 | statistic = "Average" 97 | threshold = 90 98 | alarm_description = "This alarm monitors EFS IO limit utilization" 99 | 100 | dimensions = { 101 | FileSystemId = aws_efs_file_system.main.id 102 | } 103 | 104 | alarm_actions = [] # Add SNS topic ARN here if needed 105 | ok_actions = [] # Add SNS topic ARN here if needed 106 | 107 | tags = var.tags 108 | } 109 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/common/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # NTP configuration for {{ inventory_hostname }} 3 | # This file is managed by Ansible - local changes will be overwritten 4 | 5 | {% if 
ansible_os_family == 'Debian' %} 6 | # NTP configuration for Debian/Ubuntu systems 7 | 8 | # Drift file to remember clock rate across restarts 9 | driftfile /var/lib/ntp/ntp.drift 10 | 11 | # Enable this if you want statistics to be logged 12 | #statsdir /var/log/ntpstats/ 13 | 14 | # Statistics logging configuration 15 | statistics loopstats peerstats clockstats 16 | filegen loopstats file loopstats type day enable 17 | filegen peerstats file peerstats type day enable 18 | filegen clockstats file clockstats type day enable 19 | 20 | # You should have at least 4 NTP servers 21 | pool 0.{{ ansible_distribution | lower }}.pool.ntp.org iburst 22 | pool 1.{{ ansible_distribution | lower }}.pool.ntp.org iburst 23 | pool 2.{{ ansible_distribution | lower }}.pool.ntp.org iburst 24 | pool 3.{{ ansible_distribution | lower }}.pool.ntp.org iburst 25 | 26 | # AWS Time Sync Service 27 | server 169.254.169.123 prefer iburst 28 | 29 | # By default, exchange time with everybody, but don't allow configuration 30 | restrict -4 default kod notrap nomodify nopeer noquery limited 31 | restrict -6 default kod notrap nomodify nopeer noquery limited 32 | 33 | # Local users may interrogate the ntp server more closely 34 | restrict 127.0.0.1 35 | restrict ::1 36 | 37 | # Needed for adding pool entries 38 | restrict source notrap nomodify noquery 39 | 40 | {% else %} 41 | # Chrony configuration for RedHat/CentOS systems 42 | 43 | # Record the rate at which the system clock gains/loses time 44 | driftfile /var/lib/chrony/drift 45 | 46 | # Allow the system clock to be stepped in the first three updates 47 | # if its offset is larger than 1 second 48 | makestep 1.0 3 49 | 50 | # Enable kernel synchronization of the real-time clock (RTC) 51 | rtcsync 52 | 53 | # Enable hardware timestamping on all interfaces that support it 54 | #hwtimestamp * 55 | 56 | # Increase the minimum number of selectable sources required to adjust 57 | # the system clock 58 | #minsources 2 59 | 60 | # AWS Time Sync
# Record the rate at which the system clock gains/loses time
8 | when: app_git_repo == "" 9 | 10 | - name: Create application directory 11 | file: 12 | path: "{{ app_deploy_dir }}" 13 | state: directory 14 | owner: apache 15 | group: apache 16 | mode: 0755 17 | when: environment != 'localstack' 18 | 19 | - name: Clone application repository if provided 20 | git: 21 | repo: "{{ app_git_repo }}" 22 | dest: "{{ app_deploy_dir }}" 23 | version: "{{ app_version }}" 24 | force: yes 25 | become: yes 26 | become_user: apache 27 | when: app_git_repo != "" and environment != 'localstack' 28 | 29 | - name: Create demo application if no git repo provided 30 | block: 31 | - name: Create demo index.php 32 | template: 33 | src: demo/index.php.j2 34 | dest: "{{ app_deploy_dir }}/index.php" 35 | owner: apache 36 | group: apache 37 | mode: 0644 38 | 39 | - name: Create demo CSS file 40 | template: 41 | src: demo/style.css.j2 42 | dest: "{{ app_deploy_dir }}/style.css" 43 | owner: apache 44 | group: apache 45 | mode: 0644 46 | 47 | - name: Create demo JavaScript file 48 | template: 49 | src: demo/script.js.j2 50 | dest: "{{ app_deploy_dir }}/script.js" 51 | owner: apache 52 | group: apache 53 | mode: 0644 54 | 55 | - name: Create demo database config file 56 | template: 57 | src: demo/config.php.j2 58 | dest: "{{ app_deploy_dir }}/config.php" 59 | owner: apache 60 | group: apache 61 | mode: 0644 62 | 63 | - name: Create demo database connection file 64 | template: 65 | src: demo/db.php.j2 66 | dest: "{{ app_deploy_dir }}/db.php" 67 | owner: apache 68 | group: apache 69 | mode: 0644 70 | 71 | - name: Create demo API file 72 | template: 73 | src: demo/api.php.j2 74 | dest: "{{ app_deploy_dir }}/api.php" 75 | owner: apache 76 | group: apache 77 | mode: 0644 78 | 79 | - name: Create demo health check file 80 | template: 81 | src: demo/health.php.j2 82 | dest: "{{ app_deploy_dir }}/health.php" 83 | owner: apache 84 | group: apache 85 | mode: 0644 86 | 87 | - name: Create demo images directory 88 | file: 89 | path: "{{ app_deploy_dir }}/images" 90 | 
state: directory 91 | owner: apache 92 | group: apache 93 | mode: 0755 94 | 95 | - name: Create demo favicon 96 | template: 97 | src: demo/favicon.ico.j2 98 | dest: "{{ app_deploy_dir }}/favicon.ico" 99 | owner: apache 100 | group: apache 101 | mode: 0644 102 | when: app_git_repo == "" and environment != 'localstack' 103 | 104 | - name: Set proper permissions for application files 105 | file: 106 | path: "{{ app_deploy_dir }}" 107 | owner: apache 108 | group: apache 109 | mode: 0755 110 | recurse: yes 111 | when: environment != 'localstack' 112 | 113 | - name: Create a dummy file for LocalStack testing 114 | file: 115 | path: /tmp/app-deployed 116 | state: touch 117 | mode: 0644 118 | when: environment == 'localstack' 119 | -------------------------------------------------------------------------------- /localstack/localstack_test.js: -------------------------------------------------------------------------------- 1 | // LocalStack Test Script for MCP Ansible Server 2 | // This script demonstrates how to use the MCP Ansible server with LocalStack 3 | 4 | const { execSync } = require('child_process'); 5 | const fs = require('fs'); 6 | const path = require('path'); 7 | const os = require('os'); 8 | 9 | // Helper function to execute shell commands 10 | function runCommand(command) { 11 | console.log(`Executing: ${command}`); 12 | try { 13 | const output = execSync(command, { encoding: 'utf8' }); 14 | console.log(output); 15 | return output; 16 | } catch (error) { 17 | console.error(`Error executing command: ${error.message}`); 18 | console.error(error.stderr); 19 | throw error; 20 | } 21 | } 22 | 23 | // Create a temporary directory for our test files 24 | function createTempDir() { 25 | const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ansible-localstack-')); 26 | console.log(`Created temporary directory: ${tempDir}`); 27 | return tempDir; 28 | } 29 | 30 | // Create a playbook that uses awslocal instead of aws 31 | function createLocalStackPlaybook(tempDir) { 32 
| const playbookContent = `--- 33 | # Example Ansible playbook for LocalStack 34 | - name: LocalStack S3 Operations 35 | hosts: localhost 36 | connection: local 37 | gather_facts: false 38 | tasks: 39 | - name: List S3 buckets using awslocal 40 | shell: awslocal s3 ls 41 | register: s3_buckets 42 | 43 | - name: Display S3 buckets 44 | debug: 45 | var: s3_buckets.stdout_lines 46 | 47 | - name: Create a new S3 bucket 48 | shell: awslocal s3 mb s3://ansible-test-bucket 49 | register: create_bucket 50 | 51 | - name: Display bucket creation result 52 | debug: 53 | var: create_bucket 54 | 55 | - name: Upload a file to the bucket 56 | shell: echo "Hello from Ansible MCP" > /tmp/test.txt && awslocal s3 cp /tmp/test.txt s3://ansible-test-bucket/test.txt 57 | register: upload_file 58 | 59 | - name: Display upload result 60 | debug: 61 | var: upload_file 62 | 63 | - name: List objects in the bucket 64 | shell: awslocal s3 ls s3://ansible-test-bucket 65 | register: list_objects 66 | 67 | - name: Display objects 68 | debug: 69 | var: list_objects.stdout_lines 70 | `; 71 | 72 | const playbookPath = path.join(tempDir, 'localstack_playbook.yml'); 73 | fs.writeFileSync(playbookPath, playbookContent); 74 | console.log(`Created playbook at: ${playbookPath}`); 75 | return playbookPath; 76 | } 77 | 78 | // Main function to run the test 79 | async function runTest() { 80 | try { 81 | // Check if LocalStack is running 82 | console.log("Checking if LocalStack is running..."); 83 | runCommand('awslocal s3 ls'); 84 | console.log("LocalStack is running!"); 85 | 86 | // Create temporary directory and playbook 87 | const tempDir = createTempDir(); 88 | const playbookPath = createLocalStackPlaybook(tempDir); 89 | 90 | // Run the playbook using ansible-playbook directly 91 | console.log("\nRunning playbook with ansible-playbook..."); 92 | runCommand(`ansible-playbook ${playbookPath}`); 93 | 94 | // Clean up 95 | console.log("\nCleaning up..."); 96 | fs.rmSync(tempDir, { recursive: true, force: 
true }); 97 | console.log(`Removed temporary directory: ${tempDir}`); 98 | 99 | console.log("\nTest completed successfully!"); 100 | } catch (error) { 101 | console.error("Test failed:", error); 102 | } 103 | } 104 | 105 | // Run the test 106 | runTest(); 107 | -------------------------------------------------------------------------------- /docker-build-run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to build and run the SysOperator MCP server Docker container 4 | 5 | # Function to display usage information 6 | function show_usage { 7 | echo "Usage: $0 [options]" 8 | echo "Options:" 9 | echo " --build Build the Docker image" 10 | echo " --run Run the Docker container" 11 | echo " --playbooks DIR Mount the specified directory as /playbooks in the container" 12 | echo " --aws Mount local AWS credentials to the container" 13 | echo " --terraform DIR Mount the specified directory as /terraform in the container" 14 | echo " --help Display this help message" 15 | echo "" 16 | echo "Examples:" 17 | echo " $0 --build # Build the Docker image" 18 | echo " $0 --run # Run the Docker container" 19 | echo " $0 --build --run # Build and run the Docker container" 20 | echo " $0 --run --playbooks ./playbooks --aws # Run with playbooks and AWS credentials" 21 | } 22 | 23 | # Default values 24 | BUILD=false 25 | RUN=false 26 | PLAYBOOKS_DIR="" 27 | AWS_CREDS=false 28 | TERRAFORM_DIR="" 29 | 30 | # Parse command line arguments 31 | while [[ $# -gt 0 ]]; do 32 | case "$1" in 33 | --build) 34 | BUILD=true 35 | shift 36 | ;; 37 | --run) 38 | RUN=true 39 | shift 40 | ;; 41 | --playbooks) 42 | PLAYBOOKS_DIR="$2" 43 | shift 2 44 | ;; 45 | --aws) 46 | AWS_CREDS=true 47 | shift 48 | ;; 49 | --terraform) 50 | TERRAFORM_DIR="$2" 51 | shift 2 52 | ;; 53 | --help) 54 | show_usage 55 | exit 0 56 | ;; 57 | *) 58 | echo "Unknown option: $1" 59 | show_usage 60 | exit 1 61 | ;; 62 | esac 63 | done 64 | 65 | # Check if at least 
one action is specified 66 | if [[ "$BUILD" == "false" && "$RUN" == "false" ]]; then 67 | echo "Error: No action specified. Use --build, --run, or both." 68 | show_usage 69 | exit 1 70 | fi 71 | 72 | # Build the Docker image if requested 73 | if [[ "$BUILD" == "true" ]]; then 74 | echo "Building Docker image..." 75 | docker build -t sysoperator-mcp . 76 | 77 | if [[ $? -ne 0 ]]; then 78 | echo "Error: Docker build failed." 79 | exit 1 80 | fi 81 | 82 | echo "Docker image built successfully." 83 | fi 84 | 85 | # Run the Docker container if requested 86 | if [[ "$RUN" == "true" ]]; then 87 | # Prepare the Docker run command 88 | CMD="docker run -i" 89 | 90 | # Add volume mounts if specified 91 | if [[ -n "$PLAYBOOKS_DIR" ]]; then 92 | if [[ ! -d "$PLAYBOOKS_DIR" ]]; then 93 | echo "Error: Playbooks directory '$PLAYBOOKS_DIR' does not exist." 94 | exit 1 95 | fi 96 | CMD="$CMD -v $(realpath $PLAYBOOKS_DIR):/playbooks" 97 | fi 98 | 99 | # Add AWS credentials if requested 100 | if [[ "$AWS_CREDS" == "true" ]]; then 101 | if [[ ! -d "$HOME/.aws" ]]; then 102 | echo "Warning: AWS credentials directory '$HOME/.aws' does not exist." 103 | else 104 | CMD="$CMD -v $HOME/.aws:/root/.aws" 105 | fi 106 | fi 107 | 108 | # Add Terraform directory if specified 109 | if [[ -n "$TERRAFORM_DIR" ]]; then 110 | if [[ ! -d "$TERRAFORM_DIR" ]]; then 111 | echo "Error: Terraform directory '$TERRAFORM_DIR' does not exist." 112 | exit 1 113 | fi 114 | CMD="$CMD -v $(realpath $TERRAFORM_DIR):/terraform" 115 | fi 116 | 117 | # Add the image name to the command 118 | CMD="$CMD sysoperator-mcp" 119 | 120 | echo "Running Docker container..." 
121 | echo "Command: $CMD" 122 | 123 | # Execute the Docker run command 124 | eval $CMD 125 | fi 126 | -------------------------------------------------------------------------------- /localstack/sample_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Sample Ansible playbook for LocalStack 3 | # This playbook demonstrates how to use LocalStack with Ansible 4 | 5 | - name: LocalStack AWS Operations 6 | hosts: localhost 7 | connection: local 8 | gather_facts: false 9 | tasks: 10 | - name: Check if LocalStack is running 11 | shell: awslocal s3 ls 12 | register: localstack_check 13 | ignore_errors: true 14 | 15 | - name: Fail if LocalStack is not running 16 | fail: 17 | msg: "LocalStack is not running. Please start LocalStack with 'localstack start'." 18 | when: localstack_check.rc != 0 19 | 20 | - name: List S3 buckets 21 | shell: awslocal s3 ls 22 | register: s3_buckets 23 | 24 | - name: Display S3 buckets 25 | debug: 26 | var: s3_buckets.stdout_lines 27 | 28 | - name: Create a new S3 bucket 29 | shell: awslocal s3 mb s3://ansible-localstack-bucket 30 | register: create_bucket 31 | ignore_errors: yes 32 | 33 | - name: Display bucket creation result 34 | debug: 35 | var: create_bucket 36 | 37 | - name: Create a test file 38 | copy: 39 | dest: /tmp/localstack-test.txt 40 | content: | 41 | This is a test file for LocalStack S3 upload. 42 | Created by Ansible playbook. 
43 | 44 | - name: Upload file to S3 45 | shell: awslocal s3 cp /tmp/localstack-test.txt s3://ansible-localstack-bucket/test.txt 46 | register: upload_file 47 | 48 | - name: Display upload result 49 | debug: 50 | var: upload_file 51 | 52 | - name: List objects in the bucket 53 | shell: awslocal s3 ls s3://ansible-localstack-bucket 54 | register: list_objects 55 | 56 | - name: Display objects 57 | debug: 58 | var: list_objects.stdout_lines 59 | 60 | - name: Create CloudFormation stack 61 | block: 62 | - name: Create CloudFormation template file 63 | copy: 64 | dest: /tmp/cloudformation-template.json 65 | content: | 66 | { 67 | "Resources": { 68 | "MyBucket": { 69 | "Type": "AWS::S3::Bucket", 70 | "Properties": { 71 | "BucketName": "cf-created-bucket" 72 | } 73 | } 74 | } 75 | } 76 | 77 | - name: Deploy CloudFormation stack 78 | shell: awslocal cloudformation create-stack --stack-name ansible-test-stack --template-body file:///tmp/cloudformation-template.json 79 | register: cf_result 80 | 81 | - name: Display CloudFormation result 82 | debug: 83 | var: cf_result 84 | rescue: 85 | - name: Display CloudFormation error 86 | debug: 87 | msg: "CloudFormation operation failed. This might be because the stack already exists or CloudFormation is not fully supported in your LocalStack setup." 
Test infrastructure-as-code (IaC) that uses AWS services locally
1. Edit `src/sysoperator/common/utils.ts`:
Rebuild the server: 68 | ```bash 69 | npm run build 70 | ``` 71 | 72 | ## Creating Ansible Playbooks for LocalStack 73 | 74 | When creating Ansible playbooks for LocalStack, use shell commands with `awslocal` instead of AWS modules: 75 | 76 | ```yaml 77 | - name: List S3 buckets 78 | shell: awslocal s3 ls 79 | register: s3_buckets 80 | 81 | - name: Display buckets 82 | debug: 83 | var: s3_buckets.stdout_lines 84 | ``` 85 | 86 | ## Supported AWS Services 87 | 88 | The following AWS services have been tested with this integration: 89 | 90 | - S3: Create buckets, upload files, list objects 91 | - CloudFormation: Create stacks, deploy templates 92 | - EC2: List instances (creating instances requires AMI setup in LocalStack) 93 | 94 | ## Implementation Strategy 95 | 96 | For a complete integration of the MCP SysOperator server with LocalStack: 97 | 98 | 1. Create a fork of the MCP SysOperator server repository 99 | 2. Modify the utils.ts file to use awslocal instead of aws 100 | 3. Modify the aws.ts file to use shell commands with awslocal instead of AWS modules 101 | 4. Add a flag or environment variable to toggle between real AWS and LocalStack 102 | 5. Rebuild the server and test with LocalStack 103 | 104 | This approach allows you to use the MCP SysOperator server with LocalStack for testing without affecting the ability to use it with real AWS when needed. 105 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/test_with_localstack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script to test the LAMP stack deployment with LocalStack 3 | 4 | # Set environment variables 5 | export ENVIRONMENT=localstack 6 | export AWS_REGION=us-east-1 7 | export AWS_ACCESS_KEY_ID=test 8 | export AWS_SECRET_ACCESS_KEY=test 9 | 10 | # Check if LocalStack is running 11 | if ! 
curl -s http://localhost:4566/_localstack/health | grep -q "running"; then 12 | echo "LocalStack is not running. Please start LocalStack first." 13 | echo "You can start LocalStack with: docker run --rm -it -p 4566:4566 -p 4571:4571 localstack/localstack" 14 | exit 1 15 | fi 16 | 17 | # Create a directory for logs 18 | mkdir -p logs 19 | 20 | # Run the main playbook 21 | echo "Running LAMP stack deployment with LocalStack..." 22 | ansible-playbook playbooks/main.yml -v | tee logs/deployment.log 23 | 24 | # Check the result 25 | if [ ${PIPESTATUS[0]} -eq 0 ]; then 26 | echo "Deployment completed successfully!" 27 | 28 | # Display the resources created 29 | echo "Resources created:" 30 | echo "===================" 31 | 32 | # VPC 33 | echo "VPC:" 34 | aws --endpoint-url=http://localhost:4566 ec2 describe-vpcs --query "Vpcs[?Tags[?Key=='Name' && Value=='*lamp*']].{VpcId:VpcId,CidrBlock:CidrBlock,Name:Tags[?Key=='Name'].Value|[0]}" --output table 35 | 36 | # Subnets 37 | echo "Subnets:" 38 | aws --endpoint-url=http://localhost:4566 ec2 describe-subnets --query "Subnets[?Tags[?Key=='Name' && Value=='*lamp*']].{SubnetId:SubnetId,CidrBlock:CidrBlock,AvailabilityZone:AvailabilityZone,Name:Tags[?Key=='Name'].Value|[0]}" --output table 39 | 40 | # Security Groups 41 | echo "Security Groups:" 42 | aws --endpoint-url=http://localhost:4566 ec2 describe-security-groups --query "SecurityGroups[?GroupName!='default' && Tags[?Key=='Name' && Value=='*lamp*']].{GroupId:GroupId,GroupName:GroupName,Description:Description,Name:Tags[?Key=='Name'].Value|[0]}" --output table 43 | 44 | # EC2 Instances 45 | echo "EC2 Instances:" 46 | aws --endpoint-url=http://localhost:4566 ec2 describe-instances --query "Reservations[].Instances[?Tags[?Key=='Name' && Value=='*lamp*']].{InstanceId:InstanceId,InstanceType:InstanceType,State:State.Name,PrivateIp:PrivateIpAddress,Name:Tags[?Key=='Name'].Value|[0]}" --output table 47 | 48 | # RDS 49 | echo "RDS Clusters:" 50 | aws 
--endpoint-url=http://localhost:4566 rds describe-db-clusters --query "DBClusters[?DBClusterIdentifier=='*lamp*'].{ClusterId:DBClusterIdentifier,Engine:Engine,Status:Status,Endpoint:Endpoint}" --output table 51 | 52 | # EFS 53 | echo "EFS File Systems:" 54 | aws --endpoint-url=http://localhost:4566 efs describe-file-systems --query "FileSystems[?Tags[?Key=='Name' && Value=='*lamp*']].{FileSystemId:FileSystemId,LifeCycleState:LifeCycleState,Name:Tags[?Key=='Name'].Value|[0]}" --output table 55 | 56 | # Load Balancers 57 | echo "Load Balancers:" 58 | aws --endpoint-url=http://localhost:4566 elbv2 describe-load-balancers --query "LoadBalancers[?LoadBalancerName=='*lamp*'].{LoadBalancerName:LoadBalancerName,DNSName:DNSName,State:State.Code,Type:Type}" --output table 59 | 60 | echo "===================" 61 | echo "You can now test the deployment by accessing the load balancer DNS name." 62 | echo "Note: In LocalStack, the DNS name is not resolvable, but you can test the API endpoints directly." 63 | 64 | # Run a cleanup if requested 65 | read -p "Do you want to clean up the resources? (y/n) " -n 1 -r 66 | echo 67 | if [[ $REPLY =~ ^[Yy]$ ]]; then 68 | echo "Cleaning up resources..." 69 | ansible-playbook playbooks/cleanup.yml -v | tee logs/cleanup.log 70 | 71 | if [ ${PIPESTATUS[0]} -eq 0 ]; then 72 | echo "Cleanup completed successfully!" 73 | else 74 | echo "Cleanup failed. Check logs/cleanup.log for details." 75 | fi 76 | fi 77 | else 78 | echo "Deployment failed. Check logs/deployment.log for details." 79 | fi 80 | -------------------------------------------------------------------------------- /examples/example-script.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * This is an example script demonstrating how a client might interact 5 | * with the Ansible MCP Server directly. 
6 | * 7 | * Prerequisites: 8 | * - Node.js installed 9 | * - MCP Ansible server configured 10 | * - Ansible installed 11 | */ 12 | 13 | import { McpClient } from '@modelcontextprotocol/sdk/client/index.js'; 14 | import { SubprocessClientTransport } from '@modelcontextprotocol/sdk/client/subprocess.js'; 15 | 16 | async function main() { 17 | // 1. Create client transport to communicate with the MCP server 18 | const transport = new SubprocessClientTransport({ 19 | command: 'node', 20 | args: ['/path/to/mcp-ansible/build/index.js'], 21 | env: {} 22 | }); 23 | 24 | // 2. Create MCP client 25 | const client = new McpClient(); 26 | console.log('Connecting to Ansible MCP server...'); 27 | await client.connect(transport); 28 | 29 | try { 30 | // 3. List available tools 31 | console.log('\n=== Available tools ==='); 32 | const toolsResponse = await client.listTools(); 33 | console.log(`Found ${toolsResponse.tools.length} tools:`); 34 | for (const tool of toolsResponse.tools) { 35 | console.log(`- ${tool.name}: ${tool.description}`); 36 | } 37 | 38 | // 4. Check if the default inventory resource is available 39 | console.log('\n=== Available resources ==='); 40 | const resourcesResponse = await client.listResources(); 41 | console.log(`Found ${resourcesResponse.resources.length} resources:`); 42 | for (const resource of resourcesResponse.resources) { 43 | console.log(`- ${resource.uri}: ${resource.name}`); 44 | } 45 | 46 | // 5. List inventory using the list_inventory tool 47 | console.log('\n=== Listing inventory ==='); 48 | try { 49 | const inventoryResult = await client.callTool('list_inventory', { 50 | inventory: './examples/inventory.ini' 51 | }); 52 | console.log(inventoryResult.content[0].text); 53 | } catch (error) { 54 | console.error('Error listing inventory:', error.message); 55 | } 56 | 57 | // 6. 
Check syntax of the example playbook 58 | console.log('\n=== Checking playbook syntax ==='); 59 | try { 60 | const syntaxResult = await client.callTool('check_syntax', { 61 | playbook: './examples/playbook.yml' 62 | }); 63 | console.log(syntaxResult.content[0].text); 64 | } catch (error) { 65 | console.error('Error checking syntax:', error.message); 66 | } 67 | 68 | // 7. List tasks in the example playbook 69 | console.log('\n=== Listing playbook tasks ==='); 70 | try { 71 | const tasksResult = await client.callTool('list_tasks', { 72 | playbook: './examples/playbook.yml' 73 | }); 74 | console.log(tasksResult.content[0].text); 75 | } catch (error) { 76 | console.error('Error listing tasks:', error.message); 77 | } 78 | 79 | // 8. Run the playbook with check mode (--check flag) to simulate 80 | console.log('\n=== Running playbook in check mode ==='); 81 | try { 82 | const runResult = await client.callTool('run_playbook', { 83 | playbook: './examples/playbook.yml', 84 | inventory: './examples/inventory.ini', 85 | extraVars: { 86 | check_mode: true 87 | }, 88 | tags: 'setup', 89 | limit: 'webservers' 90 | }); 91 | console.log(runResult.content[0].text); 92 | } catch (error) { 93 | console.error('Error running playbook:', error.message); 94 | } 95 | 96 | } catch (error) { 97 | console.error('MCP Client error:', error); 98 | } finally { 99 | // 9. 
Close the connection 100 | await client.close(); 101 | console.log('\nConnection closed.'); 102 | } 103 | } 104 | 105 | main().catch(console.error); 106 | -------------------------------------------------------------------------------- /localstack/localstack_test.mjs: -------------------------------------------------------------------------------- 1 | // LocalStack Test Script for MCP Ansible Server 2 | // This script demonstrates how to use the MCP Ansible server with LocalStack 3 | 4 | import { execSync } from 'child_process'; 5 | import fs from 'fs'; 6 | import path from 'path'; 7 | import os from 'os'; 8 | import { fileURLToPath } from 'url'; 9 | 10 | // Get current file directory (equivalent to __dirname in CommonJS) 11 | const __filename = fileURLToPath(import.meta.url); 12 | const __dirname = path.dirname(__filename); 13 | 14 | // Helper function to execute shell commands 15 | function runCommand(command) { 16 | console.log(`Executing: ${command}`); 17 | try { 18 | const output = execSync(command, { encoding: 'utf8' }); 19 | console.log(output); 20 | return output; 21 | } catch (error) { 22 | console.error(`Error executing command: ${error.message}`); 23 | console.error(error.stderr); 24 | throw error; 25 | } 26 | } 27 | 28 | // Create a temporary directory for our test files 29 | function createTempDir() { 30 | const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ansible-localstack-')); 31 | console.log(`Created temporary directory: ${tempDir}`); 32 | return tempDir; 33 | } 34 | 35 | // Create a playbook that uses awslocal instead of aws 36 | function createLocalStackPlaybook(tempDir) { 37 | const playbookContent = `--- 38 | # Example Ansible playbook for LocalStack 39 | - name: LocalStack S3 Operations 40 | hosts: localhost 41 | connection: local 42 | gather_facts: false 43 | tasks: 44 | - name: List S3 buckets using awslocal 45 | shell: awslocal s3 ls 46 | register: s3_buckets 47 | 48 | - name: Display S3 buckets 49 | debug: 50 | var: 
s3_buckets.stdout_lines 51 | 52 | - name: Create a new S3 bucket 53 | shell: awslocal s3 mb s3://ansible-test-bucket 54 | register: create_bucket 55 | ignore_errors: yes 56 | 57 | - name: Display bucket creation result 58 | debug: 59 | var: create_bucket 60 | 61 | - name: Upload a file to the bucket 62 | shell: echo "Hello from Ansible MCP" > /tmp/test.txt && awslocal s3 cp /tmp/test.txt s3://ansible-test-bucket/test.txt 63 | register: upload_file 64 | 65 | - name: Display upload result 66 | debug: 67 | var: upload_file 68 | 69 | - name: List objects in the bucket 70 | shell: awslocal s3 ls s3://ansible-test-bucket 71 | register: list_objects 72 | 73 | - name: Display objects 74 | debug: 75 | var: list_objects.stdout_lines 76 | `; 77 | 78 | const playbookPath = path.join(tempDir, 'localstack_playbook.yml'); 79 | fs.writeFileSync(playbookPath, playbookContent); 80 | console.log(`Created playbook at: ${playbookPath}`); 81 | return playbookPath; 82 | } 83 | 84 | // Main function to run the test 85 | async function runTest() { 86 | try { 87 | // Check if LocalStack is running 88 | console.log("Checking if LocalStack is running..."); 89 | runCommand('awslocal s3 ls'); 90 | console.log("LocalStack is running!"); 91 | 92 | // Create temporary directory and playbook 93 | const tempDir = createTempDir(); 94 | const playbookPath = createLocalStackPlaybook(tempDir); 95 | 96 | // Run the playbook using ansible-playbook directly 97 | console.log("\nRunning playbook with ansible-playbook..."); 98 | runCommand(`ansible-playbook ${playbookPath}`); 99 | 100 | // Clean up 101 | console.log("\nCleaning up..."); 102 | fs.rmSync(tempDir, { recursive: true, force: true }); 103 | console.log(`Removed temporary directory: ${tempDir}`); 104 | 105 | console.log("\nTest completed successfully!"); 106 | } catch (error) { 107 | console.error("Test failed:", error); 108 | } 109 | } 110 | 111 | // Run the test 112 | runTest(); 113 | 
-------------------------------------------------------------------------------- /src/sysoperator/operations/terraform.README.md: -------------------------------------------------------------------------------- 1 | # Terraform Operations in MCP SysOperator 2 | 3 | This module provides functionality to execute Terraform commands through the MCP SysOperator server. It supports both standard Terraform and tflocal (Terraform with LocalStack) for local cloud development and testing. 4 | 5 | ## Features 6 | 7 | - Execute all common Terraform operations (init, plan, apply, destroy, etc.) 8 | - Support for variable files and inline variables 9 | - Backend configuration 10 | - State file management 11 | - Workspace management 12 | - LocalStack integration via tflocal 13 | 14 | ## Requirements 15 | 16 | - Terraform CLI installed and in PATH 17 | - For LocalStack integration: tflocal installed and LocalStack running 18 | 19 | ## Usage 20 | 21 | The Terraform operations are exposed through the `terraform` tool in the MCP SysOperator server. 
Here's how to use it: 22 | 23 | ### Basic Operations 24 | 25 | ``` 26 | 27 | ansible 28 | terraform 29 | 30 | { 31 | "action": "init|plan|apply|destroy|validate|output|import|workspace", 32 | "workingDir": "/path/to/terraform/project" 33 | } 34 | 35 | 36 | ``` 37 | 38 | ### Terraform with Variables 39 | 40 | ``` 41 | 42 | ansible 43 | terraform 44 | 45 | { 46 | "action": "apply", 47 | "workingDir": "/path/to/terraform/project", 48 | "vars": { 49 | "instance_type": "t2.micro", 50 | "region": "us-west-2", 51 | "count": 3 52 | }, 53 | "varFiles": [ 54 | "/path/to/terraform.tfvars", 55 | "/path/to/environment.tfvars" 56 | ] 57 | } 58 | 59 | 60 | ``` 61 | 62 | ### Terraform with LocalStack (tflocal) 63 | 64 | ``` 65 | 66 | ansible 67 | terraform 68 | 69 | { 70 | "action": "apply", 71 | "workingDir": "/path/to/terraform/project", 72 | "useLocalstack": true, 73 | "autoApprove": true 74 | } 75 | 76 | 77 | ``` 78 | 79 | ### Advanced Options 80 | 81 | ``` 82 | 83 | ansible 84 | terraform 85 | 86 | { 87 | "action": "plan", 88 | "workingDir": "/path/to/terraform/project", 89 | "state": "custom.tfstate", 90 | "target": ["aws_instance.web", "aws_security_group.allow_http"], 91 | "lockTimeout": "30s", 92 | "refresh": false, 93 | "backendConfig": { 94 | "bucket": "my-terraform-state", 95 | "key": "prod/terraform.tfstate", 96 | "region": "us-west-2" 97 | } 98 | } 99 | 100 | 101 | ``` 102 | 103 | ## Error Handling 104 | 105 | The Terraform operations module provides detailed error messages when Terraform commands fail. These include the standard error output from Terraform, which can be useful for debugging issues. 106 | 107 | ## Implementation Details 108 | 109 | The Terraform operations module is implemented in `terraform.ts` and works by: 110 | 111 | 1. Verifying Terraform or tflocal is installed 112 | 2. Constructing the appropriate command with all specified options 113 | 3. Executing the command in the specified working directory 114 | 4. 
Parsing and returning the output 115 | 116 | Special handling is provided for certain commands like `output`, which attempts to parse the JSON output to provide structured data. 117 | 118 | ## Development Workflow 119 | 120 | When using Terraform with LocalStack in a development workflow: 121 | 122 | 1. Start LocalStack: `localstack start` 123 | 2. Use the standard Terraform actions but set `useLocalstack: true` 124 | 3. tflocal will automatically redirect AWS API calls to LocalStack 125 | 126 | This allows for rapid development and testing of Terraform configurations without incurring AWS costs or requiring real AWS credentials. 127 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/inventory/localstack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # LocalStack Inventory Configuration 3 | # This file provides a static inventory for testing with LocalStack 4 | 5 | all: 6 | vars: 7 | # Common variables for all hosts 8 | ansible_connection: local 9 | ansible_python_interpreter: "{{ ansible_playbook_python }}" 10 | skip_aws_dependencies: true 11 | environment: development 12 | aws_region: us-east-1 13 | 14 | # LocalStack connection details 15 | localstack_host: localhost 16 | localstack_port: 4566 17 | localstack_endpoint: "http://{{ localstack_host }}:{{ localstack_port }}" 18 | 19 | # AWS credentials for LocalStack (these are dummy values) 20 | aws_access_key: test 21 | aws_secret_key: test 22 | 23 | # Database connection details for LocalStack 24 | db_host: "{{ localstack_host }}" 25 | db_port: 4510 # LocalStack RDS port 26 | db_name: lamp_db 27 | db_user: dbuser 28 | db_password: dbpassword 29 | 30 | # EFS mount point for LocalStack 31 | efs_mount_point: "/tmp/localstack/efs" 32 | 33 | # Web server configuration 34 | apache_document_root: "/tmp/localstack/www" 35 | 36 | # Application settings 37 | app_environment: development 38 | app_debug: true 
39 | app_url: "http://localhost:8080" 40 | 41 | # Disable AWS services that aren't needed for local testing 42 | cloudwatch_enabled: false 43 | waf_enabled: false 44 | route53_enabled: false 45 | cloudfront_enabled: false 46 | 47 | # Testing flags 48 | is_localstack: true 49 | 50 | children: 51 | # Web server group 52 | web_servers: 53 | hosts: 54 | web1.localstack: 55 | ansible_host: localhost 56 | instance_id: i-localstack-web1 57 | private_ip: 127.0.0.1 58 | public_ip: 127.0.0.1 59 | instance_type: t3.micro 60 | aws_zone: us-east-1a 61 | role: web 62 | component: app 63 | 64 | # Database server group 65 | db_servers: 66 | hosts: 67 | db1.localstack: 68 | ansible_host: localhost 69 | instance_id: i-localstack-db1 70 | private_ip: 127.0.0.1 71 | public_ip: 127.0.0.1 72 | instance_type: db.t3.micro 73 | aws_zone: us-east-1a 74 | role: db 75 | component: database 76 | 77 | # Load balancer group 78 | load_balancers: 79 | hosts: 80 | lb1.localstack: 81 | ansible_host: localhost 82 | instance_id: i-localstack-lb1 83 | private_ip: 127.0.0.1 84 | public_ip: 127.0.0.1 85 | instance_type: t3.micro 86 | aws_zone: us-east-1a 87 | role: lb 88 | component: loadbalancer 89 | 90 | # EFS storage group 91 | storage_servers: 92 | hosts: 93 | efs1.localstack: 94 | ansible_host: localhost 95 | instance_id: i-localstack-efs1 96 | private_ip: 127.0.0.1 97 | public_ip: 127.0.0.1 98 | instance_type: t3.micro 99 | aws_zone: us-east-1a 100 | role: storage 101 | component: efs 102 | 103 | # Development group 104 | development: 105 | children: 106 | web_servers: 107 | db_servers: 108 | load_balancers: 109 | storage_servers: 110 | vars: 111 | environment: development 112 | 113 | # LocalStack group 114 | localstack: 115 | children: 116 | web_servers: 117 | db_servers: 118 | load_balancers: 119 | storage_servers: 120 | vars: 121 | is_localstack: true 122 | 123 | # LAMP stack project group 124 | lamp_stack: 125 | children: 126 | web_servers: 127 | db_servers: 128 | load_balancers: 129 | 
storage_servers: 130 | vars: 131 | project: lamp-stack 132 | -------------------------------------------------------------------------------- /localstack/restore_original.mjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // Script to restore the original MCP Ansible server files after patching for LocalStack 4 | // This script restores the original utils.ts and aws.ts files from their backups 5 | 6 | import { execSync } from 'child_process'; 7 | import { fileURLToPath } from 'url'; 8 | import path from 'path'; 9 | import fs from 'fs'; 10 | 11 | // Get current file directory (equivalent to __dirname in CommonJS) 12 | const __filename = fileURLToPath(import.meta.url); 13 | const __dirname = path.dirname(__filename); 14 | const rootDir = path.resolve(__dirname, '..'); 15 | 16 | // Helper function to execute shell commands 17 | function runCommand(command) { 18 | console.log(`Executing: ${command}`); 19 | try { 20 | const output = execSync(command, { encoding: 'utf8' }); 21 | console.log(output); 22 | return output; 23 | } catch (error) { 24 | console.error(`Error executing command: ${error.message}`); 25 | if (error.stderr) console.error(error.stderr); 26 | throw error; 27 | } 28 | } 29 | 30 | // Restore the utils.ts file 31 | function restoreUtilsFile() { 32 | const utilsPath = path.join(rootDir, 'src', 'ansible-mcp-server', 'common', 'utils.ts'); 33 | const backupPath = `${utilsPath}.bak`; 34 | 35 | if (!fs.existsSync(backupPath)) { 36 | console.error(`Backup file not found: ${backupPath}`); 37 | return false; 38 | } 39 | 40 | // Restore the original file 41 | fs.copyFileSync(backupPath, utilsPath); 42 | console.log(`Restored original utils.ts from ${backupPath}`); 43 | 44 | // Remove the backup file 45 | fs.unlinkSync(backupPath); 46 | console.log(`Removed backup file: ${backupPath}`); 47 | 48 | return true; 49 | } 50 | 51 | // Restore the aws.ts file 52 | function restoreAwsFile() { 53 | const 
awsPath = path.join(rootDir, 'src', 'ansible-mcp-server', 'operations', 'aws.ts'); 54 | const backupPath = `${awsPath}.bak`; 55 | 56 | if (!fs.existsSync(backupPath)) { 57 | console.error(`Backup file not found: ${backupPath}`); 58 | return false; 59 | } 60 | 61 | // Restore the original file 62 | fs.copyFileSync(backupPath, awsPath); 63 | console.log(`Restored original aws.ts from ${backupPath}`); 64 | 65 | // Remove the backup file 66 | fs.unlinkSync(backupPath); 67 | console.log(`Removed backup file: ${backupPath}`); 68 | 69 | return true; 70 | } 71 | 72 | // Remove the config file 73 | function removeConfigFile() { 74 | const configPath = path.join(rootDir, 'src', 'ansible-mcp-server', 'config', 'localstack.js'); 75 | 76 | if (!fs.existsSync(configPath)) { 77 | console.error(`Config file not found: ${configPath}`); 78 | return false; 79 | } 80 | 81 | // Remove the config file 82 | fs.unlinkSync(configPath); 83 | console.log(`Removed config file: ${configPath}`); 84 | 85 | return true; 86 | } 87 | 88 | // Restore the original MCP Ansible server files 89 | async function restoreOriginalFiles() { 90 | try { 91 | console.log("Restoring original MCP Ansible server files..."); 92 | 93 | // Restore the utils.ts file 94 | const utilsRestored = restoreUtilsFile(); 95 | if (!utilsRestored) { 96 | console.error("Failed to restore utils.ts"); 97 | return; 98 | } 99 | 100 | // Restore the aws.ts file 101 | const awsRestored = restoreAwsFile(); 102 | if (!awsRestored) { 103 | console.error("Failed to restore aws.ts"); 104 | return; 105 | } 106 | 107 | // Remove the config file 108 | const configRemoved = removeConfigFile(); 109 | if (!configRemoved) { 110 | console.error("Failed to remove config file"); 111 | return; 112 | } 113 | 114 | console.log("\nBuilding the MCP Ansible server..."); 115 | runCommand('npm run build'); 116 | 117 | console.log("\nMCP Ansible server has been restored to use real AWS!"); 118 | console.log("You can now use the MCP Ansible server with real 
AWS."); 119 | } catch (error) { 120 | console.error("Failed to restore original files:", error); 121 | } 122 | } 123 | 124 | // Run the restore script 125 | restoreOriginalFiles(); 126 | -------------------------------------------------------------------------------- /AWS_README.md: -------------------------------------------------------------------------------- 1 | # Using SysOperator MCP Server with AWS 2 | 3 | This guide demonstrates how to use the SysOperator MCP server with AWS. The examples provided show various AWS operations that can be performed using Ansible. 4 | 5 | ## Prerequisites 6 | 7 | 1. AWS CLI installed and configured with valid credentials 8 | 2. Ansible installed 9 | 3. Ansible MCP server installed and configured 10 | 4. Required Ansible collections for AWS: 11 | ``` 12 | ansible-galaxy collection install amazon.aws 13 | ansible-galaxy collection install community.aws 14 | ``` 15 | 16 | ## Example Files 17 | 18 | This repository includes several example files for working with AWS: 19 | 20 | 1. **aws_example.yml**: A comprehensive playbook demonstrating various AWS operations including EC2, S3, RDS, VPC, Route53, and Lambda. 21 | 2. **aws_inventory.yml**: A dynamic inventory configuration for AWS EC2 instances. 22 | 3. **cloudformation_example.yml**: A playbook for managing AWS CloudFormation stacks. 23 | 4. **cloudformation_template.json**: A CloudFormation template for creating a simple EC2 instance. 24 | 25 | ## Using the MCP Server with AWS 26 | 27 | The SysOperator MCP server provides several tools for working with AWS: 28 | 29 | ### 1. aws_ec2: Manage AWS EC2 instances 30 | 31 | ``` 32 | 33 | ansible 34 | aws_ec2 35 | 36 | { 37 | "action": "list", 38 | "region": "us-west-2" 39 | } 40 | 41 | 42 | ``` 43 | 44 | ### 2. aws_s3: Manage AWS S3 buckets and objects 45 | 46 | ``` 47 | 48 | ansible 49 | aws_s3 50 | 51 | { 52 | "action": "list_buckets", 53 | "region": "us-west-2" 54 | } 55 | 56 | 57 | ``` 58 | 59 | ### 3. 
aws_vpc: Manage AWS VPC networks 60 | 61 | ``` 62 | 63 | ansible 64 | aws_vpc 65 | 66 | { 67 | "action": "list", 68 | "region": "us-west-2" 69 | } 70 | 71 | 72 | ``` 73 | 74 | ### 4. aws_cloudformation: Manage AWS CloudFormation stacks 75 | 76 | ``` 77 | 78 | ansible 79 | aws_cloudformation 80 | 81 | { 82 | "action": "list", 83 | "region": "us-west-2" 84 | } 85 | 86 | 87 | ``` 88 | 89 | ### 5. aws_dynamic_inventory: Create AWS dynamic inventory 90 | 91 | ``` 92 | 93 | ansible 94 | aws_dynamic_inventory 95 | 96 | { 97 | "region": "us-west-2", 98 | "keyed_groups": [ 99 | { 100 | "prefix": "tag", 101 | "key": "tags.Name" 102 | }, 103 | { 104 | "prefix": "instance_type", 105 | "key": "instance_type" 106 | } 107 | ], 108 | "hostnames": [ 109 | "tag:Name", 110 | "public_ip_address", 111 | "private_ip_address", 112 | "instance_id" 113 | ] 114 | } 115 | 116 | 117 | ``` 118 | 119 | ## Running the Example Playbooks 120 | 121 | You can run the example playbooks using the SysOperator MCP server: 122 | 123 | ``` 124 | 125 | ansible 126 | run_playbook 127 | 128 | { 129 | "playbook": "/path/to/aws_example.yml", 130 | "tags": "info" 131 | } 132 | 133 | 134 | ``` 135 | 136 | ## Using Dynamic Inventory 137 | 138 | To use the AWS dynamic inventory with the SysOperator MCP server: 139 | 140 | 1. Ensure the `aws_inventory.yml` file is properly configured 141 | 2. 
Use it with the `run_playbook` tool: 142 | 143 | ``` 144 | 145 | ansible 146 | run_playbook 147 | 148 | { 149 | "playbook": "/path/to/your/playbook.yml", 150 | "inventory": "/path/to/aws_inventory.yml" 151 | } 152 | 153 | 154 | ``` 155 | 156 | ## Notes 157 | 158 | - These examples require valid AWS credentials to run 159 | - Be cautious when running examples that create AWS resources, as they may incur costs 160 | - Always clean up resources after testing to avoid unnecessary charges 161 | -------------------------------------------------------------------------------- /src/sysoperator/operations/playbooks.ts: -------------------------------------------------------------------------------- 1 | import { AnsibleExecutionError } from '../common/errors.js'; 2 | import { RunPlaybookOptions, CheckSyntaxOptions, ListTasksOptions } from '../common/types.js'; 3 | import { execAsync, validatePlaybookPath, validateInventoryPath } from '../common/utils.js'; 4 | 5 | /** 6 | * Runs an Ansible playbook 7 | * @param options Options for running the playbook 8 | * @returns Standard output from ansible-playbook command 9 | * @throws AnsiblePlaybookNotFoundError if the playbook doesn't exist 10 | * @throws AnsibleInventoryNotFoundError if the specified inventory doesn't exist 11 | * @throws AnsibleExecutionError if the playbook execution fails 12 | */ 13 | export async function runPlaybook(options: RunPlaybookOptions): Promise { 14 | const playbookPath = validatePlaybookPath(options.playbook); 15 | const inventoryPath = validateInventoryPath(options.inventory); 16 | 17 | // Build command 18 | let command = `ansible-playbook ${playbookPath}`; 19 | 20 | // Add inventory if specified 21 | if (inventoryPath) { 22 | command += ` -i ${inventoryPath}`; 23 | } 24 | 25 | // Add extra vars if specified 26 | if (options.extraVars && Object.keys(options.extraVars).length > 0) { 27 | const extraVarsJson = JSON.stringify(options.extraVars); 28 | command += ` --extra-vars '${extraVarsJson}'`; 29 | } 
30 | 31 | // Add tags if specified 32 | if (options.tags) { 33 | command += ` --tags "${options.tags}"`; 34 | } 35 | 36 | // Add limit if specified 37 | if (options.limit) { 38 | command += ` --limit "${options.limit}"`; 39 | } 40 | 41 | try { 42 | // Execute command 43 | const { stdout, stderr } = await execAsync(command); 44 | return stdout || 'Playbook executed successfully (no output)'; 45 | } catch (error) { 46 | // Handle exec error 47 | const execError = error as { stderr?: string; message: string }; 48 | throw new AnsibleExecutionError( 49 | `Error running playbook: ${execError.message}`, 50 | execError.stderr 51 | ); 52 | } 53 | } 54 | 55 | /** 56 | * Checks the syntax of an Ansible playbook without executing it 57 | * @param options Options containing the playbook path 58 | * @returns Standard output from ansible-playbook --syntax-check command 59 | * @throws AnsiblePlaybookNotFoundError if the playbook doesn't exist 60 | * @throws AnsibleExecutionError if the syntax check fails 61 | */ 62 | export async function checkSyntax(options: CheckSyntaxOptions): Promise { 63 | const playbookPath = validatePlaybookPath(options.playbook); 64 | 65 | // Build command with syntax-check option 66 | const command = `ansible-playbook ${playbookPath} --syntax-check`; 67 | 68 | try { 69 | // Execute command 70 | const { stdout, stderr } = await execAsync(command); 71 | return stdout || 'Syntax check passed (no issues found)'; 72 | } catch (error) { 73 | // Handle exec error - in this case, a syntax error 74 | const execError = error as { stderr?: string; message: string }; 75 | throw new AnsibleExecutionError( 76 | `Syntax error: ${execError.message}`, 77 | execError.stderr 78 | ); 79 | } 80 | } 81 | 82 | /** 83 | * Lists all tasks that would be executed by a playbook 84 | * @param options Options containing the playbook path 85 | * @returns Standard output from ansible-playbook --list-tasks command 86 | * @throws AnsiblePlaybookNotFoundError if the playbook doesn't exist 
87 | * @throws AnsibleExecutionError if the listing fails 88 | */ 89 | export async function listTasks(options: ListTasksOptions): Promise { 90 | const playbookPath = validatePlaybookPath(options.playbook); 91 | 92 | // Build command with list-tasks option 93 | const command = `ansible-playbook ${playbookPath} --list-tasks`; 94 | 95 | try { 96 | // Execute command 97 | const { stdout, stderr } = await execAsync(command); 98 | return stdout || 'No tasks found in playbook'; 99 | } catch (error) { 100 | // Handle exec error 101 | const execError = error as { stderr?: string; message: string }; 102 | throw new AnsibleExecutionError( 103 | `Error listing tasks: ${execError.message}`, 104 | execError.stderr 105 | ); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/sysoperator/operations/vault.ts: -------------------------------------------------------------------------------- 1 | import { spawn } from 'child_process'; 2 | import { AnsibleExecutionError } from '../common/errors.js'; 3 | import { VaultEncryptStringOptions, VaultDecryptStringOptions } from '../common/types.js'; 4 | import { execAsync, createTempDirectory, writeTempFile, cleanupTempDirectory } from '../common/utils.js'; 5 | 6 | /** 7 | * Encrypts a string using Ansible Vault 8 | * @param options Options for encryption 9 | * @returns Encrypted string 10 | * @throws AnsibleExecutionError if encryption fails 11 | */ 12 | export async function encryptString(options: VaultEncryptStringOptions): Promise { 13 | return new Promise((resolve, reject) => { 14 | const args = ['encrypt_string']; 15 | 16 | // Add vault ID if specified 17 | if (options.vault_id) { 18 | args.push(`--vault-id=${options.vault_id}`); 19 | } 20 | 21 | // Add vault password file if specified 22 | if (options.vault_password_file) { 23 | args.push(`--vault-password-file=${options.vault_password_file}`); 24 | } 25 | 26 | // Add name if specified 27 | if (options.name) { 28 | 
args.push(`--name=${options.name}`); 29 | } 30 | 31 | // Add --stdin flag to read from stdin 32 | args.push('--stdin'); 33 | 34 | console.error(`Executing: ansible-vault ${args.join(' ')} (with string piped to stdin)`); 35 | const vaultProcess = spawn('ansible-vault', args, { stdio: ['pipe', 'pipe', 'pipe'] }); 36 | 37 | let stdoutData = ''; 38 | let stderrData = ''; 39 | 40 | vaultProcess.stdout.on('data', (data) => { 41 | stdoutData += data.toString(); 42 | }); 43 | 44 | vaultProcess.stderr.on('data', (data) => { 45 | stderrData += data.toString(); 46 | }); 47 | 48 | vaultProcess.on('close', (code) => { 49 | if (code === 0) { 50 | resolve(stdoutData.trim()); 51 | } else { 52 | const errorMessage = stderrData || `ansible-vault exited with code ${code}`; 53 | reject(new AnsibleExecutionError(`Error encrypting string: ${errorMessage}`, stderrData)); 54 | } 55 | }); 56 | 57 | vaultProcess.on('error', (err) => { 58 | reject(new AnsibleExecutionError(`Failed to start ansible-vault: ${err.message}`)); 59 | }); 60 | 61 | // Write the string to encrypt to stdin 62 | vaultProcess.stdin.write(options.string); 63 | vaultProcess.stdin.end(); 64 | }); 65 | } 66 | 67 | /** 68 | * Decrypts a string using Ansible Vault 69 | * @param options Options for decryption 70 | * @returns Decrypted string 71 | * @throws AnsibleExecutionError if decryption fails 72 | */ 73 | export async function decryptString(options: VaultDecryptStringOptions): Promise { 74 | let tempDir: string | undefined; 75 | try { 76 | // Create a unique temporary directory 77 | tempDir = await createTempDirectory('ansible-vault-decrypt'); 78 | 79 | // Write the encrypted string to a temporary file 80 | const tempFilePath = await writeTempFile(tempDir, 'encrypted.txt', options.string); 81 | 82 | // Build the decrypt command arguments 83 | const args = ['decrypt', tempFilePath, '--output=-']; // Output to stdout 84 | 85 | // Add vault ID if specified 86 | if (options.vault_id) { 87 | args.splice(1, 0, 
`--vault-id=${options.vault_id}`); // Insert after 'decrypt' 88 | } 89 | 90 | // Add vault password file if specified 91 | if (options.vault_password_file) { 92 | args.splice(1, 0, `--vault-password-file=${options.vault_password_file}`); // Insert after 'decrypt' 93 | } 94 | 95 | const command = `ansible-vault ${args.join(' ')}`; 96 | console.error(`Executing: ${command}`); 97 | 98 | // Execute the command asynchronously 99 | const { stdout, stderr } = await execAsync(command); 100 | return stdout.trim(); 101 | 102 | } catch (error: any) { 103 | // Handle execution errors 104 | const errorMessage = error.stderr || error.message || 'Unknown error'; 105 | throw new AnsibleExecutionError(`Error decrypting string: ${errorMessage}`, error.stderr); 106 | } finally { 107 | // Ensure cleanup happens even if errors occur 108 | if (tempDir) { 109 | await cleanupTempDirectory(tempDir); 110 | } 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /localstack/mcp_localstack_patch.js: -------------------------------------------------------------------------------- 1 | // MCP Ansible Server LocalStack Patch 2 | // This file shows the changes needed to make the MCP Ansible server work with LocalStack 3 | 4 | /** 5 | * To modify the MCP Ansible server to use LocalStack instead of real AWS, 6 | * you need to make the following changes: 7 | * 8 | * 1. Modify src/ansible-mcp-server/common/utils.ts to use awslocal instead of aws 9 | * 2. 
Rebuild the server with npm run build 10 | * 11 | * Below are the specific changes needed: 12 | */ 13 | 14 | /** 15 | * Original functions in src/ansible-mcp-server/common/utils.ts: 16 | */ 17 | 18 | /* 19 | export async function checkAwsCliInstalled(): Promise { 20 | try { 21 | await execAsync('aws --version'); 22 | return true; 23 | } catch (error) { 24 | return false; 25 | } 26 | } 27 | 28 | export async function checkAwsCredentials(): Promise { 29 | try { 30 | await execAsync('aws sts get-caller-identity'); 31 | return true; 32 | } catch (error) { 33 | return false; 34 | } 35 | } 36 | */ 37 | 38 | /** 39 | * Modified functions to use awslocal instead of aws: 40 | */ 41 | 42 | /* 43 | export async function checkAwsCliInstalled(): Promise { 44 | try { 45 | await execAsync('awslocal --version'); 46 | return true; 47 | } catch (error) { 48 | return false; 49 | } 50 | } 51 | 52 | export async function checkAwsCredentials(): Promise { 53 | try { 54 | await execAsync('awslocal sts get-caller-identity'); 55 | return true; 56 | } catch (error) { 57 | return false; 58 | } 59 | } 60 | */ 61 | 62 | /** 63 | * To apply these changes, you would: 64 | * 65 | * 1. Edit src/ansible-mcp-server/common/utils.ts 66 | * 2. Replace the aws commands with awslocal commands 67 | * 3. 
Run npm run build to rebuild the server 68 | * 69 | * Alternatively, you can create a new version of the server specifically for LocalStack: 70 | */ 71 | 72 | // Example of how to create a modified version of verifyAwsCredentials 73 | function verifyAwsCredentialsLocalStack() { 74 | // First verify LocalStack CLI is installed 75 | const isInstalled = checkAwsLocalCliInstalled(); 76 | if (!isInstalled) { 77 | throw new Error('LocalStack CLI (awslocal) is not installed'); 78 | } 79 | 80 | // Then check LocalStack is running 81 | const isRunning = checkLocalStackRunning(); 82 | if (!isRunning) { 83 | throw new Error('LocalStack is not running'); 84 | } 85 | } 86 | 87 | // Check if awslocal CLI is installed 88 | function checkAwsLocalCliInstalled() { 89 | try { 90 | require('child_process').execSync('awslocal --version'); 91 | return true; 92 | } catch (error) { 93 | return false; 94 | } 95 | } 96 | 97 | // Check if LocalStack is running 98 | function checkLocalStackRunning() { 99 | try { 100 | require('child_process').execSync('awslocal s3 ls'); 101 | return true; 102 | } catch (error) { 103 | return false; 104 | } 105 | } 106 | 107 | /** 108 | * You would also need to modify the AWS operations in src/ansible-mcp-server/operations/aws.ts 109 | * to use awslocal instead of aws. This would involve: 110 | * 111 | * 1. Modifying the playbook content to use shell commands with awslocal 112 | * 2. Or creating custom modules that use awslocal 113 | * 114 | * For example, instead of using the amazon.aws.s3_bucket module, you might use: 115 | */ 116 | 117 | /* 118 | playbookContent += ` 119 | - name: List S3 buckets 120 | shell: awslocal s3 ls 121 | register: s3_buckets 122 | 123 | - name: Display buckets 124 | debug: 125 | var: s3_buckets.stdout_lines`; 126 | */ 127 | 128 | /** 129 | * Complete Implementation Strategy: 130 | * 131 | * 1. Create a fork of the MCP Ansible server repository 132 | * 2. Modify the utils.ts file to use awslocal instead of aws 133 | * 3. 
Modify the aws.ts file to use shell commands with awslocal instead of AWS modules 134 | * 4. Add a flag or environment variable to toggle between real AWS and LocalStack 135 | * 5. Rebuild the server and test with LocalStack 136 | * 137 | * This approach allows you to use the MCP Ansible server with LocalStack for testing 138 | * without affecting the ability to use it with real AWS when needed. 139 | */ 140 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Common role tasks 3 | # These tasks are applied to all servers 4 | 5 | - name: Update all packages 6 | package: 7 | name: "*" 8 | state: latest 9 | when: environment != 'localstack' 10 | 11 | - name: Install common packages 12 | package: 13 | name: 14 | - vim 15 | - htop 16 | - git 17 | - curl 18 | - wget 19 | - unzip 20 | - python3 21 | - python3-pip 22 | - jq 23 | - net-tools 24 | - ntp 25 | - logrotate 26 | state: present 27 | when: environment != 'localstack' 28 | 29 | - name: Set timezone to UTC 30 | timezone: 31 | name: UTC 32 | when: environment != 'localstack' 33 | 34 | - name: Configure NTP 35 | service: 36 | name: ntpd 37 | state: started 38 | enabled: yes 39 | when: environment != 'localstack' 40 | 41 | - name: Create admin user 42 | user: 43 | name: admin 44 | groups: wheel 45 | shell: /bin/bash 46 | create_home: yes 47 | when: environment != 'localstack' 48 | 49 | - name: Set up sudo for admin user 50 | lineinfile: 51 | path: /etc/sudoers.d/admin 52 | line: "admin ALL=(ALL) NOPASSWD: ALL" 53 | state: present 54 | mode: 0440 55 | create: yes 56 | validate: 'visudo -cf %s' 57 | when: environment != 'localstack' 58 | 59 | - name: Set up SSH authorized keys for admin user 60 | authorized_key: 61 | user: admin 62 | key: "{{ lookup('env', 'SSH_PUBLIC_KEY') | default('ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 example@example.com') }}" 63 | state: present 64 | when: environment != 'localstack' 65 | 66 | - name: Configure SSH server 67 | lineinfile: 68 | path: /etc/ssh/sshd_config 69 | regexp: "{{ item.regexp }}" 70 | line: "{{ item.line }}" 71 | state: present 72 | loop: 73 | - { regexp: '^PermitRootLogin', line: 'PermitRootLogin no' } 74 | - { regexp: '^PasswordAuthentication', line: 'PasswordAuthentication no' } 75 | - { regexp: '^X11Forwarding', line: 'X11Forwarding no' } 76 | - { regexp: '^MaxAuthTries', line: 'MaxAuthTries 3' } 77 | notify: Restart SSH 78 | when: environment != 'localstack' 79 | 80 | - name: Set up basic firewall rules 81 | firewalld: 82 | service: "{{ item }}" 83 | permanent: yes 84 | state: enabled 85 | loop: 86 | - ssh 87 | - http 88 | - https 89 | notify: Restart firewalld 90 | when: environment != 'localstack' 91 | 92 | - name: Configure logrotate 93 | copy: 94 | content: | 95 | /var/log/*.log { 96 | weekly 97 | rotate 4 98 | compress 99 | delaycompress 100 | missingok 101 | notifempty 102 | create 0640 root root 103 | } 104 | dest: /etc/logrotate.d/custom 105 | owner: root 106 | group: root 107 | mode: 0644 108 | when: environment != 'localstack' 109 | 110 | - name: Set up basic monitoring 111 | copy: 112 | content: | 113 | #!/bin/bash 114 | # Basic system monitoring script 115 | 116 | DATE=$(date '+%Y-%m-%d %H:%M:%S') 117 | HOSTNAME=$(hostname) 118 | UPTIME=$(uptime) 119 | LOAD=$(uptime | awk '{print $(NF-2)" "$(NF-1)" "$(NF)}') 120 | MEMORY=$(free -h) 121 | DISK=$(df -h) 122 | 123 | echo "===== System Report: $DATE =====" 124 | echo "Hostname: $HOSTNAME" 125 | echo "Uptime: 
$UPTIME" 126 | echo "Load: $LOAD" 127 | echo "Memory:" 128 | echo "$MEMORY" 129 | echo "Disk:" 130 | echo "$DISK" 131 | echo "=================================" 132 | dest: /usr/local/bin/system-monitor.sh 133 | owner: root 134 | group: root 135 | mode: 0755 136 | when: environment != 'localstack' 137 | 138 | - name: Set up cron job for monitoring 139 | cron: 140 | name: "System monitoring" 141 | minute: "0" 142 | hour: "*/6" 143 | job: "/usr/local/bin/system-monitor.sh > /var/log/system-monitor.log 2>&1" 144 | when: environment != 'localstack' 145 | 146 | - name: Create a dummy file for LocalStack testing 147 | file: 148 | path: /tmp/common-role-applied 149 | state: touch 150 | mode: 0644 151 | when: environment == 'localstack' 152 | -------------------------------------------------------------------------------- /docker-README.md: -------------------------------------------------------------------------------- 1 | # SysOperator MCP Server Docker Image 2 | 3 | This repository contains a Dockerfile for running the SysOperator MCP (Model Context Protocol) server in a Docker container. The SysOperator MCP server provides tools for infrastructure as code operations, including Ansible, AWS, and Terraform. 
4 | 5 | ## Prerequisites 6 | 7 | - Docker installed on your system 8 | - Git (to clone the repository) 9 | 10 | ## Building and Running with the Helper Script 11 | 12 | A helper script `docker-build-run.sh` is provided to simplify building and running the Docker container: 13 | 14 | ```bash 15 | # Build the Docker image 16 | ./docker-build-run.sh --build 17 | 18 | # Run the Docker container 19 | ./docker-build-run.sh --run 20 | 21 | # Build and run in one command 22 | ./docker-build-run.sh --build --run 23 | 24 | # Run with mounted playbooks directory and AWS credentials 25 | ./docker-build-run.sh --run --playbooks ./playbooks --aws 26 | 27 | # Run with mounted Terraform directory 28 | ./docker-build-run.sh --run --terraform ./terraform 29 | 30 | # Show help 31 | ./docker-build-run.sh --help 32 | ``` 33 | 34 | ## Manual Building and Running 35 | 36 | ### Building the Docker Image 37 | 38 | To manually build the Docker image, run the following command from the root directory of the repository: 39 | 40 | ```bash 41 | docker build -t sysoperator-mcp . 42 | ``` 43 | 44 | ### Running the Docker Container 45 | 46 | The SysOperator MCP server is designed to communicate via stdin/stdout, so you need to run the container in interactive mode: 47 | 48 | ```bash 49 | docker run -i sysoperator-mcp 50 | ``` 51 | 52 | ### Mounting Volumes 53 | 54 | For Ansible playbooks, inventory files, and other resources, you might want to mount volumes: 55 | 56 | ```bash 57 | docker run -i -v /path/to/playbooks:/playbooks sysoperator-mcp 58 | ``` 59 | 60 | ### AWS Credentials 61 | 62 | For AWS operations, you need to provide AWS credentials. You can do this in several ways: 63 | 64 | 1. Using environment variables: 65 | 66 | ```bash 67 | docker run -i \ 68 | -e AWS_ACCESS_KEY_ID=your_key \ 69 | -e AWS_SECRET_ACCESS_KEY=your_secret \ 70 | sysoperator-mcp 71 | ``` 72 | 73 | 2. 
Mounting your AWS credentials file: 74 | 75 | ```bash 76 | docker run -i -v ~/.aws:/root/.aws sysoperator-mcp 77 | ``` 78 | 79 | ### Terraform State 80 | 81 | For Terraform operations, you might want to persist the Terraform state: 82 | 83 | ```bash 84 | docker run -i -v /path/to/terraform/state:/terraform sysoperator-mcp 85 | ``` 86 | 87 | ## Using Docker Compose 88 | 89 | A `docker-compose.yml` file is provided for users who prefer Docker Compose: 90 | 91 | ```bash 92 | # Build the image 93 | docker-compose build 94 | 95 | # Run the container 96 | docker-compose up 97 | 98 | # Run in detached mode 99 | docker-compose up -d 100 | 101 | # Stop the container 102 | docker-compose down 103 | ``` 104 | 105 | Edit the `docker-compose.yml` file to customize volume mounts and environment variables according to your needs. 106 | 107 | ## Features 108 | 109 | The Docker image includes: 110 | 111 | - Node.js 18 112 | - Ansible 113 | - AWS CLI 114 | - Terraform 115 | 116 | ## Customizing the Docker Image 117 | 118 | If you need to customize the Docker image, you can modify the Dockerfile and rebuild the image. For example, you might want to: 119 | 120 | - Install additional system dependencies 121 | - Configure Ansible, AWS CLI, or Terraform 122 | - Add custom scripts or configuration files 123 | 124 | ## Testing the MCP Server 125 | 126 | A test script `docker-test.js` is provided to demonstrate how to interact with the SysOperator MCP server running in a Docker container: 127 | 128 | ```bash 129 | # Method 1: Pipe the script to the Docker container 130 | docker run -i sysoperator-mcp < docker-test.js 131 | 132 | # Method 2: Use the helper script 133 | ./docker-build-run.sh --run | node docker-test.js 134 | ``` 135 | 136 | The test script sends a request to list available tools and then executes a simple ad-hoc command. You can modify this script to test other MCP server functionality. 
137 | 138 | ## Troubleshooting 139 | 140 | If you encounter issues with the Docker container, you can: 141 | 142 | 1. Check the container logs: 143 | 144 | ```bash 145 | docker logs 146 | ``` 147 | 148 | 2. Run the container with a shell instead of the MCP server: 149 | 150 | ```bash 151 | docker run -it --entrypoint /bin/bash sysoperator-mcp 152 | ``` 153 | 154 | This will give you a shell inside the container where you can troubleshoot issues. 155 | 156 | ## License 157 | 158 | This project is licensed under the MIT License - see the LICENSE file for details. 159 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/LOCALSTACK_COMPATIBILITY.md: -------------------------------------------------------------------------------- 1 | # LocalStack Compatibility 2 | 3 | This document outlines the compatibility of the AWS LAMP Stack infrastructure with LocalStack for local testing purposes. 4 | 5 | ## Overview 6 | 7 | [LocalStack](https://localstack.cloud/) is a cloud service emulator that runs in a single container on your laptop or in your CI environment. It provides an easy-to-use test/mocking framework for developing cloud applications. 8 | 9 | This project uses LocalStack to test the AWS infrastructure locally before deploying to the actual AWS cloud, saving time and costs during development. 
10 | 11 | ## Supported AWS Services 12 | 13 | The following AWS services used in this project are supported by LocalStack: 14 | 15 | | AWS Service | LocalStack Support | Notes | 16 | |-------------|-------------------|-------| 17 | | EC2 | ✅ Partial | Basic instance operations supported | 18 | | VPC | ✅ Partial | Basic networking supported | 19 | | RDS | ✅ Partial | Basic database operations supported | 20 | | EFS | ✅ Partial | Basic file system operations supported | 21 | | ALB/ELB | ✅ Partial | Basic load balancer operations supported | 22 | | Route53 | ✅ Partial | Basic DNS operations supported | 23 | | IAM | ✅ Yes | Most IAM operations supported | 24 | | CloudWatch | ✅ Partial | Basic monitoring supported | 25 | | CloudTrail | ✅ Partial | Basic logging supported | 26 | | WAF | ✅ Partial | Basic web application firewall supported | 27 | | Auto Scaling | ✅ Partial | Basic auto scaling supported | 28 | 29 | ## Limitations 30 | 31 | When testing with LocalStack, be aware of the following limitations: 32 | 33 | 1. **EC2 Instances**: LocalStack doesn't actually run EC2 instances. It simulates the API but doesn't provide actual compute resources. 34 | 35 | 2. **RDS Databases**: LocalStack simulates RDS APIs but uses a local database engine instead of the actual AWS RDS service. 36 | 37 | 3. **EFS**: File system operations are simulated but don't provide actual distributed file system capabilities. 38 | 39 | 4. **Networking**: While VPC, subnets, and security groups can be created, actual network isolation isn't enforced. 40 | 41 | 5. **Load Balancing**: ALB/ELB APIs are simulated but don't actually distribute traffic. 42 | 43 | 6. **Auto Scaling**: The API is simulated but doesn't actually scale resources. 44 | 45 | 7. **WAF**: Rules can be created but aren't actually enforced. 46 | 47 | ## Testing Strategy 48 | 49 | Given these limitations, our testing strategy with LocalStack focuses on: 50 | 51 | 1. 
**Infrastructure Provisioning**: Verifying that Terraform can create all required resources without errors. 52 | 53 | 2. **Configuration Management**: Testing that Ansible playbooks run correctly against local resources. 54 | 55 | 3. **API Interactions**: Ensuring that our scripts correctly interact with AWS APIs. 56 | 57 | 4. **Resource Dependencies**: Verifying that resource dependencies are correctly defined. 58 | 59 | ## LocalStack Configuration 60 | 61 | The `test_with_localstack.sh` script configures LocalStack with the necessary services and endpoints. It sets up: 62 | 63 | 1. A local provider configuration for Terraform that points to LocalStack endpoints. 64 | 2. Environment variables for AWS credentials and region. 65 | 3. A local inventory for Ansible that simulates the AWS resources. 66 | 67 | ## Running Tests 68 | 69 | To run tests with LocalStack: 70 | 71 | 1. Start LocalStack: 72 | ```bash 73 | localstack start 74 | ``` 75 | 76 | 2. Run the test script: 77 | ```bash 78 | ./test_with_localstack.sh 79 | ``` 80 | 81 | The script will: 82 | - Initialize Terraform with LocalStack provider configuration 83 | - Apply the Terraform configuration to create simulated resources 84 | - Run Ansible playbooks against the local environment 85 | - Perform basic tests to verify the setup 86 | - Clean up resources (optional) 87 | 88 | ## Pro Version Features 89 | 90 | Some features used in this project may require LocalStack Pro, including: 91 | 92 | - Advanced VPC networking 93 | - Complex IAM policies 94 | - WAF rule enforcement 95 | - CloudTrail advanced logging 96 | 97 | If you encounter limitations with the community version, consider upgrading to LocalStack Pro for more comprehensive testing. 98 | 99 | ## Troubleshooting 100 | 101 | If you encounter issues with LocalStack testing: 102 | 103 | 1. Check LocalStack logs: 104 | ```bash 105 | localstack logs 106 | ``` 107 | 108 | 2. 
Verify LocalStack is running and healthy: 109 | ```bash 110 | curl http://localhost:4566/_localstack/health 111 | ``` 112 | 113 | 3. Ensure all required services are enabled in LocalStack. 114 | 115 | 4. Check that AWS credentials are set to the test values used by LocalStack. 116 | -------------------------------------------------------------------------------- /aws_example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example Ansible playbook for AWS operations 3 | # Note: This playbook requires valid AWS credentials to run 4 | 5 | - name: AWS Operations Example 6 | hosts: localhost 7 | connection: local 8 | gather_facts: false 9 | vars: 10 | aws_region: us-west-2 11 | instance_type: t2.micro 12 | ami_id: ami-0c55b159cbfafe1f0 # Amazon Linux 2 AMI (adjust for your region) 13 | vpc_id: vpc-example 14 | key_name: my-key-pair 15 | security_group: my-security-group 16 | instance_name: example-instance 17 | bucket_name: example-bucket-{{ ansible_date_time.epoch }} 18 | 19 | tasks: 20 | - name: Get information about AWS regions 21 | amazon.aws.aws_region_info: 22 | register: aws_regions 23 | tags: 24 | - info 25 | 26 | - name: Display AWS regions 27 | debug: 28 | var: aws_regions 29 | tags: 30 | - info 31 | 32 | - name: Get information about EC2 instances 33 | amazon.aws.ec2_instance_info: 34 | region: "{{ aws_region }}" 35 | register: ec2_instances 36 | tags: 37 | - ec2 38 | - info 39 | 40 | - name: Display EC2 instances 41 | debug: 42 | var: ec2_instances 43 | tags: 44 | - ec2 45 | - info 46 | 47 | - name: Create a security group 48 | amazon.aws.ec2_security_group: 49 | name: "{{ security_group }}" 50 | description: Security group for example instances 51 | vpc_id: "{{ vpc_id }}" 52 | region: "{{ aws_region }}" 53 | rules: 54 | - proto: tcp 55 | ports: 22 56 | cidr_ip: 0.0.0.0/0 57 | rule_desc: Allow SSH 58 | - proto: tcp 59 | ports: 80 60 | cidr_ip: 0.0.0.0/0 61 | rule_desc: Allow HTTP 62 | register: 
security_group_result 63 | tags: 64 | - ec2 65 | - create 66 | 67 | - name: Launch EC2 instance 68 | amazon.aws.ec2_instance: 69 | name: "{{ instance_name }}" 70 | key_name: "{{ key_name }}" 71 | security_group: "{{ security_group }}" 72 | instance_type: "{{ instance_type }}" 73 | image_id: "{{ ami_id }}" 74 | region: "{{ aws_region }}" 75 | wait: yes 76 | network: 77 | assign_public_ip: yes 78 | tags: 79 | Name: "{{ instance_name }}" 80 | Environment: Development 81 | register: ec2_result 82 | tags: 83 | - ec2 84 | - create 85 | 86 | - name: Create S3 bucket 87 | amazon.aws.s3_bucket: 88 | name: "{{ bucket_name }}" 89 | region: "{{ aws_region }}" 90 | tags: 91 | Name: "{{ bucket_name }}" 92 | Environment: Development 93 | register: s3_result 94 | tags: 95 | - s3 96 | - create 97 | 98 | - name: Upload file to S3 bucket 99 | amazon.aws.s3_object: 100 | bucket: "{{ bucket_name }}" 101 | object: example.txt 102 | content: "This is an example file uploaded by Ansible" 103 | mode: put 104 | region: "{{ aws_region }}" 105 | when: s3_result.changed 106 | tags: 107 | - s3 108 | - create 109 | 110 | - name: Get RDS instances 111 | community.aws.rds_instance_info: 112 | region: "{{ aws_region }}" 113 | register: rds_instances 114 | tags: 115 | - rds 116 | - info 117 | 118 | - name: Display RDS instances 119 | debug: 120 | var: rds_instances 121 | tags: 122 | - rds 123 | - info 124 | 125 | - name: Get VPC information 126 | amazon.aws.ec2_vpc_net_info: 127 | region: "{{ aws_region }}" 128 | register: vpc_info 129 | tags: 130 | - vpc 131 | - info 132 | 133 | - name: Display VPC information 134 | debug: 135 | var: vpc_info 136 | tags: 137 | - vpc 138 | - info 139 | 140 | - name: Get Route53 zones 141 | community.aws.route53_info: 142 | region: "{{ aws_region }}" 143 | register: route53_zones 144 | tags: 145 | - route53 146 | - info 147 | 148 | - name: Display Route53 zones 149 | debug: 150 | var: route53_zones 151 | tags: 152 | - route53 153 | - info 154 | 155 | - name: Get 
Lambda functions 156 | community.aws.lambda_info: 157 | region: "{{ aws_region }}" 158 | register: lambda_functions 159 | tags: 160 | - lambda 161 | - info 162 | 163 | - name: Display Lambda functions 164 | debug: 165 | var: lambda_functions 166 | tags: 167 | - lambda 168 | - info 169 | -------------------------------------------------------------------------------- /demos/aws-terraform-lamp/ansible/roles/db_client/templates/cloudwatch-db.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "agent": { 3 | "metrics_collection_interval": 60, 4 | "run_as_user": "root" 5 | }, 6 | "logs": { 7 | "logs_collected": { 8 | "files": { 9 | "collect_list": [ 10 | { 11 | "file_path": "/var/log/db-backup.log", 12 | "log_group_name": "{{ cloudwatch_log_group | default('lamp-logs') }}/db-backup", 13 | "log_stream_name": "{instance_id}-db-backup", 14 | "retention_in_days": {{ cloudwatch_log_retention_days | default(30) }}, 15 | "timezone": "LOCAL" 16 | }, 17 | { 18 | "file_path": "/var/log/db-monitor.log", 19 | "log_group_name": "{{ cloudwatch_log_group | default('lamp-logs') }}/db-monitor", 20 | "log_stream_name": "{instance_id}-db-monitor", 21 | "retention_in_days": {{ cloudwatch_log_retention_days | default(30) }}, 22 | "timezone": "LOCAL" 23 | }, 24 | { 25 | "file_path": "/var/log/php/db-errors.log", 26 | "log_group_name": "{{ cloudwatch_log_group | default('lamp-logs') }}/php-db-errors", 27 | "log_stream_name": "{instance_id}-php-db-errors", 28 | "retention_in_days": {{ cloudwatch_log_retention_days | default(30) }}, 29 | "timezone": "LOCAL" 30 | }, 31 | { 32 | "file_path": "/var/log/mysql/mysql-slow.log", 33 | "log_group_name": "{{ cloudwatch_log_group | default('lamp-logs') }}/mysql-slow", 34 | "log_stream_name": "{instance_id}-mysql-slow", 35 | "retention_in_days": {{ cloudwatch_log_retention_days | default(30) }}, 36 | "timezone": "LOCAL" 37 | } 38 | ] 39 | } 40 | }, 41 | "force_flush_interval": 15 42 | }, 43 | "metrics": { 44 
| "namespace": "LAMP/Database", 45 | "metrics_collected": { 46 | "cpu": { 47 | "resources": [ 48 | "*" 49 | ], 50 | "measurement": [ 51 | "cpu_usage_idle", 52 | "cpu_usage_iowait", 53 | "cpu_usage_user", 54 | "cpu_usage_system" 55 | ], 56 | "totalcpu": true, 57 | "metrics_collection_interval": 60 58 | }, 59 | "disk": { 60 | "resources": [ 61 | "/", 62 | "{{ db_backup_dir | default('/var/backups/mysql') }}" 63 | ], 64 | "measurement": [ 65 | "used_percent", 66 | "inodes_used_percent", 67 | "disk_used_percent", 68 | "disk_free", 69 | "disk_total" 70 | ], 71 | "metrics_collection_interval": 60, 72 | "ignore_file_system_types": [ 73 | "sysfs", "devtmpfs", "tmpfs", "proc", "overlay", "aufs", "squashfs" 74 | ] 75 | }, 76 | "diskio": { 77 | "resources": [ 78 | "*" 79 | ], 80 | "measurement": [ 81 | "io_time", 82 | "write_bytes", 83 | "read_bytes", 84 | "writes", 85 | "reads" 86 | ], 87 | "metrics_collection_interval": 60 88 | }, 89 | "mem": { 90 | "measurement": [ 91 | "mem_used_percent", 92 | "mem_available_percent", 93 | "mem_available", 94 | "mem_total" 95 | ], 96 | "metrics_collection_interval": 60 97 | }, 98 | "net": { 99 | "resources": [ 100 | "eth0" 101 | ], 102 | "measurement": [ 103 | "bytes_sent", 104 | "bytes_recv", 105 | "packets_sent", 106 | "packets_recv" 107 | ], 108 | "metrics_collection_interval": 60 109 | }, 110 | "netstat": { 111 | "measurement": [ 112 | "tcp_established", 113 | "tcp_time_wait", 114 | "tcp_close_wait" 115 | ], 116 | "metrics_collection_interval": 60 117 | }, 118 | "processes": { 119 | "measurement": [ 120 | "running", 121 | "sleeping", 122 | "dead" 123 | ], 124 | "metrics_collection_interval": 60 125 | }, 126 | "swap": { 127 | "measurement": [ 128 | "swap_used_percent", 129 | "swap_free", 130 | "swap_used" 131 | ], 132 | "metrics_collection_interval": 60 133 | } 134 | }, 135 | "append_dimensions": { 136 | "InstanceId": "${aws:InstanceId}", 137 | "InstanceType": "${aws:InstanceType}", 138 | "AutoScalingGroupName": 
"${aws:AutoScalingGroupName}" 139 | }, 140 | "aggregation_dimensions": [ 141 | ["InstanceId"], 142 | ["AutoScalingGroupName"], 143 | ["InstanceType"], 144 | [] 145 | ] 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /localstack/test_mcp_integration.mjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | // Test script for MCP Ansible server with LocalStack 4 | // This script demonstrates how to use the MCP Ansible server with LocalStack 5 | 6 | import { execSync } from 'child_process'; 7 | import { fileURLToPath } from 'url'; 8 | import path from 'path'; 9 | import fs from 'fs'; 10 | 11 | // Get current file directory (equivalent to __dirname in CommonJS) 12 | const __filename = fileURLToPath(import.meta.url); 13 | const __dirname = path.dirname(__filename); 14 | 15 | // Helper function to execute shell commands 16 | function runCommand(command) { 17 | console.log(`Executing: ${command}`); 18 | try { 19 | const output = execSync(command, { encoding: 'utf8' }); 20 | console.log(output); 21 | return output; 22 | } catch (error) { 23 | console.error(`Error executing command: ${error.message}`); 24 | if (error.stderr) console.error(error.stderr); 25 | throw error; 26 | } 27 | } 28 | 29 | // Check if LocalStack is running 30 | function checkLocalStackRunning() { 31 | try { 32 | runCommand('awslocal s3 ls'); 33 | console.log("LocalStack is running!"); 34 | return true; 35 | } catch (error) { 36 | console.error("LocalStack is not running. 
Please start LocalStack with 'localstack start'."); 37 | return false; 38 | } 39 | } 40 | 41 | // Test MCP integration 42 | async function testMcpIntegration() { 43 | try { 44 | // Check if LocalStack is running 45 | console.log("Checking if LocalStack is running..."); 46 | if (!checkLocalStackRunning()) { 47 | return; 48 | } 49 | 50 | // Get paths to playbook and inventory 51 | const playbookPath = path.join(__dirname, 'sample_playbook.yml'); 52 | const inventoryPath = path.join(__dirname, 'inventory.ini'); 53 | 54 | // Verify files exist 55 | if (!fs.existsSync(playbookPath)) { 56 | console.error(`Playbook not found: ${playbookPath}`); 57 | return; 58 | } 59 | 60 | if (!fs.existsSync(inventoryPath)) { 61 | console.error(`Inventory not found: ${inventoryPath}`); 62 | return; 63 | } 64 | 65 | // Display MCP tool usage example 66 | console.log("\nTo use the MCP Ansible server with LocalStack, you would use:"); 67 | console.log(` 68 | 69 | ansible 70 | run_playbook 71 | 72 | { 73 | "playbook": "${playbookPath}", 74 | "inventory": "${inventoryPath}" 75 | } 76 | 77 | 78 | `); 79 | 80 | // Display AWS S3 example 81 | console.log("\nTo list S3 buckets with the MCP Ansible server and LocalStack, you would use:"); 82 | console.log(` 83 | 84 | ansible 85 | aws_s3 86 | 87 | { 88 | "action": "list_buckets", 89 | "region": "us-east-1" 90 | } 91 | 92 | 93 | `); 94 | 95 | // Display AWS EC2 example 96 | console.log("\nTo list EC2 instances with the MCP Ansible server and LocalStack, you would use:"); 97 | console.log(` 98 | 99 | ansible 100 | aws_ec2 101 | 102 | { 103 | "action": "list", 104 | "region": "us-east-1" 105 | } 106 | 107 | 108 | `); 109 | 110 | // Display AWS CloudFormation example 111 | console.log("\nTo create a CloudFormation stack with the MCP Ansible server and LocalStack, you would use:"); 112 | console.log(` 113 | 114 | ansible 115 | aws_cloudformation 116 | 117 | { 118 | "action": "create", 119 | "region": "us-east-1", 120 | "stackName": "test-stack", 121 | 
"templateBody": "{\\"Resources\\":{\\"MyBucket\\":{\\"Type\\":\\"AWS::S3::Bucket\\",\\"Properties\\":{\\"BucketName\\":\\"cf-created-bucket\\"}}}}" 122 | } 123 | 124 | 125 | `); 126 | 127 | console.log("\nTo use these examples with the MCP Ansible server, you need to:"); 128 | console.log("1. Modify src/ansible-mcp-server/common/utils.ts to use awslocal instead of aws"); 129 | console.log("2. Rebuild the server with npm run build"); 130 | console.log("3. Configure the MCP server in your MCP settings file"); 131 | console.log("4. Use the MCP tools as shown in the examples above"); 132 | 133 | console.log("\nFor now, we'll run the sample playbook directly with ansible-playbook:"); 134 | runCommand(`ansible-playbook ${playbookPath} -i ${inventoryPath}`); 135 | 136 | console.log("\nTest completed successfully!"); 137 | } catch (error) { 138 | console.error("Test failed:", error); 139 | } 140 | } 141 | 142 | // Run the test 143 | testMcpIntegration(); 144 | -------------------------------------------------------------------------------- /examples/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example Ansible playbook demonstrating various features 3 | # for testing with the Ansible MCP Server 4 | 5 | - name: Web Server Setup 6 | hosts: webservers 7 | become: true 8 | vars: 9 | app_name: "example-app" 10 | app_version: "1.0.0" 11 | deploy_dir: "/var/www/app" 12 | packages: 13 | - nginx 14 | - python3 15 | - git 16 | 17 | tasks: 18 | - name: Update package cache 19 | apt: 20 | update_cache: yes 21 | cache_valid_time: 3600 22 | tags: 23 | - setup 24 | - packages 25 | 26 | - name: Install required packages 27 | apt: 28 | name: "{{ packages }}" 29 | state: present 30 | tags: 31 | - setup 32 | - packages 33 | 34 | - name: Create deploy directory 35 | file: 36 | path: "{{ deploy_dir }}" 37 | state: directory 38 | owner: www-data 39 | group: www-data 40 | mode: '0755' 41 | tags: 42 | - setup 43 | - deploy 44 | 45 | - 
name: Configure Nginx 46 | template: 47 | src: templates/nginx.conf.j2 48 | dest: /etc/nginx/sites-available/{{ app_name }} 49 | notify: Restart Nginx 50 | tags: 51 | - config 52 | - nginx 53 | 54 | - name: Enable Nginx site 55 | file: 56 | src: /etc/nginx/sites-available/{{ app_name }} 57 | dest: /etc/nginx/sites-enabled/{{ app_name }} 58 | state: link 59 | notify: Restart Nginx 60 | tags: 61 | - config 62 | - nginx 63 | 64 | - name: Deploy application 65 | git: 66 | repo: https://github.com/example/repo.git 67 | dest: "{{ deploy_dir }}" 68 | version: "{{ app_version }}" 69 | become_user: www-data 70 | tags: 71 | - deploy 72 | 73 | - name: Check application status 74 | uri: 75 | url: http://localhost 76 | return_content: yes 77 | register: app_status 78 | ignore_errors: yes 79 | tags: 80 | - verify 81 | 82 | - name: Display application status 83 | debug: 84 | var: app_status 85 | tags: 86 | - verify 87 | 88 | handlers: 89 | - name: Restart Nginx 90 | service: 91 | name: nginx 92 | state: restarted 93 | 94 | - name: Database Server Setup 95 | hosts: dbservers 96 | become: true 97 | vars: 98 | db_name: appdb 99 | db_user: appuser 100 | db_password: "{{ vault_db_password }}" # Would be stored in a vault file 101 | 102 | tasks: 103 | - name: Install PostgreSQL 104 | apt: 105 | name: 106 | - postgresql 107 | - postgresql-contrib 108 | - python3-psycopg2 109 | state: present 110 | tags: 111 | - database 112 | - setup 113 | 114 | - name: Ensure PostgreSQL is running 115 | service: 116 | name: postgresql 117 | state: started 118 | enabled: yes 119 | tags: 120 | - database 121 | - setup 122 | 123 | - name: Create database 124 | postgresql_db: 125 | name: "{{ db_name }}" 126 | state: present 127 | become_user: postgres 128 | tags: 129 | - database 130 | - setup 131 | 132 | - name: Create database user 133 | postgresql_user: 134 | db: "{{ db_name }}" 135 | name: "{{ db_user }}" 136 | password: "{{ db_password }}" 137 | priv: "ALL" 138 | state: present 139 | become_user: 
postgres 140 | tags: 141 | - database 142 | - setup 143 | 144 | - name: Configure PostgreSQL to listen on all interfaces 145 | lineinfile: 146 | path: /etc/postgresql/12/main/postgresql.conf 147 | regexp: "^#?listen_addresses\\s*=" 148 | line: "listen_addresses = '*'" 149 | notify: Restart PostgreSQL 150 | tags: 151 | - database 152 | - config 153 | 154 | handlers: 155 | - name: Restart PostgreSQL 156 | service: 157 | name: postgresql 158 | state: restarted 159 | 160 | - name: Load Balancer Setup 161 | hosts: loadbalancers 162 | become: true 163 | tasks: 164 | - name: Install HAProxy 165 | apt: 166 | name: haproxy 167 | state: present 168 | tags: 169 | - loadbalancer 170 | - setup 171 | 172 | - name: Configure HAProxy 173 | template: 174 | src: templates/haproxy.cfg.j2 175 | dest: /etc/haproxy/haproxy.cfg 176 | notify: Restart HAProxy 177 | tags: 178 | - loadbalancer 179 | - config 180 | 181 | handlers: 182 | - name: Restart HAProxy 183 | service: 184 | name: haproxy 185 | state: restarted 186 | -------------------------------------------------------------------------------- /demos/aws-lamp-stack/roles/db/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Database role tasks 3 | # These tasks configure the database servers 4 | 5 | - name: Install MySQL client 6 | package: 7 | name: 8 | - mysql 9 | - mysql-client 10 | state: present 11 | when: environment != 'localstack' 12 | 13 | - name: Create database configuration directory 14 | file: 15 | path: /etc/mysql 16 | state: directory 17 | owner: root 18 | group: root 19 | mode: 0755 20 | when: environment != 'localstack' 21 | 22 | - name: Configure MySQL client 23 | template: 24 | src: my.cnf.j2 25 | dest: /etc/mysql/my.cnf 26 | owner: root 27 | group: root 28 | mode: 0644 29 | when: environment != 'localstack' 30 | 31 | - name: Wait for database to be available 32 | wait_for: 33 | host: "{{ db_cluster_endpoint }}" 34 | port: "{{ db_port }}" 35 | timeout: 300 
36 | delay: 10 37 | when: environment != 'localstack' 38 | 39 | - name: Create database schema 40 | mysql_db: 41 | login_host: "{{ db_cluster_endpoint }}" 42 | login_user: "{{ db_username }}" 43 | login_password: "{{ db_password }}" 44 | name: "{{ db_name }}" 45 | state: present 46 | when: environment != 'localstack' 47 | 48 | - name: Create database tables 49 | mysql_query: 50 | login_host: "{{ db_cluster_endpoint }}" 51 | login_user: "{{ db_username }}" 52 | login_password: "{{ db_password }}" 53 | database: "{{ db_name }}" 54 | query: | 55 | CREATE TABLE IF NOT EXISTS users ( 56 | id INT AUTO_INCREMENT PRIMARY KEY, 57 | username VARCHAR(50) NOT NULL UNIQUE, 58 | password VARCHAR(255) NOT NULL, 59 | email VARCHAR(100) NOT NULL UNIQUE, 60 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 61 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP 62 | ); 63 | 64 | CREATE TABLE IF NOT EXISTS posts ( 65 | id INT AUTO_INCREMENT PRIMARY KEY, 66 | user_id INT NOT NULL, 67 | title VARCHAR(255) NOT NULL, 68 | content TEXT NOT NULL, 69 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 70 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, 71 | FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE 72 | ); 73 | 74 | CREATE TABLE IF NOT EXISTS comments ( 75 | id INT AUTO_INCREMENT PRIMARY KEY, 76 | post_id INT NOT NULL, 77 | user_id INT NOT NULL, 78 | content TEXT NOT NULL, 79 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 80 | FOREIGN KEY (post_id) REFERENCES posts(id) ON DELETE CASCADE, 81 | FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE 82 | ); 83 | when: environment != 'localstack' 84 | 85 | - name: Insert sample data 86 | mysql_query: 87 | login_host: "{{ db_cluster_endpoint }}" 88 | login_user: "{{ db_username }}" 89 | login_password: "{{ db_password }}" 90 | database: "{{ db_name }}" 91 | query: | 92 | INSERT IGNORE INTO users (id, username, password, email) VALUES 93 | (1, 'admin', 
'$2y$10$8KGcS0Uw1CW3FuMsXNjU7uHIGK0iKD7jrIJmB7R.WTEkS1PiYlce.', 'admin@example.com'), 94 | (2, 'user1', '$2y$10$8KGcS0Uw1CW3FuMsXNjU7uHIGK0iKD7jrIJmB7R.WTEkS1PiYlce.', 'user1@example.com'), 95 | (3, 'user2', '$2y$10$8KGcS0Uw1CW3FuMsXNjU7uHIGK0iKD7jrIJmB7R.WTEkS1PiYlce.', 'user2@example.com'); 96 | 97 | INSERT IGNORE INTO posts (id, user_id, title, content) VALUES 98 | (1, 1, 'Welcome to LAMP Stack', 'This is a sample post created by the admin user.'), 99 | (2, 2, 'Getting Started with AWS', 'AWS provides a wide range of services for building scalable applications.'), 100 | (3, 3, 'Using Ansible for Infrastructure as Code', 'Ansible is a powerful tool for automating infrastructure deployment.'); 101 | 102 | INSERT IGNORE INTO comments (post_id, user_id, content) VALUES 103 | (1, 2, 'Great introduction post!'), 104 | (1, 3, 'Looking forward to more content.'), 105 | (2, 1, 'AWS is indeed very versatile.'), 106 | (3, 2, 'Ansible has made our deployments much easier.'); 107 | when: environment != 'localstack' 108 | 109 | - name: Create database backup script 110 | template: 111 | src: backup.sh.j2 112 | dest: /usr/local/bin/db-backup.sh 113 | owner: root 114 | group: root 115 | mode: 0755 116 | when: environment != 'localstack' 117 | 118 | - name: Set up cron job for database backups 119 | cron: 120 | name: "Database backup" 121 | minute: "0" 122 | hour: "3" 123 | job: "/usr/local/bin/db-backup.sh > /var/log/db-backup.log 2>&1" 124 | when: environment != 'localstack' and backup_enabled 125 | 126 | - name: Create a dummy file for LocalStack testing 127 | file: 128 | path: /tmp/db-role-applied 129 | state: touch 130 | mode: 0644 131 | when: environment == 'localstack' 132 | --------------------------------------------------------------------------------