├── .github ├── FUNDING.yml └── workflows │ └── checkout.yml ├── .gitignore ├── Ansible ├── GCP │ └── create-vm.yml ├── Kubernetes │ ├── certmanager │ │ ├── README.md │ │ ├── configure_certmanager.yml │ │ ├── install_certmanager.yml │ │ ├── inventory.ini │ │ └── uninstall_certmanager.yml │ ├── helm │ │ ├── README.md │ │ ├── install_helm.yml │ │ └── inventory.ini │ ├── ingress-nginx │ │ ├── README.md │ │ ├── install_nginx_ingress.yml │ │ ├── inventory.ini │ │ └── uninstall_nginx_ingress.yml │ ├── kubectl │ │ ├── README.md │ │ ├── install_kubectl.yml │ │ └── inventory.ini │ ├── kubernetes-dashboard │ │ ├── expose_kubernetes_dashboard.yml │ │ ├── install.yml │ │ ├── inventory.ini │ │ └── uninstall.yml │ ├── kubesphere-dashboard │ │ ├── expose_kubesphere.yml │ │ ├── install.yml │ │ ├── inventory.ini │ │ └── uninstall.yml │ ├── metallb │ │ ├── README.md │ │ ├── configure_metallb.yml │ │ ├── install_metallb.yml │ │ ├── inventory.ini │ │ └── uninstall_metallb.yml │ └── rancher-dashboard │ │ ├── expose_rancher_ui.yml │ │ ├── install.yml │ │ ├── inventory.ini │ │ └── uninstall.yml ├── activemq-artemis │ ├── artemis.service.j2 │ ├── clenup.yml │ ├── cluster │ │ ├── ansible.cfg │ │ ├── bootstrap_server.yml │ │ ├── cluster_bootstrap.yml │ │ ├── install_artemis.yml │ │ ├── inventory.ini │ │ ├── main.yml │ │ ├── setup_nfs.yml │ │ ├── templates │ │ │ ├── artemis.service.j2 │ │ │ ├── master-broker.xml.j2 │ │ │ └── node-broker.xml.j2 │ │ ├── vars.yml │ │ └── vip_ip.yml │ ├── install.yml │ ├── inventory.ini │ └── vars.yml ├── cassandra │ ├── README.md │ ├── configure_cassandra.yml │ ├── install.yml │ ├── inventory.ini │ └── vars.yml ├── certbot │ ├── README.md │ ├── install.yml │ ├── inventory.ini │ ├── obtain_ssl.yml │ └── setup.yml ├── docker │ ├── README.md │ ├── install_docker-compose.yml │ ├── install_docker.yml │ ├── inventory.ini │ └── uninstall_docker.yml ├── dozzle │ ├── cleanup.yml │ ├── dozzle_cleanup.yml │ ├── dozzle_setup.yml │ ├── install.yml │ ├── inventory.ini │ └── vars.yml ├── elasticsearch │ ├── ansible.cfg │ ├── configure-elasticsearch.yml │ ├── install.yml │ ├── inventory.ini │ ├── templates │ │ ├── elasticsearch-apt.j2 │ │ ├── elasticsearch-yum.j2 │ │ └── elasticsearch.yml.j2 │ └── vars.yml ├── firewall │ ├── README.md │ ├── ansible.cfg │ ├── configure.yml │ ├── firewalld.yml │ ├── inventory.ini │ ├── ufw.yml │ └── vars.yml ├── go-lang │ ├── README.md │ ├── ansible.cfg │ ├── install_go.yml │ ├── inventory.ini │ └── vars.yml ├── grafana-stack │ ├── README.md │ ├── grafana-collection │ │ ├── README.md │ │ ├── configure_promtail.yml │ │ ├── configure_promtail_docker.yml │ │ ├── install_grafana.yml │ │ ├── install_loki.yml │ │ ├── install_promtail.yml │ │ ├── install_promtail_docker.yml │ │ ├── inventory.ini │ │ └── promtail_config │ │ │ └── config.yml │ └── prometheus-collection │ │ ├── README.md │ │ ├── install_prometheus.yml │ │ ├── inventory.ini │ │ ├── prometheus.yml │ │ ├── prometheus_exporters.yml │ │ └── vars.yml ├── haproxy │ ├── README.md │ ├── haproxy_2_8_debian.yml │ ├── install.yml │ ├── inventory.ini │ ├── list.sh │ └── specific-version.yml ├── harbor │ ├── README.md │ ├── ansible.cfg │ ├── bootstrap_harbor.yml │ ├── cleanup.yml │ ├── inventory.ini │ └── vars.yml ├── hashicorp-vault │ ├── ansible.cfg │ ├── cleanup._package.yml │ ├── configure_vault.yml │ ├── install_package.yml │ ├── inventory.ini │ ├── vars.yml │ └── vault.hcl.j2 ├── java │ ├── README.md │ ├── install_java.yml │ ├── inventory.ini │ ├── java_17.yml │ └── java_8_11.yml ├── jenkins │ ├── README.md │ ├── 
install_jenkins.yml │ ├── inventory.ini │ └── uninstall_jenkins.yml ├── kafka │ ├── install.yml │ └── inventory.ini ├── keepalived │ ├── ansible.cfg │ ├── configure.yml │ ├── install.yml │ ├── inventory.ini │ ├── templates │ │ ├── master.conf.j2 │ │ └── slave.conf.j2 │ └── vars.yml ├── keydb │ ├── install.yml │ ├── invnetory.ini │ └── keydb_docker.yml ├── kibana │ ├── ansible.cfg │ ├── install.yml │ ├── inventory.ini │ ├── templates │ │ └── kibana-yum.j2 │ └── vars.yml ├── minio │ ├── README.md │ ├── install_minio.yml │ ├── inventory.ini │ ├── uninstall_minio.yml │ └── vars.yml ├── mongodb │ ├── README.md │ ├── configure_mongodb.yml │ ├── install.yml │ └── inventory.ini ├── mysql │ ├── README.md │ ├── configure_mysql.yml │ ├── install_mysql.yml │ ├── inventory.ini │ └── vars.yml ├── nexus │ ├── README.md │ ├── ansible.cfg │ ├── cleanup_nexus_docker.yml │ ├── install_nexus.yml │ ├── invnetory.ini │ └── vars.yml ├── nfs │ ├── README.md │ ├── inventory.ini │ ├── nfs_setup.yml │ └── vars.yml ├── nginx │ ├── README.md │ ├── install_nginx.yml │ ├── inventory.ini │ └── nginx_config.yml ├── nodejs │ ├── README.md │ ├── install_nodejs.yml │ ├── install_nodejs_nvm.yml │ └── inventory.ini ├── percona-pmm │ ├── README.md │ ├── ansible.cfg │ ├── cleanup_client_docker.yml │ ├── cleanup_package.yml │ ├── cleanup_server.yml │ ├── inventory.ini │ ├── pmm_client_with_docker.yml │ ├── pmm_client_with_package.yml │ ├── pmm_postgresql.yml │ ├── pmm_server.yml │ └── vars.yml ├── percona │ └── postgresql │ │ ├── ansible.cfg │ │ ├── inventory.ini │ │ ├── package │ │ ├── install.yml │ │ └── s,.txt │ │ └── vars.yml ├── php │ ├── README.md │ ├── install_php.yml │ └── inventory.ini ├── portainer │ ├── README.md │ ├── clean_portainer.yml │ ├── install_portainer_ce.yml │ ├── install_portainer_ee.yml │ └── inventory.ini ├── postgresql │ ├── README.md │ ├── ansible.cfg │ ├── configure_postgresql.yml │ ├── install_postgresql.yml │ ├── inventory.ini │ └── vars.yml ├── proxy │ ├── README.md │ ├── inventory.ini │ └── proxy.yml ├── rabbitmq │ ├── README.md │ ├── ansible.cfg │ ├── cleanup_docker.yml │ ├── configure_rabbitmq.yml │ ├── docker.yml │ ├── install_rabbitmq.yml │ ├── inventory.ini │ └── vars.yml ├── redis │ ├── README.md │ ├── configure_redis.yml │ ├── install_redis.yml │ └── inventory.ini ├── security │ ├── README.md │ ├── inventory.ini │ ├── main.yml │ └── vars.yml ├── semaphore-ui │ ├── installation_with_binary.yml │ ├── installation_with_docker.yml │ ├── installation_with_package.yml │ └── inventory.ini ├── sonarqube │ ├── cleanup.yml │ ├── install_with_docker.yml │ └── inventory.ini ├── trivy │ ├── README.md │ ├── install_trivy_binary.yml │ ├── install_trivy_docker.yml │ ├── install_trivy_helm.yml │ ├── install_trivy_os.yml │ └── inventory.ini ├── update-upgrade │ ├── README.md │ ├── ansible.cfg │ ├── inventory.ini │ └── update_upgrade_tools.yml ├── wazuh-ansible │ ├── .ansible-lint │ ├── .gitignore │ ├── .yamllint │ ├── README.md │ ├── inventory.ini │ ├── playbooks │ │ ├── ansible.cfg │ │ ├── wazuh-dashboard.yml │ │ ├── wazuh-indexer.yml │ │ ├── wazuh-manager-oss-cluster.yml │ │ ├── wazuh-manager-oss.yml │ │ ├── wazuh-production-ready.yml │ │ └── wazuh-single.yml │ ├── roles │ │ ├── elastic-stack │ │ │ └── ansible-kibana │ │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── opendistro │ │ │ └── opendistro-kibana │ │ │ │ └── defaults │ │ │ │ └── main.yml │ │ └── wazuh │ │ │ ├── ansible-filebeat-oss │ │ │ ├── README.md │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── meta │ │ 
│ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── Debian.yml │ │ │ │ ├── RMDebian.yml │ │ │ │ ├── RMRedHat.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── config.yml │ │ │ │ ├── main.yml │ │ │ │ └── security_actions.yml │ │ │ └── templates │ │ │ │ └── filebeat.yml.j2 │ │ │ ├── ansible-filebeat │ │ │ └── defaults │ │ │ │ └── main.yml │ │ │ ├── ansible-wazuh-agent │ │ │ ├── README.md │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── Debian.yml │ │ │ │ ├── Linux.yml │ │ │ │ ├── RMDebian.yml │ │ │ │ ├── RMRedHat.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── Windows.yml │ │ │ │ ├── installation_from_custom_packages.yml │ │ │ │ ├── macOS.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── authd_pass.j2 │ │ │ │ ├── var-ossec-etc-local-internal-options.conf.j2 │ │ │ │ └── var-ossec-etc-ossec-agent.conf.j2 │ │ │ ├── ansible-wazuh-manager │ │ │ ├── README.md │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── files │ │ │ │ ├── create_user.py │ │ │ │ └── custom_ruleset │ │ │ │ │ ├── decoders │ │ │ │ │ └── sample_custom_decoders.xml │ │ │ │ │ └── rules │ │ │ │ │ └── sample_custom_rules.xml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── Debian.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── install_cmake.yml │ │ │ │ ├── installation_from_custom_packages.yml │ │ │ │ ├── main.yml │ │ │ │ └── uninstall.yml │ │ │ ├── templates │ │ │ │ ├── admin.json.j2 │ │ │ │ ├── agentless.j2 │ │ │ │ ├── authd_pass.j2 │ │ │ │ ├── cdb_lists.j2 │ │ │ │ ├── var-ossec-etc-local-internal-options.conf.j2 │ │ │ │ ├── var-ossec-etc-ossec-server.conf.j2 │ │ │ │ ├── var-ossec-etc-shared-agent.conf.j2 │ │ │ │ ├── var-ossec-rules-local_decoder.xml.j2 │ │ │ │ └── var-ossec-rules-local_rules.xml.j2 │ │ │ └── vars │ │ │ │ ├── agentless_creds.yml │ │ │ │ ├── authd_pass.yml │ │ │ │ └── install_cmake.yml │ │ │ ├── check-packages │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── files │ │ │ │ └── packages_uri.txt │ │ │ ├── scripts │ │ │ │ └── check_packages.sh │ │ │ └── tasks │ │ │ │ └── main.yml │ │ │ ├── vars │ │ │ ├── repo.yml │ │ │ ├── repo_pre-release.yml │ │ │ ├── repo_staging.yml │ │ │ └── repo_vars.yml │ │ │ ├── wazuh-dashboard │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── Debian.yml │ │ │ │ ├── RMRedHat.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── main.yml │ │ │ │ └── security_actions.yml │ │ │ ├── templates │ │ │ │ ├── opensearch_dashboards.yml.j2 │ │ │ │ └── wazuh.yml.j2 │ │ │ └── vars │ │ │ │ └── debian.yml │ │ │ └── wazuh-indexer │ │ │ ├── defaults │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ └── main.yml │ │ │ ├── meta │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ ├── Debian.yml │ │ │ ├── RMRedHat.yml │ │ │ ├── RedHat.yml │ │ │ ├── local_actions.yml │ │ │ ├── main.yml │ │ │ └── security_actions.yml │ │ │ └── templates │ │ │ ├── config.yml.j2 │ │ │ ├── disabledlog4j.options.j2 │ │ │ ├── internal_users.yml.j2 │ │ │ ├── jvm.options.j2 │ │ │ ├── opensearch.yml.j2 │ │ │ └── tlsconfig.yml.j2 │ ├── wazuh-agent-install.yml │ └── wazuh-cluster.yml ├── zabbix │ ├── README.md │ ├── inventory.ini │ ├── vars.yml │ ├── zabbix-hosts-management.yml │ ├── zabbix-templates-management.yml │ ├── zabbix_agent.yml │ └── zabbix_server.yml └── zookeeper │ ├── README.md │ ├── install.yml │ ├── inventory.ini │ └── java_8_11.yml ├── LICENSE ├── README.md └── Terraform └── GCP ├── enable-gcp-services ├── README.md ├── main.tf ├── terraform.tfvars └── variables.tf ├── firewall ├── 
main.tf ├── terraform.tfvars └── variables.tf ├── gcp-project-creation ├── main.tf ├── outputs.tf ├── terraform.tfvars └── variables.tf ├── vm-template ├── README.md ├── main.tf ├── terraform.tfvars └── variables.tf └── vpc-network ├── README.md ├── main.tf ├── terraform.tfvars └── variables.tf /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [ismoilovdevml] -------------------------------------------------------------------------------- /.github/workflows/checkout.yml: -------------------------------------------------------------------------------- 1 | name: Code Checkout 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout Repository 14 | uses: actions/checkout@v4 15 | with: 16 | repository: ismoilovdevml/infra-as-code 17 | path: infra-as-code -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform 2 | *.tfstate 3 | *.tfstate.* 4 | .terraform/ 5 | crash.log 6 | crash.*.log 7 | override.tf 8 | override.tf.json 9 | terraform.rc 10 | .terraformrc 11 | *.tfplan 12 | .terraform.lock.hcl 13 | .terraform.tfstate.backup 14 | .vscode 15 | Ansible/nexus/myenv/* -------------------------------------------------------------------------------- /Ansible/GCP/create-vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Manage GCP VM instances 3 | hosts: localhost 4 | gather_facts: no 5 | vars_prompt: 6 | - name: "action" 7 | prompt: "Enter action (create/delete/start/stop)" 8 | private: no 9 | 10 | tasks: 11 | - name: Create VM instances 12 | when: action == 'create' 13 | shell: > 14 | gcloud compute instances create vm-instance-1 15 | --zone=us-central1-a 16 | --machine-type=e2-medium 17 | --image-family=ubuntu-2004-lts 18 | --image-project=ubuntu-os-cloud 19 | --boot-disk-size=30GB 20 | --boot-disk-type=pd-ssd 21 | --scopes=default,cloud-platform 22 | 23 | - name: Delete VM instance 1 24 | when: action == 'delete' 25 | shell: > 26 | gcloud compute instances delete vm-instance-1 27 | --zone=us-central1-a 28 | --quiet 29 | 30 | - name: Start VM instance 1 31 | when: action == 'start' 32 | shell: > 33 | gcloud compute instances start vm-instance-1 34 | --zone=us-central1-a 35 | 36 | - name: Stop VM instance 1 37 | when: action == 'stop' 38 | shell: > 39 | gcloud compute instances stop vm-instance-1 40 | --zone=us-central1-a -------------------------------------------------------------------------------- /Ansible/Kubernetes/certmanager/configure_certmanager.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Cert-Manager with Let's Encrypt ClusterIssuer 3 | hosts: all 4 | become: yes 5 | vars: 6 | email: "teshmat@gmail.com" # Use your real email address here! 
7 | cert_manager_namespace: "cert-manager" 8 | version: "v1.15.3" # Cert-manager version 9 | 10 | tasks: 11 | - name: Create clusterissuer configuration file 12 | ansible.builtin.copy: 13 | dest: /tmp/clusterissuer.yaml 14 | content: | 15 | apiVersion: cert-manager.io/v1 16 | kind: ClusterIssuer 17 | metadata: 18 | name: letsencrypt-prod 19 | spec: 20 | acme: 21 | server: https://acme-v02.api.letsencrypt.org/directory 22 | email: {{ email }} 23 | privateKeySecretRef: 24 | name: letsencrypt-prod 25 | solvers: 26 | - http01: 27 | ingress: 28 | class: nginx 29 | 30 | - name: Apply ClusterIssuer configuration 31 | ansible.builtin.command: 32 | cmd: kubectl apply -f /tmp/clusterissuer.yaml 33 | register: clusterissuer_apply_output 34 | 35 | - name: Show ClusterIssuer apply output 36 | debug: 37 | var: clusterissuer_apply_output.stdout 38 | 39 | - name: Get secrets in cert-manager namespace 40 | ansible.builtin.command: 41 | cmd: kubectl get secret -n {{ cert_manager_namespace }} 42 | register: cert_manager_secrets 43 | 44 | - name: Show cert-manager secrets 45 | debug: 46 | var: cert_manager_secrets.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/certmanager/install_certmanager.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Cert-Manager 3 | hosts: all 4 | become: yes 5 | vars: 6 | version: "v1.15.3" # Cert-manager version 7 | namespace: "cert-manager" # Cert-manager namespace 8 | 9 | tasks: 10 | - name: Apply cert-manager CRDs 11 | ansible.builtin.command: 12 | cmd: kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/{{ version }}/cert-manager.crds.yaml 13 | 14 | - name: Add Jetstack Helm repo 15 | ansible.builtin.command: 16 | cmd: helm repo add jetstack https://charts.jetstack.io 17 | 18 | - name: Update Helm repo 19 | ansible.builtin.command: 20 | cmd: helm repo update 21 | 22 | - name: Install cert-manager using Helm 23 | ansible.builtin.command: 24 | cmd: helm install cert-manager jetstack/cert-manager --namespace {{ namespace }} --create-namespace --version {{ version }} 25 | register: helm_install_output 26 | 27 | - name: Wait for 1 minute to let Cert-Manager pods start 28 | ansible.builtin.pause: 29 | minutes: 1 30 | 31 | - name: Get all Cert-Manager resources 32 | ansible.builtin.command: 33 | cmd: kubectl get all -n {{ namespace }} 34 | register: cert_manager_resources 35 | 36 | - name: Show Cert-Manager resources 37 | debug: 38 | var: cert_manager_resources.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/certmanager/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/Kubernetes/certmanager/uninstall_certmanager.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Uninstall Cert-Manager and clean up 3 | hosts: all 4 | become: yes 5 | vars: 6 | version: "v1.15.3" # Cert-manager version 7 | namespace: "cert-manager" # Cert-manager namespace 8 | 9 | tasks: 10 | - name: Uninstall cert-manager Helm release 11 | ansible.builtin.command: 12 | cmd: helm uninstall cert-manager --namespace {{ namespace }} 13 | register: helm_uninstall_output 14 | 15 | - name: Show Helm uninstall output 16 | debug: 17 | var: 
helm_uninstall_output.stdout 18 | 19 | - name: Delete cert-manager namespace 20 | ansible.builtin.command: 21 | cmd: kubectl delete namespace {{ namespace }} 22 | register: delete_namespace_output 23 | 24 | - name: Show namespace deletion output 25 | debug: 26 | var: delete_namespace_output.stdout 27 | 28 | - name: Remove cert-manager CRDs 29 | ansible.builtin.command: 30 | cmd: kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/{{ version }}/cert-manager.crds.yaml 31 | register: remove_crds_output 32 | 33 | - name: Show CRDs removal output 34 | debug: 35 | var: remove_crds_output.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/helm/README.md: -------------------------------------------------------------------------------- 1 | # Helm Installation with Ansible 🚀 2 | 3 | This playbook installs Helm v3 on a target machine using an official installation script. 4 | 5 | ## Requirements 📋 6 | 7 | - Ansible installed on your control node. 8 | - Sudo privileges on the target machine. 9 | 10 | ## Playbook Overview 📝 11 | 12 | This playbook performs the following steps: 13 | 14 | 1. Downloads the official Helm installation script. 15 | 2. Runs the installation script. 16 | 3. Verifies the installation by checking the Helm version. 17 | 18 | ## Usage ⚙️ 19 | 20 | 1. **Prepare your inventory file**: 21 | ```ini 22 | [all] 23 | target-host ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 24 | ``` 25 | 26 | 2. **Run the playbook**: 27 | ```bash 28 | ansible-playbook -i inventory.ini install_helm.yml 29 | ``` 30 | 31 | ## Playbook Tasks Overview 🧩 32 | 33 | - **Download Helm script**: Fetches the official installation script. 34 | - **Run Helm script**: Installs Helm on the target machine. 35 | - **Verify installation**: Checks the installed Helm version. 
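36 | 37 | ## Pinning a Helm Version 📌 38 | 39 | The playbook always installs the latest stable release. If you need to pin a specific release, the task below is a minimal sketch of how the install step could be adapted; it assumes the install script's `--version` flag and a hypothetical `helm_version` variable, and is not part of the original playbook: 40 | 41 | ```yaml 42 | # Hypothetical variant of the "Run Helm install script" task (pins a release; helm_version must be defined by you) 43 | - name: Run Helm install script for a pinned version 44 | ansible.builtin.command: 45 | cmd: "/tmp/get_helm.sh --version {{ helm_version }}" 46 | creates: /usr/local/bin/helm 47 | ```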
-------------------------------------------------------------------------------- /Ansible/Kubernetes/helm/install_helm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Helm 3 | hosts: all 4 | become: yes 5 | 6 | tasks: 7 | - name: Download Helm install script 8 | ansible.builtin.get_url: 9 | url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 10 | dest: /tmp/get_helm.sh 11 | mode: '0700' 12 | 13 | - name: Run Helm install script 14 | ansible.builtin.command: 15 | cmd: /tmp/get_helm.sh 16 | creates: /usr/local/bin/helm 17 | 18 | - name: Verify Helm installation 19 | ansible.builtin.command: 20 | cmd: helm version 21 | register: helm_version 22 | 23 | - name: Show Helm version 24 | debug: 25 | var: helm_version.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/helm/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=142.93.203.29 ansible_user=root 3 | master2 ansible_host=147.182.175.175 ansible_user=root 4 | master3 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/Kubernetes/ingress-nginx/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/Kubernetes/ingress-nginx/uninstall_nginx_ingress.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Uninstall NGINX Ingress and clean up 3 | hosts: all 4 | become: yes 5 | 6 | tasks: 7 | - name: Uninstall ingress-nginx Helm release 8 | ansible.builtin.command: 9 | cmd: helm uninstall ingress-nginx --namespace ingress-nginx 10 | register: helm_uninstall 11 | 12 | - name: Show Helm uninstall output 13 | debug: 14 | var: helm_uninstall.stdout 15 | 16 | - name: Delete ingress-nginx namespace 17 | ansible.builtin.command: 18 | cmd: kubectl delete namespace ingress-nginx 19 | register: delete_namespace 20 | 21 | - name: Show namespace deletion output 22 | debug: 23 | var: delete_namespace.stdout 24 | 25 | - name: Clean up Helm repository 26 | ansible.builtin.command: 27 | cmd: helm repo remove ingress-nginx 28 | register: helm_repo_remove 29 | 30 | - name: Show Helm repo remove output 31 | debug: 32 | var: helm_repo_remove.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubectl/README.md: -------------------------------------------------------------------------------- 1 | # Kubectl Installation with Ansible 🚀 2 | 3 | This playbook automates the installation of `kubectl` for either x86_64 or arm64 architecture on a target machine. 4 | 5 | ## Requirements 📋 6 | 7 | - Ansible installed on your control node. 8 | - Sudo privileges on the target machine. 9 | 10 | ## Playbook Overview 📝 11 | 12 | This playbook performs the following actions: 13 | 14 | 1. Downloads the latest stable `kubectl` binary for the appropriate architecture. 15 | 2. Verifies the SHA256 checksum of the downloaded binary. 16 | 3. Installs `kubectl` and verifies the installation. 17 | 18 | ## Usage ⚙️ 19 | 20 | 1. **Prepare your inventory file**: 21 | ```ini 22 | [all] 23 | target-host ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 24 | ``` 25 | 26 | 2. 
**Run the playbook**: 27 | ```bash 28 | ansible-playbook -i inventory.ini install_kubectl.yml 29 | ``` 30 | 31 | ## Playbook Tasks Overview 🧩 32 | 33 | - **Download kubectl**: Fetches the latest stable release for the target architecture. 34 | - **Verify checksum**: Ensures the integrity of the downloaded binary. 35 | - **Install kubectl**: Installs `kubectl` in `/usr/local/bin`. 36 | - **Check version**: Verifies the installed `kubectl` version. -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubectl/install_kubectl.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install kubectl for x86_64 or arm64 architecture 3 | hosts: all 4 | become: yes 5 | vars: 6 | kubectl_architecture: "{{ 'amd64' if ansible_architecture == 'x86_64' else 'arm64' }}" 7 | 8 | tasks: 9 | - name: Download kubectl binary 10 | ansible.builtin.get_url: 11 | url: "https://dl.k8s.io/release/{{ lookup('url', 'https://dl.k8s.io/release/stable.txt') }}/bin/linux/{{ kubectl_architecture }}/kubectl" 12 | dest: /tmp/kubectl 13 | mode: '0755' 14 | 15 | - name: Download kubectl sha256 file 16 | ansible.builtin.get_url: 17 | url: "https://dl.k8s.io/release/{{ lookup('url', 'https://dl.k8s.io/release/stable.txt') }}/bin/linux/{{ kubectl_architecture }}/kubectl.sha256" 18 | dest: /tmp/kubectl.sha256 19 | 20 | - name: Verify kubectl sha256 checksum 21 | ansible.builtin.shell: | 22 | echo "$(cat /tmp/kubectl.sha256) /tmp/kubectl" | sha256sum --check 23 | register: sha256_check 24 | failed_when: "'OK' not in sha256_check.stdout" 25 | 26 | - name: Install kubectl 27 | ansible.builtin.command: 28 | cmd: "install -o root -g root -m 0755 /tmp/kubectl /usr/local/bin/kubectl" 29 | 30 | - name: Check kubectl version 31 | ansible.builtin.command: 32 | cmd: kubectl version --client 33 | register: kubectl_version 34 | 35 | - name: Show kubectl version 36 | debug: 37 | var: kubectl_version.stdout 38 | -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubectl/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=142.93.203.29 ansible_user=root 3 | master2 ansible_host=147.182.175.175 ansible_user=root 4 | master3 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubernetes-dashboard/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubernetes-dashboard/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Uninstall Kubernetes Dashboard 3 | hosts: all 4 | become: yes 5 | vars: 6 | dashboard_namespace: "kubernetes-dashboard" # Namespace for Kubernetes dashboard 7 | helm_release_name: "kubernetes-dashboard" # Helm release name 8 | 9 | tasks: 10 | - name: Uninstall the Kubernetes Dashboard using Helm 11 | ansible.builtin.command: 12 | cmd: helm uninstall {{ helm_release_name }} --namespace {{ dashboard_namespace }} 13 | register: helm_uninstall_output 14 | 15 | - name: Show Helm uninstall output 16 | debug: 17 | var: helm_uninstall_output.stdout 18 | 19 | - name: Delete the Kubernetes Dashboard namespace 20 | ansible.builtin.command: 21 | cmd: kubectl 
delete namespace {{ dashboard_namespace }} 22 | register: delete_namespace_output 23 | 24 | - name: Show namespace deletion output 25 | debug: 26 | var: delete_namespace_output.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubesphere-dashboard/expose_kubesphere.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Expose Kubesphere Dashboard 3 | hosts: all 4 | become: yes 5 | vars: 6 | kubesphere_namespace: "kubesphere-system" 7 | ingress_name: "kubesphere-dashboard-ingress" 8 | ingress_host: "ui.helm.uz" 9 | tls_secret_name: "ks-console-tls" 10 | cluster_issuer: "letsencrypt-prod" 11 | service_name: "ks-console" 12 | service_port: 80 # Must be the internal port 13 | 14 | tasks: 15 | - name: Create Ingress configuration file 16 | ansible.builtin.copy: 17 | dest: /tmp/kubesphere-dashboard-ingress.yaml 18 | content: | 19 | apiVersion: networking.k8s.io/v1 20 | kind: Ingress 21 | metadata: 22 | name: {{ ingress_name }} 23 | namespace: {{ kubesphere_namespace }} 24 | annotations: 25 | cert-manager.io/cluster-issuer: "{{ cluster_issuer }}" 26 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 27 | spec: 28 | ingressClassName: "nginx" 29 | rules: 30 | - host: {{ ingress_host }} 31 | http: 32 | paths: 33 | - path: / 34 | pathType: Prefix 35 | backend: 36 | service: 37 | name: {{ service_name }} 38 | port: 39 | number: {{ service_port }} # Must be the ClusterIP port 40 | tls: 41 | - hosts: 42 | - {{ ingress_host }} 43 | secretName: {{ tls_secret_name }} 44 | 45 | - name: Apply Ingress configuration 46 | ansible.builtin.command: 47 | cmd: kubectl apply -f /tmp/kubesphere-dashboard-ingress.yaml 48 | register: ingress_apply_output 49 | 50 | - name: Show Ingress apply output 51 | debug: 52 | var: ingress_apply_output.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubesphere-dashboard/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Kubesphere 3 | hosts: all 4 | become: yes 5 | tasks: 6 | - name: Install Kubesphere installer 7 | command: kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml 8 | 9 | - name: Apply Kubesphere cluster configuration 10 | command: kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml 11 | 12 | - name: Wait for 3 minutes before proceeding 13 | wait_for: 14 | timeout: 180 # 3 minutes (180 seconds) 15 | 16 | - name: Delete ks-apiserver pod in kubesphere-system namespace 17 | command: kubectl delete pod -n kubesphere-system -l app=ks-apiserver 18 | ignore_errors: yes -------------------------------------------------------------------------------- /Ansible/Kubernetes/kubesphere-dashboard/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/Kubernetes/metallb/configure_metallb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure MetalLB IP Address Pool 3 | hosts: all 4 | become: yes 5 | vars: 6 | metallb_namespace: "metallb-system" 7 | ip_pool_start: "10.116.0.20" 8 | ip_pool_end: "10.116.0.40" 9 | ip_pool_name: "first-pool" 10 | 11 | tasks: 12 | - name: Create 
directory for MetalLB configuration 13 | ansible.builtin.file: 14 | path: /tmp/metallb 15 | state: directory 16 | 17 | - name: Create MetalLB address pool configuration 18 | ansible.builtin.copy: 19 | dest: /tmp/metallb/address-pool.yaml 20 | content: | 21 | apiVersion: metallb.io/v1beta1 22 | kind: IPAddressPool 23 | metadata: 24 | name: {{ ip_pool_name }} 25 | namespace: {{ metallb_namespace }} 26 | spec: 27 | addresses: 28 | - {{ ip_pool_start }}-{{ ip_pool_end }} 29 | --- 30 | apiVersion: metallb.io/v1beta1 31 | kind: L2Advertisement 32 | metadata: 33 | name: {{ ip_pool_name }} 34 | namespace: {{ metallb_namespace }} 35 | spec: 36 | ipAddressPools: 37 | - {{ ip_pool_name }} 38 | 39 | - name: Apply MetalLB address pool configuration 40 | ansible.builtin.command: 41 | cmd: kubectl apply -f /tmp/metallb/address-pool.yaml 42 | register: config_apply_output 43 | 44 | - name: Show MetalLB config apply output 45 | debug: 46 | var: config_apply_output.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/metallb/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/Kubernetes/metallb/uninstall_metallb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Uninstall MetalLB and clean up 3 | hosts: all 4 | become: yes 5 | vars: 6 | metallb_namespace: "metallb-system" 7 | 8 | tasks: 9 | - name: Uninstall MetalLB using Helm 10 | ansible.builtin.command: 11 | cmd: helm uninstall metallb --namespace {{ metallb_namespace }} 12 | register: helm_uninstall 13 | 14 | - name: Show Helm uninstall output 15 | debug: 16 | var: helm_uninstall.stdout 17 | 18 | - name: Delete MetalLB namespace 19 | ansible.builtin.command: 20 | cmd: kubectl delete namespace {{ metallb_namespace }} 21 | register: delete_namespace 22 | 23 | - name: Show namespace deletion output 24 | debug: 25 | var: delete_namespace.stdout -------------------------------------------------------------------------------- /Ansible/Kubernetes/rancher-dashboard/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | master1 ansible_host=159.223.131.86 ansible_user=root -------------------------------------------------------------------------------- /Ansible/activemq-artemis/artemis.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ActiveMQ Artemis Service 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | ExecStart={{ artemis_instance_dir }}/bin/artemis-service start 8 | ExecStop={{ artemis_instance_dir }}/bin/artemis-service stop 9 | User={{ artemis_user }} 10 | Group={{ artemis_group }} 11 | Restart=on-abort 12 | 13 | [Install] 14 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /Ansible/activemq-artemis/clenup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup ActiveMQ Artemis installation 3 | hosts: artemis_servers 4 | become: yes 5 | vars_files: 6 | - vars.yml 7 | tasks: 8 | - name: Stop and disable Artemis service 9 | systemd: 10 | name: artemis 11 | state: stopped 12 | enabled: no 13 | ignore_errors: yes 14 | 15 | - name: Remove Artemis systemd service file 16 | file: 17 | path: 
/etc/systemd/system/artemis.service 18 | state: absent 19 | 20 | - name: Reload systemd daemon after service removal 21 | command: systemctl daemon-reload 22 | 23 | - name: Delete Artemis instance directory 24 | file: 25 | path: "{{ artemis_instance_dir }}" 26 | state: absent 27 | 28 | - name: Delete Artemis installation directory 29 | file: 30 | path: "{{ artemis_install_dir }}" 31 | state: absent 32 | 33 | - name: Remove temporary downloaded file 34 | file: 35 | path: /tmp/apache-artemis-{{ artemis_version }}-bin.tar.gz 36 | state: absent 37 | 38 | - name: Remove artemis user 39 | user: 40 | name: "{{ artemis_user }}" 41 | state: absent 42 | ignore_errors: yes 43 | 44 | - name: Remove artemis group 45 | group: 46 | name: "{{ artemis_group }}" 47 | state: absent 48 | ignore_errors: yes -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/bootstrap_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Bootstrap Servers 3 | hosts: master,nodes 4 | become: yes 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Ensure required packages are installed 10 | package: 11 | name: 12 | - curl 13 | - wget 14 | - net-tools 15 | - git 16 | - "{{ java_packages[ansible_os_family] }}" 17 | state: present 18 | 19 | - name: Locate Java 17 executable on Debian-based systems 20 | ansible.builtin.shell: "find /usr/lib/jvm -name 'java' | grep 'java-17'" 21 | register: debian_java_path 22 | when: ansible_os_family == "Debian" 23 | 24 | - name: Locate Java 17 executable on RedHat-based systems 25 | ansible.builtin.shell: "find /usr/lib/jvm -name 'java' | grep 'java-17'" 26 | register: redhat_java_path 27 | when: ansible_os_family == "RedHat" 28 | 29 | - name: Set Java 17 as the default alternative on Debian-based systems 30 | community.general.alternatives: 31 | name: java 32 | path: "{{ debian_java_path.stdout | trim }}" 33 | priority: 100 34 | when: ansible_os_family == "Debian" 35 | 36 | - name: Set Java 17 as the default alternative on RedHat-based systems 37 | community.general.alternatives: 38 | name: java 39 | path: "{{ redhat_java_path.stdout | trim }}" 40 | link: "/usr/bin/java" 41 | priority: 100 42 | when: ansible_os_family == "RedHat" 43 | 44 | - name: Verify Java 17 installation 45 | ansible.builtin.command: java -version 46 | register: java_version 47 | ignore_errors: true 48 | 49 | - name: Display Java version 50 | ansible.builtin.debug: 51 | msg: "Java installed successfully. 
Version: {{ java_version.stderr_lines | default([]) | union(java_version.stdout_lines | default([])) }}" 52 | when: java_version is defined -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/cluster_bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure ActiveMQ Artemis Cluster 3 | hosts: master 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Configure master broker 10 | ansible.builtin.template: 11 | src: "templates/master-broker.xml.j2" 12 | dest: "{{ artemis_instance_dir }}/etc/broker.xml" 13 | owner: "{{ artemis_user }}" 14 | group: "{{ artemis_group }}" 15 | mode: '0644' 16 | 17 | - name: Restart ActiveMQ Artemis on master 18 | ansible.builtin.systemd: 19 | name: artemis 20 | state: restarted 21 | enabled: yes 22 | notify: "Pause before node restart" 23 | 24 | handlers: 25 | - name: Pause before node restart 26 | ansible.builtin.pause: 27 | minutes: 1 28 | 29 | - name: Configure ActiveMQ Artemis Nodes 30 | hosts: nodes 31 | become: true 32 | vars_files: 33 | - vars.yml 34 | 35 | tasks: 36 | - name: Configure node broker 37 | ansible.builtin.template: 38 | src: "templates/node-broker.xml.j2" 39 | dest: "{{ artemis_instance_dir }}/etc/broker.xml" 40 | owner: "{{ artemis_user }}" 41 | group: "{{ artemis_group }}" 42 | mode: '0644' 43 | 44 | - name: Restart ActiveMQ Artemis on nodes 45 | ansible.builtin.systemd: 46 | name: artemis 47 | state: restarted 48 | enabled: yes 49 | 50 | handlers: 51 | - name: Pause before node restart 52 | ansible.builtin.pause: 53 | minutes: 1 -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/inventory.ini: -------------------------------------------------------------------------------- 1 | [master] 2 | master ansible_host=142.93.56.4 ansible_user=root 3 | 4 | [nodes] 5 | node1 ansible_host=198.211.96.150 ansible_user=root 6 | node2 ansible_host=68.183.107.200 ansible_user=root 7 | 8 | [nfs_server] 9 | nfs ansible_host=34.70.204.185 ansible_user=root -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Bootstrap server 3 | import_playbook: bootstrap_server.yml 4 | 5 | - name: Setup NFS 6 | import_playbook: setup_nfs.yml 7 | 8 | - name: Install Artemis 9 | import_playbook: install_artemis.yml 10 | 11 | # - name: Cluster Bootstrap 12 | # import_playbook: cluster_bootstrap.yml -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/templates/artemis.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ActiveMQ Artemis Service 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | ExecStart={{ artemis_instance_dir }}/bin/artemis-service start 8 | ExecStop={{ artemis_instance_dir }}/bin/artemis-service stop 9 | User={{ artemis_user }} 10 | Group={{ artemis_group }} 11 | Restart=on-abort 12 | 13 | [Install] 14 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | java_packages: 3 | Debian: "openjdk-17-jdk" 4 | RedHat: "java-17-openjdk" 5 | 6 | 
artemis_version: "2.38.0" 7 | artemis_url: "https://downloads.apache.org/activemq/activemq-artemis/{{ artemis_version }}/apache-artemis-{{ artemis_version }}-bin.tar.gz" 8 | artemis_install_dir: "/opt/artemis" 9 | artemis_instance_dir: "/opt/artemis-instance" 10 | artemis_user: "artemis" 11 | artemis_group: "artemis" 12 | artemis_password: "329awfas" 13 | 14 | nfs_export_path: "/srv/nfs" 15 | nfs_allowed_hosts: "*" 16 | nfs_service_name: 17 | RedHat: nfs-server 18 | Debian: nfs-kernel-server 19 | nfs_server_ip: "34.70.204.185" 20 | nfs_mount_paths: 21 | master: "/mnt/artemis-cluster-master-prod" 22 | node1: "/mnt/artemis-cluster-node1-prod" 23 | node2: "/mnt/artemis-cluster-node2-prod" 24 | 25 | connectors: 26 | master: "tcp://{{ hostvars['master']['ansible_host'] }}:61616" 27 | node1: "tcp://{{ hostvars['node1']['ansible_host'] }}:61616" 28 | node2: "tcp://{{ hostvars['node2']['ansible_host'] }}:61616" -------------------------------------------------------------------------------- /Ansible/activemq-artemis/cluster/vip_ip.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ismoilovdevml/infra-as-code/82f52afdae3971556eac9e8d12efb59830837c14/Ansible/activemq-artemis/cluster/vip_ip.yml -------------------------------------------------------------------------------- /Ansible/activemq-artemis/inventory.ini: -------------------------------------------------------------------------------- 1 | [artemis_servers] 2 | debian ansible_host=157.245.130.56 ansible_user=root 3 | ubuntu ansible_host=134.209.77.220 ansible_user=root 4 | rockylinux ansible_host=142.93.56.4 ansible_user=root -------------------------------------------------------------------------------- /Ansible/activemq-artemis/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | java_packages: 3 | Debian: "openjdk-17-jdk" 4 | RedHat: "java-17-openjdk" 5 | artemis_version: "2.38.0" 6 | artemis_url: "https://downloads.apache.org/activemq/activemq-artemis/{{ artemis_version }}/apache-artemis-{{ artemis_version }}-bin.tar.gz" 7 | artemis_install_dir: "/opt/artemis" 8 | artemis_instance_dir: "/opt/artemis-instance" 9 | artemis_user: "artemis" 10 | artemis_group: "artemis" 11 | artemis_password: "329awfas" -------------------------------------------------------------------------------- /Ansible/cassandra/README.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | ansible-galaxy collection install community.cassandra 3 | ``` -------------------------------------------------------------------------------- /Ansible/cassandra/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Cassandra on target hosts 3 | hosts: cassandra_nodes 4 | become: yes 5 | 6 | collections: 7 | - community.cassandra 8 | 9 | roles: 10 | - cassandra_repository 11 | - cassandra_install 12 | 13 | vars_files: 14 | - vars.yml 15 | 16 | tasks: 17 | - name: Ensure required firewall ports are open 18 | include_role: 19 | name: cassandra_firewall 20 | vars: 21 | cassandra_firewall_ports: "{{ firewall_ports }}" -------------------------------------------------------------------------------- /Ansible/cassandra/inventory.ini: -------------------------------------------------------------------------------- 1 | [cassandra_nodes] 2 | debian ansible_host=161.35.3.173 ansible_user=root 3 | ubuntu ansible_host=167.71.105.192 ansible_user=root 4 | rhel 
ansible_host=167.172.139.139 ansible_user=root -------------------------------------------------------------------------------- /Ansible/cassandra/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables used for Cassandra installation and configuration 3 | 4 | cassandra_user: cassandra 5 | cassandra_password: DL3sdf@jdxsw 6 | listen_address: 0.0.0.0 7 | rpc_address: 0.0.0.0 8 | broadcast_address: "{{ ansible_default_ipv4.address }}" 9 | authenticator: PasswordAuthenticator 10 | authorizer: CassandraAuthorizer 11 | cassandra_version: "41x" # Specify the desired version 12 | firewall_ports: 13 | - "7000" 14 | - "7001" 15 | - "7199" 16 | - "9042" 17 | - "9160" -------------------------------------------------------------------------------- /Ansible/certbot/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Certbot and NGINX plugin on multiple OSes 3 | hosts: certbot_servers 4 | become: yes 5 | tasks: 6 | - name: Install Certbot and NGINX plugin on Debian/Ubuntu 7 | apt: 8 | name: 9 | - certbot 10 | - python3-certbot-nginx 11 | state: present 12 | when: ansible_facts['os_family'] == "Debian" 13 | 14 | - name: Install Certbot and NGINX plugin on CentOS/RHEL/Rocky 15 | dnf: 16 | name: 17 | - certbot 18 | - python3-certbot-nginx 19 | state: present 20 | when: ansible_facts['os_family'] == "RedHat" -------------------------------------------------------------------------------- /Ansible/certbot/inventory.ini: -------------------------------------------------------------------------------- 1 | [certbot_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/certbot/obtain_ssl.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Obtain SSL certificate for NGINX and configure it 3 | hosts: certbot_servers 4 | become: yes 5 | vars: 6 | domen: nginx.helm.uz 7 | tasks: 8 | - name: Obtain SSL certificate with Certbot for NGINX 9 | shell: | 10 | certbot --nginx -d {{ domen }} -d www.{{ domen }} --non-interactive --agree-tos --email teshmat@gmail.com 11 | register: certbot_output 12 | 13 | - name: Debug Certbot output 14 | debug: 15 | var: certbot_output.stdout 16 | 17 | - name: Restart NGINX to apply SSL certificate 18 | service: 19 | name: nginx 20 | state: restarted -------------------------------------------------------------------------------- /Ansible/certbot/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Certbot and obtain SSL certificate for NGINX 3 | hosts: certbot_servers 4 | become: yes 5 | roles: 6 | - role: geerlingguy.certbot 7 | vars: 8 | certbot_install_method: package # Install Certbot from the OS package manager 9 | certbot_email: "teshmat@gmail.com" # Email for Let's Encrypt notifications 10 | certbot_certs: 11 | - domains: 12 | - nginx.helm.uz # Main domain 13 | - www.nginx.helm.uz # Additional domain 14 | webserver: nginx # Configure Certbot for NGINX web server 15 | certbot_auto_renew: true # Enable auto-renewal of certificates 16 | certbot_auto_renew_hour: "0,12" # Renew certificates at midnight and noon 17 | certbot_auto_renew_minute: "30" # Renew at 30 minutes past the hour 18 | certbot_auto_renew_user: "root" # Ensure renewal runs as 
root user 19 | 20 | tasks: 21 | - name: Ensure NGINX is stopped before standalone Certbot execution 22 | service: 23 | name: nginx 24 | state: stopped 25 | when: certbot_create_method == "standalone" 26 | 27 | - name: Start NGINX after Certbot obtains certificates 28 | service: 29 | name: nginx 30 | state: started 31 | when: certbot_create_method == "standalone" -------------------------------------------------------------------------------- /Ansible/docker/README.md: -------------------------------------------------------------------------------- 1 | # 📋 Docker Installation and Uninstallation Playbook 2 | 3 | This playbook installs Docker on multiple Linux operating systems and provides a method to uninstall Docker, including the cleanup of associated files and directories. 4 | 5 | ## 🛠️ Usage 6 | 7 | ### Install Docker 8 | Run the following command to install Docker on your servers: 9 | ```bash 10 | ansible-galaxy collection install community.general 11 | ansible-playbook -i inventory.ini ./install_docker.yml 12 | ``` 13 | # Clean up and Uninstall Docker 14 | To clean up and uninstall Docker from your servers, use the following command: 15 | ```bash 16 | ansible-playbook -i inventory.ini ./uninstall_docker.yml 17 | ``` 18 | This will: 19 | * Stop all Docker services. 20 | * Remove all Docker containers, images, volumes, and associated files. 21 | * Remove the Docker GPG keys and repository sources. 22 | * Uninstall Docker packages. 23 | * Remove Docker user and group. 24 | 25 | 26 | # 💻 Supported Linux Operating Systems 27 | This playbook supports the following Linux distributions: 28 | * 🐧 **Debian:** 11,12 29 | * 🐧 **Ubuntu:** 20.04,22.04 30 | * 🐧 **RHEL:** 7,8 31 | * 🐧 **Rocky Linux:** 8,9 32 | 33 | # ✅ Tested Operating Systems 34 | The playbook has been tested on the following OS versions: 35 | * ✅**Debian:** 11,12 36 | * ✅**Ubuntu:** 20.04,22.04 37 | * ✅**RHEL:** 7,8 38 | * ✅**Rocky Linux:** 8,9 39 | 40 | # ⚙️ Supported Ansible Versions 41 | * ✅ ansible [core 2.16.3] 42 | * ❗️ ansible [core 2.17.3] (compatibility issues) 43 | 44 | > Note: The playbook assumes you are running Ansible as the root user. For non-root users, ensure you have `become` privileges configured. 
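45 | 46 | # 🔑 Running as a Non-root User 47 | 48 | As a minimal sketch only — the host alias, IP address, and `deploy` user below are placeholders, not values from this repository — a non-root inventory entry with `become` enabled could look like this: 49 | 50 | ```ini 51 | [all] 52 | ubuntu ansible_host=203.0.113.10 ansible_user=deploy ansible_become=true ansible_become_method=sudo 53 | ```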
-------------------------------------------------------------------------------- /Ansible/docker/install_docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Docker Compose on Multiple OS and Architectures 3 | hosts: all 4 | become: true 5 | vars: 6 | compose_version: "2.29.7" 7 | compose_architectures: 8 | linux: 9 | - x86_64 10 | - aarch64 11 | - armv7 12 | - ppc64le 13 | - riscv64 14 | - s390x 15 | windows: 16 | - x86_64 17 | - aarch64 18 | download_url: "https://github.com/docker/compose/releases/download" 19 | 20 | tasks: 21 | - name: Set the Docker Compose binary URL based on architecture and OS 22 | set_fact: 23 | compose_url: "{{ download_url }}/v{{ compose_version }}/docker-compose-{{ ansible_system | lower }}-{{ ansible_architecture }}" 24 | 25 | - name: Download Docker Compose binary 26 | ansible.builtin.get_url: 27 | url: "{{ compose_url }}" 28 | dest: /usr/local/bin/docker-compose 29 | mode: '0755' 30 | when: ansible_architecture in compose_architectures[ansible_system | lower] 31 | 32 | - name: Verify installation 33 | ansible.builtin.command: docker-compose --version 34 | register: compose_version_output 35 | ignore_errors: yes 36 | 37 | - name: Show Docker Compose version 38 | ansible.builtin.debug: 39 | msg: "{{ compose_version_output.stdout }}" -------------------------------------------------------------------------------- /Ansible/docker/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | rockylinux ansible_host=24.144.106.189 ansible_user=root 3 | debian ansible_host=165.22.3.122 ansible_user=root 4 | ubuntu ansible_host=159.223.131.6 ansible_user=root -------------------------------------------------------------------------------- /Ansible/dozzle/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup Dozzle in Docker 3 | hosts: all 4 | become: yes 5 | vars: 6 | container_name: "dozzle" 7 | dozzle_image: "amir20/dozzle:latest" 8 | 9 | tasks: 10 | - name: Stop Dozzle container if it exists 11 | docker_container: 12 | name: "{{ container_name }}" 13 | state: stopped 14 | ignore_errors: yes 15 | failed_when: false 16 | 17 | - name: Remove Dozzle container if it exists 18 | docker_container: 19 | name: "{{ container_name }}" 20 | state: absent 21 | ignore_errors: yes 22 | failed_when: false 23 | 24 | - name: Remove Dozzle Docker image if it exists 25 | docker_image: 26 | name: "{{ dozzle_image }}" 27 | state: absent 28 | ignore_errors: yes 29 | failed_when: false -------------------------------------------------------------------------------- /Ansible/dozzle/dozzle_cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup Dozzle Server 3 | hosts: dozzle_servers 4 | become: yes 5 | vars: 6 | container_name: "dozzle_server" 7 | dozzle_image: "amir20/dozzle:latest" 8 | 9 | tasks: 10 | - name: Stop Dozzle server container if it exists 11 | docker_container: 12 | name: "{{ container_name }}" 13 | state: stopped 14 | ignore_errors: yes 15 | failed_when: false 16 | 17 | - name: Remove Dozzle server container if it exists 18 | docker_container: 19 | name: "{{ container_name }}" 20 | state: absent 21 | ignore_errors: yes 22 | failed_when: false 23 | 24 | - name: Remove Dozzle Docker image if it exists 25 | docker_image: 26 | name: "{{ dozzle_image }}" 27 | state: absent 28 | ignore_errors: yes 29 | failed_when: false 30 | 
31 | - name: Cleanup Dozzle Agent 32 | hosts: dozzle_agents 33 | become: yes 34 | vars: 35 | container_name: "dozzle_agent" 36 | dozzle_image: "amir20/dozzle:latest" 37 | 38 | tasks: 39 | - name: Stop Dozzle agent container if it exists 40 | docker_container: 41 | name: "{{ container_name }}" 42 | state: stopped 43 | ignore_errors: yes 44 | failed_when: false 45 | 46 | - name: Remove Dozzle agent container if it exists 47 | docker_container: 48 | name: "{{ container_name }}" 49 | state: absent 50 | ignore_errors: yes 51 | failed_when: false 52 | 53 | - name: Remove Dozzle Docker image if it exists 54 | docker_image: 55 | name: "{{ dozzle_image }}" 56 | state: absent 57 | ignore_errors: yes 58 | failed_when: false -------------------------------------------------------------------------------- /Ansible/dozzle/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Dozzle in Docker 3 | hosts: all 4 | become: yes 5 | vars: 6 | dozzle_image: "amir20/dozzle:latest" 7 | container_name: "dozzle" 8 | host_port: 8081 9 | docker_sock_path: "/var/run/docker.sock" 10 | 11 | tasks: 12 | - name: Check if Docker binary exists 13 | shell: "command -v docker" 14 | register: docker_check 15 | changed_when: false 16 | failed_when: false 17 | 18 | - name: Display message if Docker is not installed 19 | debug: 20 | msg: "Docker is not installed on this host." 21 | when: docker_check.rc != 0 22 | 23 | - name: Skip host if Docker is missing 24 | meta: end_host 25 | when: docker_check.rc != 0 26 | 27 | - name: Pull Dozzle image 28 | ansible.builtin.docker_image: 29 | name: "{{ dozzle_image }}" 30 | source: pull 31 | 32 | - name: Run Dozzle container 33 | ansible.builtin.docker_container: 34 | name: "{{ container_name }}" 35 | image: "{{ dozzle_image }}" 36 | state: started 37 | restart_policy: always 38 | volumes: 39 | - "{{ docker_sock_path }}:/var/run/docker.sock" 40 | published_ports: 41 | - "{{ host_port }}:8080" 42 | -------------------------------------------------------------------------------- /Ansible/dozzle/inventory.ini: -------------------------------------------------------------------------------- 1 | [dozzle_servers] 2 | rockylinux ansible_host=137.184.143.123 ansible_user=root 3 | 4 | [dozzle_agents] 5 | debian ansible_host=165.22.3.122 ansible_user=root 6 | ubuntu ansible_host=159.223.131.6 ansible_user=root -------------------------------------------------------------------------------- /Ansible/dozzle/vars.yml: -------------------------------------------------------------------------------- 1 | # vars.yml 2 | dozzle_version: "latest" 3 | dozzle_port: 8081 4 | agent_port: 7007 5 | auth_provider: "simple" 6 | auth_ttl: "48h" 7 | dozzle_data_path: "/data/dozzle" 8 | users_file_path: "{{ dozzle_data_path }}/users.yml" 9 | admin_username: "admin" 10 | admin_password: "oqw0f3092l" 11 | admin_email: "admin@example.com" 12 | admin_name: "Admin" -------------------------------------------------------------------------------- /Ansible/elasticsearch/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/elasticsearch/configure-elasticsearch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Elasticsearch 3 | hosts: all 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | tasks: 8 | - name: Template 
elasticsearch.yml configuration 9 | template: 10 | src: templates/elasticsearch.yml.j2 11 | dest: /etc/elasticsearch/elasticsearch.yml 12 | owner: elasticsearch 13 | group: elasticsearch 14 | mode: '0644' 15 | 16 | - name: Restart Elasticsearch service 17 | service: 18 | name: elasticsearch 19 | state: restarted 20 | enabled: true -------------------------------------------------------------------------------- /Ansible/elasticsearch/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | debian ansible_host=142.93.193.106 ansible_user=root 3 | ubuntu ansible_host=143.198.115.57 ansible_user=root 4 | rockylinux ansible_host=178.128.150.243 ansible_user=root -------------------------------------------------------------------------------- /Ansible/elasticsearch/templates/elasticsearch-apt.j2: -------------------------------------------------------------------------------- 1 | deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] {{ elastic_repo_url_apt }} stable main -------------------------------------------------------------------------------- /Ansible/elasticsearch/templates/elasticsearch-yum.j2: -------------------------------------------------------------------------------- 1 | [elasticsearch] 2 | name=Elasticsearch repository for {{ elastic_version }}.x packages 3 | baseurl={{ elastic_repo_url_yum }} 4 | gpgcheck=1 5 | gpgkey={{ elastic_gpg_key }} 6 | enabled=1 -------------------------------------------------------------------------------- /Ansible/elasticsearch/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | elastic_version: "8" # 7 or 8 3 | elastic_gpg_key: "https://artifacts.elastic.co/GPG-KEY-elasticsearch" 4 | elastic_repo_url_apt: "https://artifacts.elastic.co/packages/{{ elastic_version }}.x/apt" 5 | elastic_repo_url_yum: "https://artifacts.elastic.co/packages/{{ elastic_version }}.x/yum" 6 | 7 | # elasticsearch.yml 8 | 9 | cluster_name: my-application 10 | node_name: node-1 11 | node_attr_rack: r1 12 | path_data: /var/lib/elasticsearch 13 | path_logs: /var/log/elasticsearch 14 | network_host: 0.0.0.0 15 | http_port: 9200 16 | discovery_seed_hosts: 17 | - host1 18 | - host2 19 | cluster_initial_master_nodes: 20 | - node-1 21 | - node-2 22 | xpack_security_enabled: true 23 | xpack_security_enrollment_enabled: true 24 | xpack_security_http_ssl_enabled: true 25 | xpack_security_http_ssl_keystore_path: certs/http.p12 26 | xpack_security_transport_ssl_enabled: true 27 | xpack_security_transport_ssl_verification_mode: certificate 28 | xpack_security_transport_ssl_keystore_path: certs/transport.p12 29 | xpack_security_transport_ssl_truststore_path: certs/transport.p12 30 | http_host: 0.0.0.0 -------------------------------------------------------------------------------- /Ansible/firewall/README.md: -------------------------------------------------------------------------------- 1 | ``` 2 | ansible-galaxy role install geerlingguy.firewall 3 | ``` -------------------------------------------------------------------------------- /Ansible/firewall/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/firewall/inventory.ini: -------------------------------------------------------------------------------- 1 | [servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu 
ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/firewall/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | firewalld_vars: 3 | install_firewalld: false 4 | open_ports: 5 | - 80 # HTTP 6 | - 443 # HTTPS 7 | - 22 # SSH 8 | close_ports: 9 | - 8080 # Example of closing port 8080 10 | - 3306 # MySQL 11 | ufw_vars: 12 | install_ufw: false 13 | open_ports: 14 | - 80 # HTTP 15 | - 443 # HTTPS 16 | - 22 # SSH 17 | close_ports: 18 | - 8080 # Example of closing port 8080 19 | - 3306 # MySQL 20 | -------------------------------------------------------------------------------- /Ansible/go-lang/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/go-lang/install_go.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: go_servers 3 | become: yes 4 | vars_files: 5 | - vars.yml 6 | roles: 7 | - geerlingguy.go 8 | -------------------------------------------------------------------------------- /Ansible/go-lang/inventory.ini: -------------------------------------------------------------------------------- 1 | [go_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/go-lang/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | go_version: "1.23.1" 3 | go_platform: linux 4 | go_arch: amd64 5 | go_tarball: go{{ go_version }}.{{ go_platform }}-{{ go_arch }}.tar.gz 6 | go_download_url: https://dl.google.com/go/{{ go_tarball }} 7 | go_checksum: "49bbb517cfa9eee677e1e7897f7cf9cfdbcf49e05f61984a2789136de359f9bd" # Get the matching checksum from https://go.dev/dl/ -------------------------------------------------------------------------------- /Ansible/grafana-stack/README.md: -------------------------------------------------------------------------------- 1 | # ⚙️ Grafana Stack Setup Playbook 2 | 3 | This Ansible playbook installs and configures the **Grafana stack**, including **Prometheus** and **Grafana**, using the official collections with custom modifications. It allows you to manage and deploy Prometheus, Grafana, and various Prometheus exporters in a flexible way. 4 | 5 | ## 📦 Collections Used 6 | 7 | This playbook is based on the following Ansible collections: 8 | 9 | - [Grafana Ansible Collection](https://github.com/grafana/grafana-ansible-collection/tree/main) 10 | - [Prometheus Ansible Collection](https://github.com/prometheus-community/ansible) 11 | 12 | ## ✨ Modifications 13 | 14 | The playbook has been modified from the official repositories to provide more flexibility, including: 15 | - **Selective Exporter Installation**: Ability to define which Prometheus exporters to install through variable flags (`true` or `false`). 16 | - **Preconfigured Alert Manager**: Integrated configuration for **AlertManager** to send notifications to Discord via webhooks. 17 | 18 | ## 📋 Prerequisites 19 | 20 | Before running this playbook, ensure that the following requirements are met: 21 | 22 | - **Ansible 2.9+** is installed on the control node.
23 | - **SSH access** to all target servers is set up. 24 | 25 | You can install the necessary collections using: 26 | 27 | ```bash 28 | ansible-galaxy collection install prometheus.prometheus 29 | ansible-galaxy collection install grafana.grafana 30 | ``` 31 | -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/configure_promtail.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Promtail 3 | hosts: promtail_servers 4 | become: true 5 | collections: 6 | - grafana.grafana 7 | 8 | vars: 9 | config_file_path: "./promtail_config/{{ promtail_config_file_name }}" # define the file name yourself 10 | promtail_service_name: promtail 11 | 12 | tasks: 13 | - name: Copy Promtail config file to the server 14 | ansible.builtin.copy: 15 | src: "{{ config_file_path }}" 16 | dest: /etc/promtail/config.yml # Default configuration path 17 | owner: root 18 | group: root 19 | mode: '0644' 20 | 21 | - name: Restart Promtail service to apply new configuration 22 | ansible.builtin.service: 23 | name: "{{ promtail_service_name }}" 24 | state: restarted 25 | enabled: true -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/configure_promtail_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update Promtail Configuration and Restart Docker Container 3 | hosts: promtail_servers 4 | become: true 5 | 6 | tasks: 7 | - name: Copy new Promtail config file 8 | ansible.builtin.copy: 9 | src: ./promtail_config/{{ promtail_config_file_name }} 10 | dest: /etc/promtail/config.yml 11 | owner: root 12 | group: root 13 | mode: '0644' 14 | 15 | - name: Restart Promtail Docker container 16 | ansible.builtin.docker_container: 17 | name: promtail 18 | image: grafana/promtail:latest 19 | state: restarted 20 | restart_policy: always 21 | volumes: 22 | - /etc/promtail/config.yml:/etc/promtail/config.yml 23 | - /var/lib/docker/containers:/var/lib/docker/containers:ro 24 | - /var/run/docker.sock:/var/run/docker.sock 25 | command: "-config.file=/etc/promtail/config.yml" 26 | ports: 27 | - "9080:9080" -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/install_grafana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install and configure Grafana 3 | hosts: grafana_servers 4 | become: true 5 | collections: 6 | - grafana.grafana 7 | roles: 8 | - grafana 9 | vars: 10 | grafana_security: 11 | admin_user: admin 12 | admin_password: adminpaswordp2184d90KHS -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/install_loki.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install and configure Loki 3 | hosts: loki_servers 4 | become: true 5 | collections: 6 | - grafana.grafana 7 | 8 | vars: 9 | loki_version: "latest" # or a specific version 10 | loki_http_listen_port: 3100 11 | loki_http_listen_address: "0.0.0.0" 12 | loki_storage_config: 13 | filesystem: 14 | directory: "/var/lib/loki" 15 | loki_ruler: 16 | alertmanager_url: http://localhost:9093 17 | 18 | tasks: 19 | - name: Install Loki using the Grafana Loki role 20 | include_role: 21 | name: grafana.grafana.loki 22 | 23 | - name: Start and enable Loki service 24 | 
ansible.builtin.service: 25 | name: loki 26 | state: started 27 | enabled: true -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/install_promtail.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Promtail 3 | hosts: promtail_servers 4 | become: true 5 | collections: 6 | - grafana.grafana 7 | 8 | vars: 9 | promtail_version: "latest" # or specific version 10 | promtail_http_listen_port: 9080 11 | promtail_http_listen_address: "0.0.0.0" 12 | 13 | tasks: 14 | - name: Ensure Promtail is installed 15 | ansible.builtin.package: 16 | name: promtail 17 | state: present 18 | become: true 19 | 20 | - name: Start and enable Promtail service 21 | ansible.builtin.service: 22 | name: promtail 23 | state: started 24 | enabled: true -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/install_promtail_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install and Configure Promtail via Docker 3 | hosts: promtail_servers 4 | become: true 5 | 6 | tasks: 7 | - name: Ensure Docker is installed 8 | ansible.builtin.package: 9 | name: docker 10 | state: present 11 | 12 | - name: Start Docker service 13 | ansible.builtin.service: 14 | name: docker 15 | state: started 16 | enabled: true 17 | 18 | - name: Create Promtail configuration directory 19 | ansible.builtin.file: 20 | path: /etc/promtail 21 | state: directory 22 | owner: root 23 | group: root 24 | mode: '0755' 25 | 26 | - name: Copy Promtail config file to the server 27 | ansible.builtin.copy: 28 | src: ./promtail_config/config.yml 29 | dest: /etc/promtail/config.yml 30 | owner: root 31 | group: root 32 | mode: '0644' 33 | 34 | - name: Pull the latest Promtail Docker image 35 | ansible.builtin.docker_image: 36 | name: grafana/promtail 37 | tag: latest 38 | source: pull 39 | 40 | - name: Run Promtail container 41 | ansible.builtin.docker_container: 42 | name: promtail 43 | image: grafana/promtail:latest 44 | state: started 45 | restart_policy: always 46 | volumes: 47 | - /etc/promtail/config.yml:/etc/promtail/config.yml 48 | - /var/lib/docker/containers:/var/lib/docker/containers:ro 49 | - /var/run/docker.sock:/var/run/docker.sock 50 | command: "-config.file=/etc/promtail/config.yml" 51 | ports: 52 | - "9080:9080" -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/inventory.ini: -------------------------------------------------------------------------------- 1 | [grafana_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root 5 | 6 | [loki_servers] 7 | debian ansible_host=10.128.0.47 ansible_user=root 8 | ubuntu ansible_host=10.128.0.48 ansible_user=root 9 | rhel ansible_host=10.128.0.45 ansible_user=root 10 | 11 | [promtail_servers] 12 | debian ansible_host=10.128.0.47 ansible_user=root 13 | ubuntu ansible_host=10.128.0.48 ansible_user=root 14 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/grafana-stack/grafana-collection/promtail_config/config.yml: -------------------------------------------------------------------------------- 1 | server: 2 | http_listen_port: 9080 3 | grpc_listen_port: 0 4 | 5 | positions: 6 | 
filename: /tmp/positions.yaml 7 | 8 | clients: 9 | - url: http://0.0.0.0:3100/loki/api/v1/push # loki url 10 | 11 | scrape_configs: 12 | - job_name: flog_scrape 13 | docker_sd_configs: 14 | - host: unix:///var/run/docker.sock 15 | refresh_interval: 5s 16 | relabel_configs: 17 | - source_labels: ['__meta_docker_container_name'] 18 | regex: '/(.*)' 19 | target_label: 'container' 20 | - source_labels: ['__meta_docker_container_log_stream'] 21 | target_label: 'logstream' 22 | - source_labels: ['__meta_docker_container_label_logging_jobname'] 23 | target_label: 'job' 24 | 25 | - job_name: systemd-journal 26 | journal_sd_configs: 27 | - path: /var/log/journal 28 | relabel_configs: 29 | - source_labels: ['__systemd_unit'] 30 | target_label: 'systemd_unit' 31 | - source_labels: ['__hostname'] 32 | target_label: 'instance' 33 | 34 | - job_name: varlogs 35 | static_configs: 36 | - targets: 37 | - localhost 38 | labels: 39 | job: varlogs 40 | __path__: /var/log/*.log 41 | 42 | - job_name: nginx-logs 43 | static_configs: 44 | - targets: 45 | - localhost 46 | labels: 47 | job: nginx 48 | __path__: /var/log/nginx/*.log -------------------------------------------------------------------------------- /Ansible/grafana-stack/prometheus-collection/install_prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Prometheus 3 | hosts: prometheus_servers 4 | become: true 5 | collections: 6 | - prometheus.prometheus 7 | roles: 8 | - prometheus -------------------------------------------------------------------------------- /Ansible/grafana-stack/prometheus-collection/inventory.ini: -------------------------------------------------------------------------------- 1 | [prometheus_servers] 2 | server1 ansible_host=10.128.0.47 ansible_user=root 3 | server2 ansible_host=10.128.0.48 ansible_user=root 4 | server3 ansible_host=10.128.0.45 ansible_user=root 5 | 6 | [target_servers] 7 | server1 ansible_host=10.128.0.47 ansible_user=root 8 | server2 ansible_host=10.128.0.48 ansible_user=root 9 | server3 ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/grafana-stack/prometheus-collection/vars.yml: -------------------------------------------------------------------------------- 1 | exporters: 2 | node_exporter: true 3 | alertmanager: true 4 | bind_exporter: false 5 | blackbox_exporter: true 6 | cadvisor: true 7 | chrony_exporter: true 8 | fail2ban_exporter: true 9 | ipmi_exporter: true 10 | memcached_exporter: true 11 | mongodb_exporter: true 12 | mysqld_exporter: true 13 | nginx_exporter: true 14 | postgres_exporter: true 15 | process_exporter: true 16 | pushgateway: false 17 | redis_exporter: true 18 | smartctl_exporter: false 19 | smokeping_prober: false 20 | snmp_exporter: true 21 | systemd_exporter: true -------------------------------------------------------------------------------- /Ansible/haproxy/README.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | ansible-galaxy role install geerlingguy.haproxy 3 | ``` 4 | 5 | -------------------------------------------------------------------------------- /Ansible/haproxy/haproxy_2_8_debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install HAProxy 3 | hosts: haproxy_servers 4 | become: yes 5 | tasks: 6 | - name: Update and upgrade apt packages 7 | ansible.builtin.apt: 8 | update_cache: yes 9 | upgrade: dist 10 | 11 
| - name: Install software-properties-common without recommends 12 | ansible.builtin.apt: 13 | name: software-properties-common 14 | state: present 15 | install_recommends: no 16 | 17 | - name: Add the HAProxy PPA repository 18 | ansible.builtin.apt_repository: 19 | repo: ppa:vbernat/haproxy-2.8 20 | state: present 21 | 22 | - name: Install HAProxy version 2.8 23 | ansible.builtin.apt: 24 | name: haproxy=2.8.* 25 | state: present 26 | update_cache: yes -------------------------------------------------------------------------------- /Ansible/haproxy/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install HAProxy 3 | hosts: haproxy_servers 4 | become: yes 5 | roles: 6 | - role: geerlingguy.haproxy -------------------------------------------------------------------------------- /Ansible/haproxy/inventory.ini: -------------------------------------------------------------------------------- 1 | [haproxy_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/haproxy/list.sh: -------------------------------------------------------------------------------- 1 | # 2.8 LTS 2 | #debian 12 3 | https://haproxy.debian.net/#distribution=Debian&release=bookworm&version=2.8 4 | #debian 11 5 | https://haproxy.debian.net/#distribution=Debian&release=bullseye&version=2.8 6 | 7 | 8 | # 2.9 LTS 9 | #ubuntu 20.04 10 | https://haproxy.debian.net/#distribution=Ubuntu&release=focal&version=2.9 11 | https://haproxy.debian.net/#distribution=Ubuntu&release=jammy&version=2.9 12 | https://haproxy.debian.net/#distribution=Ubuntu&release=noble&version=2.9 13 | https://haproxy.debian.net/#distribution=Ubuntu&release=noble&version=3.0 14 | https://haproxy.debian.net/#distribution=Ubuntu&release=jammy&version=3.0 -------------------------------------------------------------------------------- /Ansible/harbor/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/harbor/inventory.ini: -------------------------------------------------------------------------------- 1 | [harbor_server] 2 | rockylinux ansible_host=24.144.106.189 ansible_user=root -------------------------------------------------------------------------------- /Ansible/harbor/vars.yml: -------------------------------------------------------------------------------- 1 | harbor_version: "v2.11.1" 2 | harbor_hostname: "harbor.helm.uz" 3 | harbor_admin_password: "Harbor12345" 4 | harbor_db_password: "root123" 5 | ssl_option: "certbot" # can be either "certbot" or "self_signed" 6 | certbot_cert_path: "/etc/letsencrypt/live/{{ harbor_hostname }}/fullchain.pem" 7 | certbot_key_path: "/etc/letsencrypt/live/{{ harbor_hostname }}/privkey.pem" 8 | self_signed_cert_path: "/path/to/selfsigned/fullchain.pem" # Full path to the self-signed certificate 9 | self_signed_key_path: "/path/to/selfsigned/privkey.pem" # Full path to the self-signed key 10 | harbor_download_url: "https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz" -------------------------------------------------------------------------------- /Ansible/hashicorp-vault/ansible.cfg:
-------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/hashicorp-vault/inventory.ini: -------------------------------------------------------------------------------- 1 | [rabbitmq_servers] 2 | debian ansible_host=147.182.174.211 ansible_user=root 3 | ubuntu ansible_host=67.207.87.177 ansible_user=root 4 | rockylinux ansible_host=24.144.106.189 ansible_user=root -------------------------------------------------------------------------------- /Ansible/hashicorp-vault/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | vault_config_path: /etc/vault.d 3 | vault_data_path: /opt/vault/data 4 | vault_listen_address: "0.0.0.0:8200" 5 | vault_api_addr: "http://127.0.0.1:8200" -------------------------------------------------------------------------------- /Ansible/hashicorp-vault/vault.hcl.j2: -------------------------------------------------------------------------------- 1 | ui = true 2 | 3 | storage "file" { 4 | path = "{{ vault_data_path }}" 5 | } 6 | 7 | listener "tcp" { 8 | address = "{{ vault_listen_address }}" 9 | tls_disable = 1 10 | } 11 | 12 | api_addr = "{{ vault_api_addr }}" -------------------------------------------------------------------------------- /Ansible/java/README.md: -------------------------------------------------------------------------------- 1 | # 📋 Java Installation Playbook 2 | 3 | This Ansible playbook installs the Java Development Kit (JDK) on multiple servers using the **geerlingguy.java** role. 4 | 5 | ## 🛠️ Usage 6 | 7 | ### Install Java 8 | 9 | To install Java on your servers, follow these steps: 10 | 11 | 1. Install the required Ansible role: 12 | ```bash 13 | ansible-galaxy role install geerlingguy.java 14 | ``` 15 | 2. Run the playbook to install Java: 16 | ```bash 17 | ansible-playbook -i inventory.ini install_java.yml 18 | ``` 19 | The Java version can be customized in the playbook by setting the `java_packages` variable. 20 | 21 | ### Example 22 | 23 | To install OpenJDK 11, set the `java_packages` variable in the playbook as shown below: 24 | 25 | ```yml 26 | vars: 27 | java_packages: 28 | - openjdk-11-jdk 29 | ``` 30 | 31 | ### Playbook Explanation 32 | 33 | * **Role used:** [geerlingguy.java](https://github.com/geerlingguy/ansible-role-java) 34 | * This role handles the installation of the Java packages on supported operating systems. You can specify different JDK versions by updating the `java_packages` variable. 35 | 36 | ### 💻 Supported Linux Operating Systems 37 | This playbook supports the following Linux distributions: 38 | * 🐧 **Debian:** 10,11 39 | * 🐧 **Ubuntu:** 20.04,22.04 40 | * 🐧 **CentOS:** 7,8 41 | 42 | ### ⚙️ Supported Ansible Versions 43 | * ✅ ansible-core 2.11.0 44 | * ✅ ansible-core 2.12.5 45 | * ❗️ ansible [core 2.17.3] (compatibility issues) 46 | 47 | > Note: Ensure you run the playbook with appropriate privileges (e.g., use become: yes if needed).
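For a non-root connection, the same role can be driven through privilege escalation. A minimal sketch follows; the `deploy` user is illustrative and not part of this repository's inventories, while `java_servers` matches the group used in this directory's `inventory.ini`:

```yml
---
# Hypothetical example: install OpenJDK 11 while connecting as a non-root user
# and escalating with become. Adjust remote_user to match your own servers.
- hosts: java_servers
  remote_user: deploy   # illustrative non-root user
  become: yes
  vars:
    java_packages:
      - openjdk-11-jdk
  roles:
    - geerlingguy.java
```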
48 | 49 | ### Resources 50 | 51 | * [Ansible Galaxy - geerlingguy.java Role](https://galaxy.ansible.com/ui/standalone/roles/geerlingguy/java/documentation/) 52 | * [GitHub - geerlingguy/ansible-role-java](https://github.com/geerlingguy/ansible-role-java) -------------------------------------------------------------------------------- /Ansible/java/install_java.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: java_servers 3 | become: yes 4 | tasks: 5 | - name: Install Java on RHEL-based systems 6 | include_role: 7 | name: geerlingguy.java 8 | vars: 9 | java_packages: 10 | - java-11-openjdk 11 | - java-11-openjdk-devel 12 | when: ansible_os_family == 'RedHat' 13 | 14 | - name: Install Java on Debian-based systems 15 | include_role: 16 | name: geerlingguy.java 17 | vars: 18 | java_packages: 19 | - openjdk-17-jdk 20 | - default-jdk 21 | - default-jre 22 | when: ansible_os_family == 'Debian' 23 | 24 | - name: Install Java on Ubuntu systems 25 | include_role: 26 | name: geerlingguy.java 27 | vars: 28 | java_packages: 29 | - openjdk-11-jdk 30 | when: ansible_distribution == 'Ubuntu' -------------------------------------------------------------------------------- /Ansible/java/inventory.ini: -------------------------------------------------------------------------------- 1 | [java_servers] 2 | debian ansible_host=157.245.130.56 ansible_user=root 3 | ubuntu ansible_host=134.209.77.220 ansible_user=root 4 | rockylinux ansible_host=142.93.56.4 ansible_user=root -------------------------------------------------------------------------------- /Ansible/java/java_8_11.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: java_servers 3 | become: yes 4 | tasks: 5 | - name: Install Java 8 or Java 11 on RHEL-based systems 6 | include_role: 7 | name: geerlingguy.java 8 | vars: 9 | java_packages: 10 | - java-1.8.0-openjdk 11 | - java-11-openjdk 12 | when: ansible_os_family == 'RedHat' 13 | 14 | - name: Install Java 8 or Java 11 on Debian-based systems 15 | include_role: 16 | name: geerlingguy.java 17 | vars: 18 | java_packages: 19 | - openjdk-11-jdk 20 | when: ansible_os_family == 'Debian' 21 | 22 | - name: Install Java 8 or Java 11 on Ubuntu systems 23 | include_role: 24 | name: geerlingguy.java 25 | vars: 26 | java_packages: 27 | - openjdk-8-jdk 28 | - openjdk-11-jdk 29 | when: ansible_distribution == 'Ubuntu' -------------------------------------------------------------------------------- /Ansible/jenkins/README.md: -------------------------------------------------------------------------------- 1 | # 📋 Jenkins Installation and Uninstallation Playbook 2 | 3 | This Ansible playbook allows for the installation and uninstallation of Jenkins across multiple Linux distributions. It includes cleanup procedures that remove all Jenkins-related files, services, and configurations. 4 | 5 | ## 🛠️ Usage 6 | 7 | ### Install Jenkins 8 | To install Jenkins on your servers, execute the following command: 9 | ```bash 10 | ansible-playbook -i inventory.ini ./install_jenkins.yml 11 | ``` 12 | ## Uninstall Jenkins and Clean Up 13 | To uninstall Jenkins and clean up associated files and services from your servers, use the following command: 14 | ```bash 15 | ansible-playbook -i inventory.ini ./uninstall_jenkins.yml 16 | ``` 17 | This will: 18 | * Stop all Jenkins services. 19 | * Remove Jenkins packages. 20 | * Remove Jenkins repositories and GPG keys. 21 | * Clean up Jenkins directories (logs, cache, data). 
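The uninstall playbook itself is not reproduced here; as a rough sketch, its cleanup steps boil down to tasks of this shape (the service name and the `/var/lib/jenkins` and `/var/log/jenkins` paths are the usual Jenkins defaults and are assumptions, not verified against the playbook):

```yml
---
# Illustrative sketch only: the kind of tasks a Jenkins cleanup performs.
- name: Stop Jenkins service
  ansible.builtin.service:
    name: jenkins
    state: stopped
  failed_when: false   # keep going if the service is already gone

- name: Remove Jenkins data and log directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/jenkins
    - /var/log/jenkins
```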
22 | 23 | 24 | ## 💻 Supported Linux Operating Systems 25 | This playbook supports the following Linux distributions: 26 | * 🐧 **Debian:** 11,12 27 | * 🐧 **Ubuntu:** 20.04,22.04 28 | * 🐧 **RHEL:** 7,8 29 | * 🐧 **Rocky Linux:** 8,9 30 | 31 | ## ✅ Tested Operating Systems 32 | The playbook has been tested on the following OS versions: 33 | * ✅**Debian:** 11,12 34 | * ✅**Ubuntu:** 20.04,22.04 35 | * ✅**RHEL:** 7,8 36 | * ✅**Rocky Linux:** 8,9 37 | 38 | ## ⚙️ Supported Ansible Versions 39 | * ✅ ansible [core 2.16.3] 40 | * ❗️ ansible [core 2.17.3] (compatibility issues) 41 | 42 | > Note: The playbook assumes you are running Ansible as the root user. For non-root users, ensure you have `become` privileges configured. -------------------------------------------------------------------------------- /Ansible/jenkins/inventory.ini: -------------------------------------------------------------------------------- 1 | [jenkins_servers] 2 | server1 ansible_host=34.69.104.233 ansible_user=ismoilovdev 3 | server2 ansible_host=34.27.32.115 ansible_user=ismoilovdev 4 | server3 ansible_host=34.170.180.55 ansible_user=ismoilovdev -------------------------------------------------------------------------------- /Ansible/kafka/install.yml: -------------------------------------------------------------------------------- 1 | - hosts: kafka-nodes 2 | become: true 3 | vars: 4 | kafka_download_base_url: "https://downloads.apache.org/kafka" 5 | kafka_download_validate_certs: yes 6 | kafka_version: "3.8.0" 7 | kafka_scala_version: "2.13" 8 | kafka_create_user_group: "true" 9 | kafka_user: "kafka" 10 | kafka_group: "kafka" 11 | kafka_root_dir: "/opt" 12 | kafka_dir: "{{ kafka_root_dir }}/kafka" 13 | kafka_broker_id: 0 14 | kafka_log_dir: "/var/log/kafka" 15 | kafka_data_log_dirs: "/var/lib/kafka/logs" 16 | kafka_num_partitions: 3 17 | kafka_replication_factor: 3 18 | kafka_listeners: PLAINTEXT://0.0.0.0:9092 19 | kafka_zookeeper_connect: "137.184.143.123:2181,165.22.3.122:2181,159.223.131.6:2181" 20 | kafka_auto_create_topics_enable: false 21 | kafka_delete_topic_enable: true 22 | kafka_offsets_topic_replication_factor: 3 23 | kafka_transaction_state_log_replication_factor: 3 24 | kafka_transaction_state_log_min_isr: 2 25 | kafka_log_retention_hours: 168 26 | kafka_log_segment_bytes: 1073741824 27 | kafka_log_retention_check_interval_ms: 300000 28 | kafka_server_config_params: 29 | inter.broker.protocol.version: "3.5" 30 | log.message.format.version: "3.5" 31 | roles: 32 | - sleighzy.kafka -------------------------------------------------------------------------------- /Ansible/kafka/inventory.ini: -------------------------------------------------------------------------------- 1 | [kafka-nodes] 2 | rockylinux ansible_host=137.184.143.123 ansible_user=root 3 | debian ansible_host=165.22.3.122 ansible_user=root 4 | ubuntu ansible_host=159.223.131.6 ansible_user=root -------------------------------------------------------------------------------- /Ansible/keepalived/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/keepalived/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Keepalived on Master and Slave servers 3 | hosts: all 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Ensure Keepalived is installed 10 | ansible.builtin.package: 11 
| name: keepalived 12 | state: present 13 | 14 | - name: Generate Keepalived configuration for master 15 | ansible.builtin.template: 16 | src: templates/master.conf.j2 17 | dest: /etc/keepalived/keepalived.conf 18 | owner: root 19 | group: root 20 | mode: 0644 21 | when: "'master' in group_names" 22 | 23 | - name: Generate Keepalived configuration for slaves 24 | ansible.builtin.template: 25 | src: templates/slave.conf.j2 26 | dest: /etc/keepalived/keepalived.conf 27 | owner: root 28 | group: root 29 | mode: 0644 30 | when: "'slave' in group_names" 31 | 32 | - name: Restart Keepalived service 33 | ansible.builtin.service: 34 | name: keepalived 35 | state: restarted -------------------------------------------------------------------------------- /Ansible/keepalived/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Keepalived on multiple OS families 3 | hosts: all 4 | become: true 5 | tasks: 6 | - name: Install Keepalived on Debian/Ubuntu systems 7 | ansible.builtin.package: 8 | name: keepalived 9 | state: present 10 | when: ansible_facts['os_family'] == "Debian" 11 | 12 | - name: Install Keepalived on Red Hat/CentOS systems 13 | ansible.builtin.package: 14 | name: keepalived 15 | state: present 16 | when: ansible_facts['os_family'] == "RedHat" 17 | 18 | - name: Run keepalived --version and capture output 19 | ansible.builtin.command: keepalived --version 20 | register: keepalived_version 21 | changed_when: false 22 | 23 | - name: Combine stdout and stderr 24 | ansible.builtin.set_fact: 25 | keepalived_version_combined: > 26 | {{ keepalived_version.stdout + keepalived_version.stderr }} 27 | 28 | - name: Extract only Keepalived version 29 | ansible.builtin.set_fact: 30 | keepalived_version_only: > 31 | {{ keepalived_version_combined | regex_search('Keepalived v[0-9.]+') | default('Unknown version') }} 32 | 33 | - name: Display Keepalived version 34 | ansible.builtin.debug: 35 | msg: "Keepalived version: {{ keepalived_version_only }}" 36 | -------------------------------------------------------------------------------- /Ansible/keepalived/inventory.ini: -------------------------------------------------------------------------------- 1 | [master] 2 | rockylinux ansible_host=142.93.56.4 ansible_user=root 3 | 4 | [slave] 5 | debian ansible_host=198.211.96.150 ansible_user=root 6 | ubuntu ansible_host=68.183.107.200 ansible_user=root -------------------------------------------------------------------------------- /Ansible/keepalived/templates/master.conf.j2: -------------------------------------------------------------------------------- 1 | vrrp_instance VI_1 { 2 | state MASTER 3 | interface {{ interface }} 4 | virtual_router_id {{ virtual_router_id }} 5 | priority {{ master_priority }} 6 | advert_int {{ advert_int }} 7 | authentication { 8 | auth_type PASS 9 | auth_pass {{ auth_pass }} 10 | } 11 | virtual_ipaddress { 12 | {{ virtual_ipaddress }} 13 | } 14 | } -------------------------------------------------------------------------------- /Ansible/keepalived/templates/slave.conf.j2: -------------------------------------------------------------------------------- 1 | vrrp_instance VI_1 { 2 | state BACKUP 3 | interface {{ interface }} 4 | virtual_router_id {{ virtual_router_id }} 5 | priority {{ slave_priority }} 6 | advert_int {{ advert_int }} 7 | authentication { 8 | auth_type PASS 9 | auth_pass {{ auth_pass }} 10 | } 11 | virtual_ipaddress { 12 | {{ virtual_ipaddress }} 13 | } 14 | } 
-------------------------------------------------------------------------------- /Ansible/keepalived/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | interface: eth1 3 | virtual_router_id: 51 4 | auth_pass: 1111 5 | virtual_ipaddress: 10.116.0.68 6 | master_priority: 100 7 | slave_priority: 80 8 | advert_int: 1 -------------------------------------------------------------------------------- /Ansible/keydb/invnetory.ini: -------------------------------------------------------------------------------- 1 | [keydb_servers] 2 | ubuntu ansible_host=146.190.73.184 ansible_user=root 3 | debian ansible_host=157.245.251.104 ansible_user=root 4 | rockylinux ansible_host=157.230.85.171 ansible_user=root -------------------------------------------------------------------------------- /Ansible/keydb/keydb_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install and run KeyDB using Docker 3 | hosts: all 4 | become: yes 5 | 6 | vars: 7 | keydb_image: "eqalpha/keydb" 8 | keydb_container_name: "keydb" 9 | keydb_port: "6379" 10 | keydb_data_dir: "/opt/keydb/data" 11 | docker_installed: false 12 | 13 | tasks: 14 | - name: Check if Docker is installed 15 | command: docker --version 16 | register: docker_check 17 | ignore_errors: yes 18 | changed_when: false 19 | 20 | - name: Set docker_installed variable if Docker is installed 21 | set_fact: 22 | docker_installed: true 23 | when: docker_check.rc == 0 24 | 25 | - name: Stop playbook if Docker is not installed 26 | fail: 27 | msg: "Docker is not installed. Please install Docker before running this playbook." 28 | when: not docker_installed 29 | 30 | - name: Create KeyDB data directory if it doesn't exist 31 | file: 32 | path: "{{ keydb_data_dir }}" 33 | state: directory 34 | mode: '0755' 35 | 36 | - name: Pull the KeyDB Docker image 37 | docker_image: 38 | name: "{{ keydb_image }}" 39 | source: pull 40 | 41 | - name: Run the KeyDB container 42 | docker_container: 43 | name: "{{ keydb_container_name }}" 44 | image: "{{ keydb_image }}" 45 | state: started 46 | restart_policy: always 47 | ports: 48 | - "{{ keydb_port }}:{{ keydb_port }}" 49 | volumes: 50 | - "{{ keydb_data_dir }}:/data" 51 | command: keydb-server /etc/keydb/keydb.conf --appendonly yes -------------------------------------------------------------------------------- /Ansible/kibana/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/kibana/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | debian ansible_host=142.93.193.106 ansible_user=root 3 | ubuntu ansible_host=143.198.115.57 ansible_user=root 4 | rockylinux ansible_host=178.128.150.243 ansible_user=root -------------------------------------------------------------------------------- /Ansible/kibana/templates/kibana-yum.j2: -------------------------------------------------------------------------------- 1 | [kibana-{{ elastic_version }}.x] 2 | name=Kibana repository for {{ elastic_version }}.x packages 3 | baseurl={{ elastic_repo_url_yum }} 4 | gpgcheck=1 5 | gpgkey={{ elastic_gpg_key }} 6 | enabled=1 7 | autorefresh=1 8 | type=rpm-md -------------------------------------------------------------------------------- /Ansible/kibana/vars.yml: 
-------------------------------------------------------------------------------- 1 | elastic_version: "8" # 7 or 8 2 | elastic_gpg_key: "https://artifacts.elastic.co/GPG-KEY-elasticsearch" 3 | elastic_repo_url_apt: "https://artifacts.elastic.co/packages/{{ elastic_version }}.x/apt" 4 | elastic_repo_url_yum: "https://artifacts.elastic.co/packages/{{ elastic_version }}.x/yum" -------------------------------------------------------------------------------- /Ansible/minio/README.md: -------------------------------------------------------------------------------- 1 | # 📋 MinIO Object Storage Installation Playbook 2 | 3 | This playbook installs MinIO as a binary on servers using the **Single-Node Single-Drive** installation method. 4 | 5 | ## 🛠️ Usage 6 | 7 | In the `vars.yml` file, the following variables should be defined: 8 | 9 | ```yml 10 | --- 11 | minio_directory: /mnt/data 12 | minio_admin_user: admin 13 | minio_admin_user_password: wo#*4fd-LDSsgsa 14 | ``` 15 | ## Install MinIO 16 | Run the following command to install MinIO on your servers: 17 | ```bash 18 | ansible-playbook -i inventory.ini ./install_minio.yml 19 | ``` 20 | ## Clean up and Uninstall MinIO 21 | To clean up and uninstall MinIO, use the following command: 22 | ```bash 23 | ansible-playbook -i inventory.ini ./uninstall_minio.yml 24 | ``` 25 | 26 | ## 💻 Supported Linux Operating Systems 27 | This playbook supports the following Linux distributions: 28 | * 🐧 **Debian:** 11,12 29 | * 🐧 **Ubuntu:** 20.04,22.04 30 | * 🐧 **RHEL:** 7,8 31 | * 🐧 **Rocky Linux:** 8,9 32 | 33 | ## ✅ Tested Operating Systems 34 | The playbook has been tested on the following OS versions: 35 | * ✅**Debian:** 11,12 36 | * ✅**Ubuntu:** 20.04,22.04 37 | * ✅**RHEL:** 7,8 38 | * ✅**Rocky Linux:** 8,9 39 | 40 | ## ⚙️ Supported Ansible Versions 41 | * ✅ ansible [core 2.16.3] 42 | * ❗️ ansible [core 2.17.3] (compatibility issues) 43 | 44 | > Note: The playbook assumes you are running Ansible as the root user. For non-root users, ensure you have `become` privileges configured. -------------------------------------------------------------------------------- /Ansible/minio/inventory.ini: -------------------------------------------------------------------------------- 1 | [minio_servers] 2 | debian ansible_host=34.170.180.55 ansible_user=ismoilovdev 3 | centos ansible_host=34.69.104.233 ansible_user=ismoilovdev 4 | ubuntu ansible_host=34.27.32.115 ansible_user=ismoilovdev -------------------------------------------------------------------------------- /Ansible/minio/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | minio_directory: /mnt/data 3 | minio_admin_user: admin 4 | minio_admin_user_password: wo#*4fd-LDSsgsa 5 | -------------------------------------------------------------------------------- /Ansible/mongodb/README.md: -------------------------------------------------------------------------------- 1 | # 🚀 MongoDB Setup with Ansible 2 | 3 | This repository contains Ansible playbooks for installing and configuring MongoDB using the [community.mongodb](https://galaxy.ansible.com/community/mongodb) collection. The setup is divided into two stages: 4 | 1. **Installation**: Install MongoDB on the target servers. 5 | 2. **Configuration**: Set up MongoDB with users and security settings. 
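If you prefer a single entry point, the two stages can be chained in order with a small wrapper playbook. This is only a sketch; a `site.yml` like this is not part of the repository:

```yml
---
# Hypothetical site.yml: runs the installation stage, then the configuration stage.
- import_playbook: install.yml
- import_playbook: configure_mongodb.yml
```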
6 | 7 | ## 📋 Prerequisites 8 | 9 | Before running the playbooks, ensure that the following are installed and properly configured: 10 | 11 | - Ansible 2.9+ 12 | - Target servers with SSH access 13 | - `community.mongodb` Ansible collection 14 | 15 | You can install the required collection with the following command: 16 | 17 | ```bash 18 | ansible-galaxy collection install community.mongodb 19 | ``` 20 | ## 🛠️ Playbooks 21 | 22 | ## Install MongoDB 23 | The install_mongodb.yml playbook installs MongoDB on the target servers. 24 | ```bash 25 | ansible-playbook -i inventory.ini install.yml 26 | ``` 27 | ## Configure MongoDB 28 | The `configure_mongodb.yml` playbook configures MongoDB with authentication, binds to the appropriate IP address, and creates admin users. 29 | 30 | ```bash 31 | ansible-playbook -i inventory.ini configure_mongodb.yml 32 | ``` -------------------------------------------------------------------------------- /Ansible/mongodb/configure_mongodb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure MongoDB 3 | hosts: mongodb_servers 4 | become: true 5 | vars: 6 | admin_user: "admin" 7 | admin_password: "sdlkjqe092" 8 | database_name: "admin" 9 | mongodb_port: 27017 10 | auth_mechanism: "SCRAM-SHA-256" 11 | 12 | roles: 13 | - role: community.mongodb.mongodb_mongod 14 | 15 | tasks: 16 | - name: Configure MongoDB without authentication 17 | include_role: 18 | name: community.mongodb.mongodb_mongod 19 | vars: 20 | mongodb_mongod_security_authorization: "disabled" 21 | mongodb_mongod_net_bind_ip: "0.0.0.0" 22 | mongodb_mongod_net_port: "{{ mongodb_port }}" 23 | 24 | - name: Create admin user 25 | community.mongodb.mongodb_user: 26 | name: "{{ admin_user }}" 27 | password: "{{ admin_password }}" 28 | roles: 29 | - role: "userAdminAnyDatabase" 30 | db: "{{ database_name }}" 31 | database: "{{ database_name }}" 32 | auth_mechanism: "{{ auth_mechanism }}" 33 | 34 | - name: Enable MongoDB authentication 35 | include_role: 36 | name: community.mongodb.mongodb_mongod 37 | vars: 38 | mongodb_mongod_security_authorization: "enabled" 39 | mongodb_mongod_net_bind_ip: "0.0.0.0" 40 | mongodb_mongod_net_port: "{{ mongodb_port }}" 41 | 42 | - name: Restart MongoDB with authentication 43 | ansible.builtin.systemd: 44 | name: mongod 45 | state: restarted 46 | enabled: true 47 | -------------------------------------------------------------------------------- /Ansible/mongodb/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install MongoDB 3 | hosts: mongodb_servers 4 | become: true 5 | vars: 6 | mongodb_version: "7.0" 7 | collections: 8 | - community.mongodb 9 | roles: 10 | - mongodb_repository 11 | - mongodb_install 12 | 13 | tasks: 14 | - name: Start MongoDB service 15 | ansible.builtin.systemd: 16 | name: mongod 17 | state: started 18 | enabled: true 19 | 20 | - name: Wait for MongoDB service to be up 21 | ansible.builtin.wait_for: 22 | host: "{{ ansible_host }}" 23 | port: 27017 24 | state: started -------------------------------------------------------------------------------- /Ansible/mongodb/inventory.ini: -------------------------------------------------------------------------------- 1 | [mongodb_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- 
/Ansible/mysql/README.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | ansible-galaxy role install geerlingguy.mysql 3 | ansible-galaxy collection install community.mysql 4 | ``` -------------------------------------------------------------------------------- /Ansible/mysql/configure_mysql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure MySQL and grant root access 3 | hosts: mysql_servers 4 | become: yes 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Ensure MySQL is listening on all interfaces (Debian/Ubuntu/MariaDB) 10 | lineinfile: 11 | path: /etc/mysql/my.cnf 12 | regexp: '^bind-address' 13 | line: 'bind-address = {{ mysql_bind_address }}' 14 | state: present 15 | when: ansible_os_family == "Debian" 16 | notify: Restart MySQL 17 | 18 | - name: Ensure MySQL is listening on all interfaces (RHEL/CentOS) 19 | lineinfile: 20 | path: /etc/my.cnf 21 | regexp: '^bind-address' 22 | line: 'bind-address = {{ mysql_bind_address }}' 23 | state: present 24 | when: ansible_os_family == "RedHat" 25 | notify: Restart MySQL 26 | 27 | - name: Grant root access from any host 28 | mysql_user: 29 | name: root 30 | host: '%' 31 | password: "{{ mysql_root_password }}" 32 | priv: '*.*:ALL,GRANT' 33 | state: present 34 | 35 | - name: Flush privileges to apply changes 36 | mysql_query: 37 | query: "FLUSH PRIVILEGES;" 38 | login_user: root 39 | login_password: "{{ mysql_root_password }}" 40 | ignore_errors: yes 41 | 42 | handlers: 43 | - name: Restart MySQL 44 | service: 45 | name: mysql 46 | state: restarted 47 | -------------------------------------------------------------------------------- /Ansible/mysql/install_mysql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install MySQL 3 | hosts: mysql_servers 4 | become: yes 5 | 6 | vars: 7 | mysql_root_password: "123" 8 | mysql_bind_address: '0.0.0.0' 9 | mysql_root_password_update: true 10 | mysql_enabled_on_startup: true 11 | 12 | roles: 13 | - role: geerlingguy.mysql -------------------------------------------------------------------------------- /Ansible/mysql/inventory.ini: -------------------------------------------------------------------------------- 1 | [mysql_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/mysql/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mysql_root_password: "wfllqw34p5lw" 3 | mysql_bind_address: "0.0.0.0" -------------------------------------------------------------------------------- /Ansible/nexus/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/nexus/cleanup_nexus_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nexus_servers 3 | become: yes 4 | vars_files: 5 | - vars.yml 6 | tasks: 7 | - name: Stop Nexus container if exists 8 | docker_container: 9 | name: "{{ nexus_container_name }}" 10 | state: stopped 11 | ignore_errors: yes 12 | failed_when: false 13 | 14 | - name: Remove Nexus container if exists 15 | docker_container: 16 | name: "{{
nexus_container_name }}" 17 | state: absent 18 | ignore_errors: yes 19 | failed_when: false 20 | 21 | - name: Remove Nexus Docker image if exists 22 | docker_image: 23 | name: "{{ nexus_image }}" 24 | state: absent 25 | ignore_errors: yes 26 | failed_when: false 27 | 28 | - name: Remove Nexus data directory if exists 29 | file: 30 | path: "{{ nexus_data_dir }}" 31 | state: absent 32 | force: yes 33 | ignore_errors: yes 34 | failed_when: false 35 | 36 | - name: Remove Nexus parent directory if exists 37 | file: 38 | path: "/mnt/nexus" 39 | state: absent 40 | force: yes 41 | ignore_errors: yes 42 | failed_when: false -------------------------------------------------------------------------------- /Ansible/nexus/install_nexus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nexus_servers 3 | become: yes 4 | vars_files: 5 | - vars.yml 6 | tasks: 7 | - name: Create Nexus data directory 8 | file: 9 | path: "{{ nexus_data_dir }}" 10 | state: directory 11 | owner: 200 12 | recurse: yes 13 | 14 | - name: Run Nexus container 15 | docker_container: 16 | name: "{{ nexus_container_name }}" 17 | image: "{{ nexus_image }}" 18 | state: started 19 | ports: 20 | - "{{ nexus_port }}:{{ nexus_port }}" 21 | volumes: 22 | - "{{ nexus_data_dir }}:/nexus-data" 23 | restart_policy: always 24 | 25 | - name: Wait for 2 minutes before checking Nexus status 26 | pause: 27 | minutes: 2 28 | 29 | - name: Wait for Nexus to start 30 | wait_for: 31 | port: "{{ nexus_port }}" 32 | delay: 30 33 | timeout: 300 34 | 35 | - name: Retrieve Nexus admin password 36 | command: docker exec "{{ nexus_container_name }}" cat "{{ nexus_admin_password_file }}" 37 | register: admin_password_output 38 | 39 | - name: Show Nexus admin password 40 | debug: 41 | msg: "Nexus admin password is: {{ admin_password_output.stdout }}" -------------------------------------------------------------------------------- /Ansible/nexus/invnetory.ini: -------------------------------------------------------------------------------- 1 | [nexus_servers] 2 | nexus-server ansible_host=159.89.94.148 ansible_user=root -------------------------------------------------------------------------------- /Ansible/nexus/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nexus_image: "sonatype/nexus3:latest" 3 | nexus_data_dir: "/mnt/nexus/nexus-data" 4 | nexus_container_name: "nexus" 5 | nexus_port: 8081 6 | nexus_admin_password_file: "/nexus-data/admin.password" 7 | nexus_parent_dir: "/mnt/nexus" -------------------------------------------------------------------------------- /Ansible/nfs/inventory.ini: -------------------------------------------------------------------------------- 1 | [nfs_server] 2 | server1 ansible_host=35.188.79.26 ansible_user=root -------------------------------------------------------------------------------- /Ansible/nfs/nfs_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nfs_server 3 | become: yes 4 | vars_files: 5 | - vars.yml 6 | 7 | tasks: 8 | - name: Install NFS packages (RedHat) 9 | yum: 10 | name: nfs-utils 11 | state: present 12 | when: ansible_os_family == "RedHat" 13 | 14 | - name: Install NFS packages (Debian) 15 | apt: 16 | name: 17 | - nfs-kernel-server 18 | - nfs-common 19 | state: present 20 | update_cache: yes 21 | when: ansible_os_family == "Debian" 22 | 23 | - name: Create NFS export directory 24 | file: 25 | path: "{{ nfs_export_path }}" 26 | state: 
directory 27 | owner: root 28 | group: root 29 | mode: '0755' 30 | 31 | - name: Configure NFS exports 32 | copy: 33 | content: "{{ nfs_export_path }} {{ nfs_allowed_hosts }}(rw,sync,no_root_squash,no_subtree_check)\n" 34 | dest: /etc/exports 35 | owner: root 36 | group: root 37 | mode: '0644' 38 | 39 | - name: Start and enable NFS service 40 | systemd: 41 | name: "{{ nfs_service_name[ansible_os_family] }}" 42 | enabled: yes 43 | state: started 44 | 45 | - name: Export NFS directories 46 | command: exportfs -ra 47 | notify: restart nfs 48 | 49 | handlers: 50 | - name: restart nfs 51 | systemd: 52 | name: "{{ nfs_service_name[ansible_os_family] }}" 53 | state: restarted -------------------------------------------------------------------------------- /Ansible/nfs/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nfs_export_path: /srv/nfs 3 | nfs_allowed_hosts: "*" 4 | nfs_service_name: 5 | RedHat: nfs-server 6 | Debian: nfs-kernel-server -------------------------------------------------------------------------------- /Ansible/nginx/install_nginx.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install and configure NGINX 3 | hosts: nginx_servers 4 | become: yes 5 | roles: 6 | - role: nginxinc.nginx 7 | vars: 8 | nginx_install_epel_repo: true # EPEL repository installation 9 | nginx_enable: true # Enable NGINX service 10 | nginx_start: true # Start NGINX service 11 | 12 | tasks: 13 | - name: Create sites-available directory 14 | file: 15 | path: /etc/nginx/sites-available 16 | state: directory 17 | mode: '0755' 18 | 19 | - name: Create sites-enabled directory 20 | file: 21 | path: /etc/nginx/sites-enabled 22 | state: directory 23 | mode: '0755' 24 | 25 | - name: Add sites-enabled to nginx.conf 26 | lineinfile: 27 | path: /etc/nginx/nginx.conf 28 | regexp: 'include /etc/nginx/conf.d/\*.conf;' 29 | insertafter: 'include /etc/nginx/conf.d/\*.conf;' 30 | line: ' include /etc/nginx/sites-enabled/*;' 31 | state: present 32 | 33 | - name: Create a default site configuration in sites-available 34 | copy: 35 | dest: /etc/nginx/sites-available/default 36 | content: | 37 | server { 38 | listen 80 default_server; 39 | server_name _; 40 | root /var/www/html; 41 | 42 | location / { 43 | try_files $uri $uri/ =404; 44 | } 45 | } 46 | mode: '0644' 47 | 48 | - name: Enable default site by creating symlink in sites-enabled 49 | file: 50 | src: /etc/nginx/sites-available/default 51 | dest: /etc/nginx/sites-enabled/default 52 | state: link 53 | 54 | - name: Reload NGINX to apply changes 55 | service: 56 | name: nginx 57 | state: reloaded 58 | -------------------------------------------------------------------------------- /Ansible/nginx/inventory.ini: -------------------------------------------------------------------------------- 1 | [nginx_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root 5 | 6 | [target_servers] 7 | debian ansible_host=10.128.0.47 ansible_user=root 8 | ubuntu ansible_host=10.128.0.48 ansible_user=root 9 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/nginx/nginx_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure NGINX reverse proxy 3 | hosts: target_servers 4 | become: yes 5 | vars: 6 | domen: "nginx.helm.uz" 
7 | port: "3000" 8 | 9 | tasks: 10 | - name: Create NGINX configuration in sites-available 11 | copy: 12 | dest: /etc/nginx/sites-available/{{ domen }}.conf 13 | content: | 14 | server { 15 | listen 80; 16 | server_name {{ domen }} www.{{ domen }}; 17 | 18 | location / { 19 | proxy_pass http://localhost:{{ port }}; 20 | proxy_set_header Host $host; 21 | proxy_set_header X-Real-IP $remote_addr; 22 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 23 | proxy_set_header X-Forwarded-Proto $scheme; 24 | } 25 | } 26 | mode: '0644' 27 | 28 | - name: Create symbolic link in sites-enabled 29 | file: 30 | src: /etc/nginx/sites-available/{{ domen }}.conf 31 | dest: /etc/nginx/sites-enabled/{{ domen }}.conf 32 | state: link 33 | force: yes 34 | 35 | - name: Reload NGINX to apply new configuration 36 | service: 37 | name: nginx 38 | state: reloaded -------------------------------------------------------------------------------- /Ansible/nodejs/install_nodejs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nodejs_servers 3 | become: yes 4 | vars: 5 | nodejs_version: "21.x" 6 | roles: 7 | - geerlingguy.nodejs -------------------------------------------------------------------------------- /Ansible/nodejs/inventory.ini: -------------------------------------------------------------------------------- 1 | [nodejs_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/percona-pmm/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/percona-pmm/cleanup_client_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup PMM Client Docker Setup 3 | hosts: pmm_clients 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Stop PMM client container if exists 10 | docker_container: 11 | name: "{{ pmm_client_container }}" 12 | state: stopped 13 | ignore_errors: yes 14 | failed_when: false 15 | 16 | - name: Remove PMM client container if exists 17 | docker_container: 18 | name: "{{ pmm_client_container }}" 19 | state: absent 20 | ignore_errors: yes 21 | failed_when: false 22 | 23 | - name: Remove PMM client Docker image if exists 24 | docker_image: 25 | name: "{{ pmm_client_docker_image }}" 26 | tag: "{{ pmm_client_docker_tag }}" 27 | state: absent 28 | ignore_errors: yes 29 | failed_when: false 30 | 31 | - name: Remove PMM data directory if exists 32 | file: 33 | path: "{{ pmm_data_host_path }}" 34 | state: absent 35 | force: yes 36 | ignore_errors: yes 37 | failed_when: false -------------------------------------------------------------------------------- /Ansible/percona-pmm/cleanup_package.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup and Uninstall PMM Client on multiple OS 3 | hosts: pmm_clients 4 | become: yes 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Remove PMM client package on Debian-based systems 10 | ansible.builtin.apt: 11 | name: pmm2-client 12 | state: absent 13 | purge: yes 14 | when: ansible_os_family == "Debian" 15 | 16 | - name: Remove Percona APT repository package file (Debian-based) 
17 | ansible.builtin.file: 18 | path: "/tmp/percona-release_latest.generic_all.deb" 19 | state: absent 20 | when: ansible_os_family == "Debian" 21 | 22 | - name: Clean APT cache (Debian-based) 23 | ansible.builtin.apt: 24 | autoclean: yes 25 | when: ansible_os_family == "Debian" 26 | 27 | - name: Remove PMM client package on Red Hat-based systems 28 | ansible.builtin.yum: 29 | name: pmm2-client 30 | state: absent 31 | when: ansible_os_family == "RedHat" 32 | 33 | - name: Remove Percona YUM repository package (Red Hat-based) 34 | ansible.builtin.command: 35 | cmd: "yum remove -y {{ percona_rpm_repo_url }}" 36 | when: ansible_os_family == "RedHat" 37 | 38 | - name: Clean YUM cache (Red Hat-based) 39 | ansible.builtin.command: 40 | cmd: "yum clean all" 41 | when: ansible_os_family == "RedHat" 42 | 43 | - name: Verify PMM Client removal 44 | ansible.builtin.command: 45 | cmd: "pmm-admin --version" 46 | register: pmm_client_removal_check 47 | failed_when: pmm_client_removal_check.rc == 0 48 | changed_when: false 49 | 50 | - name: Display PMM Client removal status 51 | ansible.builtin.debug: 52 | msg: "PMM Client removal check: OK - PMM Client has been removed" -------------------------------------------------------------------------------- /Ansible/percona-pmm/cleanup_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Uninstall PMM Server Docker Setup 3 | hosts: pmm_servers 4 | become: yes 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Stop PMM Server container if exists 10 | docker_container: 11 | name: "{{ pmm_container_name }}" 12 | state: stopped 13 | ignore_errors: yes 14 | failed_when: false 15 | 16 | - name: Remove PMM Server container if exists 17 | docker_container: 18 | name: "{{ pmm_container_name }}" 19 | state: absent 20 | ignore_errors: yes 21 | failed_when: false 22 | 23 | - name: Remove PMM Docker image if exists 24 | docker_image: 25 | name: "{{ pmm_image }}" 26 | state: absent 27 | ignore_errors: yes 28 | failed_when: false 29 | 30 | - name: Remove PMM data directory if exists 31 | file: 32 | path: "{{ pmm_data_dir }}" 33 | state: absent 34 | force: yes 35 | ignore_errors: yes 36 | failed_when: false -------------------------------------------------------------------------------- /Ansible/percona-pmm/inventory.ini: -------------------------------------------------------------------------------- 1 | [pmm_servers] 2 | pmm1 ansible_host=34.56.26.160 ansible_user=root 3 | ; pmm2 ansible_host=35.222.205.210 ansible_user=root 4 | 5 | [pmm_clients] 6 | rockylinux ansible_host=142.93.56.4 ansible_user=root 7 | debian ansible_host=198.211.96.150 ansible_user=root 8 | ubuntu ansible_host=68.183.107.200 ansible_user=root 9 | 10 | [postgresql-servers] 11 | rockylinux ansible_host=142.93.56.4 ansible_user=root 12 | debian ansible_host=198.211.96.150 ansible_user=root 13 | ubuntu ansible_host=68.183.107.200 ansible_user=root -------------------------------------------------------------------------------- /Ansible/percona-pmm/pmm_client_with_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup PMM Client using Docker with Host Path 3 | hosts: pmm_clients 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | 8 | tasks: 9 | - name: Check if Docker is installed 10 | shell: "command -v docker" 11 | register: docker_check 12 | changed_when: false 13 | failed_when: false 14 | 15 | - name: Display message if Docker is not installed 16 | debug: 17 | msg: "Docker is 
not installed on this host." 18 | when: docker_check.rc != 0 19 | 20 | - name: Skip host if either Docker is missing 21 | meta: end_host 22 | when: docker_check.rc != 0 23 | 24 | - name: Pull PMM client Docker image 25 | docker_image: 26 | name: "{{ pmm_client_docker_image }}" 27 | tag: "{{ pmm_client_docker_tag }}" 28 | source: pull 29 | 30 | - name: Run PMM client container in setup mode with host path for persistence 31 | docker_container: 32 | name: "{{ pmm_client_container }}" 33 | image: "{{ pmm_client_docker_image }}:{{ pmm_client_docker_tag }}" 34 | env: 35 | PMM_AGENT_SERVER_ADDRESS: "{{ pmm_server_address }}" 36 | PMM_AGENT_SERVER_USERNAME: "{{ pmm_server_username }}" 37 | PMM_AGENT_SERVER_PASSWORD: "{{ admin_password }}" 38 | PMM_AGENT_SERVER_INSECURE_TLS: "{{ pmm_server_insecure_tls }}" 39 | PMM_AGENT_SETUP: "1" 40 | PMM_AGENT_CONFIG_FILE: "{{ pmm_agent_config_file }}" 41 | volumes: 42 | - "{{ pmm_data_host_path }}:/srv" 43 | state: started 44 | restart_policy: no 45 | 46 | - name: Check PMM client status 47 | command: docker exec "{{ pmm_client_container }}" pmm-admin status 48 | register: pmm_status 49 | ignore_errors: true 50 | 51 | - name: Display PMM client status 52 | debug: 53 | var: pmm_status.stdout -------------------------------------------------------------------------------- /Ansible/percona/postgresql/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/percona/postgresql/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | debian ansible_host=142.93.193.106 ansible_user=root 3 | ubuntu ansible_host=143.198.115.57 ansible_user=root 4 | rockylinux ansible_host=178.128.150.243 ansible_user=root -------------------------------------------------------------------------------- /Ansible/percona/postgresql/vars.yml: -------------------------------------------------------------------------------- 1 | percona_pg_version: "17" 2 | os_family_deb: 3 | - "debian" 4 | - "ubuntu" 5 | os_family_rpm: 6 | - "centos" 7 | - "rocky" 8 | - "oracle" 9 | - "redhat" -------------------------------------------------------------------------------- /Ansible/php/install_php.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: php_servers 3 | become: yes 4 | vars: 5 | php_version: "8.3" 6 | php_webserver_daemon: "nginx" 7 | roles: 8 | - name: geerlingguy.repo-remi 9 | when: ansible_os_family == 'RedHat' 10 | - geerlingguy.php-versions 11 | # - geerlingguy.nginx # Ensure Nginx is installed 12 | - geerlingguy.php -------------------------------------------------------------------------------- /Ansible/php/inventory.ini: -------------------------------------------------------------------------------- 1 | [php_servers] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/portainer/README.md: -------------------------------------------------------------------------------- 1 | # Portainer Installation 🚀 2 | 3 | This repository contains Ansible playbooks for installing and cleaning up Portainer Community Edition (CE) and Business Edition (BE). These playbooks manage Docker containers and images. 
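The install playbooks below call `docker run` directly, which is simple but not idempotent on reruns. Purely as an illustration (not part of this repository), an equivalent task using the `community.docker` collection could look roughly like the sketch below; the collection, module parameters, and task name are the assumptions here:

```yaml
# Hedged sketch: an idempotent alternative to the `docker run` command used by
# install_portainer_ce.yml; assumes the community.docker collection is installed.
- name: Run Portainer CE container (illustrative alternative)
  community.docker.docker_container:
    name: portainer
    image: portainer/portainer-ce:2.21.2
    restart_policy: always
    published_ports:
      - "8000:8000"
      - "9443:9443"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - portainer_data:/data
    state: started
```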
4 | 5 | ## 🛠 Installation Playbooks 6 | 7 | ### 📥 `install_portainer_ce.yml` 8 | This playbook installs Portainer CE (Community Edition): 9 | - Creates a Docker volume. 10 | - Runs the Portainer container. 11 | - Displays the list of Docker containers. 12 | 13 | **Usage:** 14 | ```bash 15 | ansible-playbook -i inventory.ini install_portainer_ce.yml 16 | ``` 17 | 18 | ### 📥 `install_portainer_ee.yml` 19 | This playbook installs Portainer EE (Business Edition): 20 | - Creates a Docker volume. 21 | - Runs the Portainer container. 22 | - Displays the list of Docker containers. 23 | 24 | **Usage:** 25 | ```bash 26 | ansible-playbook -i inventory.ini install_portainer_ee.yml 27 | ``` 28 | 29 | ### 🧹 Cleanup Playbook 30 | 🗑️ `clean_portainer.yml` 31 | This playbook cleans up Portainer CE and BE installations: 32 | 33 | * Removes the Portainer container (if it exists). 34 | * Removes the Docker volume (if it exists). 35 | * Removes Portainer CE and BE Docker images (if they exist). 36 | **Usage:** 37 | 38 | ```bash 39 | ansible-playbook -i inventory.ini clean_portainer.yml 40 | ``` 41 | 42 | ### 🔍 Notes 43 | * The playbooks check for the existence of containers, volumes, and images before attempting removal. 44 | * Docker images are checked using the **2.21.2** tag for Portainer CE and EE. -------------------------------------------------------------------------------- /Ansible/portainer/clean_portainer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Clean up Portainer installation 3 | hosts: all 4 | become: yes 5 | 6 | tasks: 7 | - name: Check if Portainer container exists 8 | shell: docker ps -a --format '{{"{{.Names}}"}}' | grep -w portainer || true 9 | register: portainer_exists 10 | changed_when: false 11 | 12 | - name: Remove Portainer container 13 | command: docker rm -f portainer 14 | when: portainer_exists.stdout != "" 15 | ignore_errors: yes 16 | 17 | - name: Check if Portainer volume exists 18 | shell: docker volume ls --format '{{"{{.Name}}"}}' | grep -w portainer_data || true 19 | register: volume_exists 20 | changed_when: false 21 | 22 | - name: Remove Portainer Docker volume 23 | command: docker volume rm portainer_data 24 | when: volume_exists.stdout != "" 25 | ignore_errors: yes 26 | 27 | - name: Check if Portainer CE image exists 28 | shell: docker images --format '{{"{{.Repository}}:{{.Tag}}"}}' | grep -w 'portainer/portainer-ce:2.21.2' || true 29 | register: ce_image_exists 30 | changed_when: false 31 | 32 | - name: Remove Portainer CE image 33 | command: docker rmi portainer/portainer-ce:2.21.2 34 | when: ce_image_exists.stdout != "" 35 | 36 | - name: Check if Portainer BE image exists 37 | shell: docker images --format '{{"{{.Repository}}:{{.Tag}}"}}' | grep -w 'portainer/portainer-ee:2.21.2' || true 38 | register: be_image_exists 39 | changed_when: false 40 | 41 | - name: Remove Portainer BE image 42 | command: docker rmi portainer/portainer-ee:2.21.2 43 | when: be_image_exists.stdout != "" 44 | -------------------------------------------------------------------------------- /Ansible/portainer/install_portainer_ce.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Portainer Community Edition 3 | hosts: portainer_ce 4 | become: yes 5 | 6 | tasks: 7 | - name: Create Docker volume for Portainer data 8 | command: docker volume create portainer_data 9 | 10 | - name: Run Portainer CE container 11 | command: docker run -d -p 8000:8000 -p 9443:9443 --name portainer
--restart=always -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ce:2.21.2 12 | 13 | - name: Display Docker containers 14 | command: docker ps 15 | register: result 16 | 17 | - name: Show Docker container output 18 | debug: 19 | var: result.stdout -------------------------------------------------------------------------------- /Ansible/portainer/install_portainer_ee.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Portainer Business Edition 3 | hosts: portainer_ee 4 | become: yes 5 | 6 | tasks: 7 | - name: Create Docker volume for Portainer data 8 | command: docker volume create portainer_data 9 | 10 | - name: Run Portainer BE container 11 | command: docker run -d -p 8000:8000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ee:2.21.2 12 | 13 | - name: Display Docker containers 14 | command: docker ps 15 | register: result 16 | 17 | - name: Show Docker container output 18 | debug: 19 | var: result.stdout -------------------------------------------------------------------------------- /Ansible/portainer/inventory.ini: -------------------------------------------------------------------------------- 1 | [portainer_ce] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root 5 | 6 | [portainer_ee] 7 | debian ansible_host=10.128.0.47 ansible_user=root 8 | ubuntu ansible_host=10.128.0.48 ansible_user=root 9 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/postgresql/README.md: -------------------------------------------------------------------------------- 1 | # 🚀 PostgreSQL Setup with Ansible 2 | 3 | This repository contains Ansible playbooks for installing and configuring PostgreSQL using the [ANXS.postgresql](https://galaxy.ansible.com/ui/standalone/roles/ANXS/postgresql/documentation/) role. The setup is divided into two stages: 4 | 1. **Installation**: Install PostgreSQL on the target servers. 5 | 2. **Configuration**: Set up PostgreSQL users, databases, and other settings. 6 | 7 | ## 📋 Prerequisites 8 | 9 | Before running the playbooks, ensure that the following are installed and properly configured: 10 | 11 | - Ansible 2.9+ 12 | - python3 13 | - Target servers with SSH access 14 | - PostgreSQL role from Ansible Galaxy 15 | 16 | You can install the required role with the following command: 17 | 18 | ```bash 19 | sudo yum install -y python3 20 | ansible-galaxy collection install community.postgresql 21 | ansible-galaxy role install ANXS.postgresql,v1.16.0 22 | ``` 23 | 24 | ## 🛠️ Playbooks 25 | ### Install PostgreSQL 26 | The install_postgresql.yml playbook installs PostgreSQL on the target servers. 27 | 28 | ```bash 29 | ansible-playbook -i inventory.ini install_postgresql.yml 30 | ``` 31 | ### Configure PostgreSQL 32 | The `configure_postgresql.yml` playbook configures PostgreSQL with custom users, roles, and authentication settings. 
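Both stages read their settings from `vars.yml`; for reference, an override for the role-managed users could look like the sketch below (the username and password are placeholders, not the repository's values):

```yaml
# Illustrative vars.yml fragment for the ANXS.postgresql role; name and pass are placeholders.
postgresql_users:
  - name: app_user
    pass: ChangeMe123
    encrypted: yes
    state: "present"
```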
33 | 34 | ```bash 35 | ansible-playbook -i inventory.ini configure_postgresql.yml 36 | ``` -------------------------------------------------------------------------------- /Ansible/postgresql/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/postgresql/configure_postgresql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure PostgreSQL 3 | hosts: all 4 | become: yes 5 | vars_files: 6 | - vars.yml 7 | tasks: 8 | - name: Update listen_addresses in postgresql.conf 9 | ansible.builtin.lineinfile: 10 | path: "{{ postgresql_conf_path }}" 11 | regexp: "^#?listen_addresses\\s*=\\s*.*" 12 | line: "listen_addresses = '*'" 13 | notify: Restart PostgreSQL 14 | 15 | - name: Add IPv4 entry to pg_hba.conf 16 | ansible.builtin.lineinfile: 17 | path: "{{ pg_hba_conf_path }}" 18 | line: "host all all 0.0.0.0/0 md5" 19 | state: present 20 | notify: Restart PostgreSQL 21 | 22 | - name: Add IPv6 entry to pg_hba.conf 23 | ansible.builtin.lineinfile: 24 | path: "{{ pg_hba_conf_path }}" 25 | line: "host all all ::/0 md5" 26 | state: present 27 | notify: Restart PostgreSQL 28 | 29 | handlers: 30 | - name: Restart PostgreSQL 31 | ansible.builtin.service: 32 | name: "{{ postgresql_service_name }}" 33 | state: restarted -------------------------------------------------------------------------------- /Ansible/postgresql/install_postgresql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install PostgreSQL 16 3 | hosts: all 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | roles: 8 | - role: ANXS.postgresql -------------------------------------------------------------------------------- /Ansible/postgresql/inventory.ini: -------------------------------------------------------------------------------- 1 | [postgresql_servers] 2 | rockylinux ansible_host=142.93.56.4 ansible_user=root 3 | debian ansible_host=198.211.96.150 ansible_user=root 4 | ubuntu ansible_host=68.183.107.200 ansible_user=root -------------------------------------------------------------------------------- /Ansible/postgresql/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_python_interpreter: "/usr/bin/python3" 3 | postgresql_version: 16 4 | postgresql_encoding: 'UTF-8' 5 | postgresql_locale: 'en_US.UTF-8' 6 | postgresql_ctype: 'en_US.UTF-8' 7 | postgresql_admin_user: "postgres" 8 | postgresql_users: 9 | - name: postgres 10 | pass: 123eefgmew 11 | encrypted: yes 12 | state: "present" 13 | 14 | postgresql_conf_path_debian: "/etc/postgresql/{{ postgresql_version }}/main/postgresql.conf" 15 | pg_hba_conf_path_debian: "/etc/postgresql/{{ postgresql_version }}/main/pg_hba.conf" 16 | postgresql_service_name_debian: "postgresql" 17 | 18 | # CentOS/RedHat 19 | postgresql_conf_path_redhat: "/etc/postgresql/{{ postgresql_version }}/data/postgresql.conf" 20 | pg_hba_conf_path_redhat: "/etc/postgresql/{{ postgresql_version }}/data/pg_hba.conf" 21 | postgresql_service_name_redhat: "postgresql-{{ postgresql_version }}" 22 | 23 | # Path selection logic based on OS family 24 | postgresql_conf_path: "{{ postgresql_conf_path_debian if ansible_os_family == 'Debian' else postgresql_conf_path_redhat }}" 25 | pg_hba_conf_path: "{{ pg_hba_conf_path_debian if ansible_os_family == 'Debian' else 
pg_hba_conf_path_redhat }}" 26 | postgresql_service_name: "{{ postgresql_service_name_debian if ansible_os_family == 'Debian' else postgresql_service_name_redhat }}" -------------------------------------------------------------------------------- /Ansible/proxy/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | rockylinux ansible_host=34.27.185.223 ansible_user=root 3 | debian ansible_host=35.193.131.71 ansible_user=root -------------------------------------------------------------------------------- /Ansible/rabbitmq/README.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | ansible-galaxy collection install community.rabbitmq 3 | ansible-galaxy role install geerlingguy.rabbitmq 4 | ansible-galaxy role install geerlingguy.repo-epel 5 | ``` -------------------------------------------------------------------------------- /Ansible/rabbitmq/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/rabbitmq/cleanup_docker.yml: -------------------------------------------------------------------------------- 1 | # cleanup.yml 2 | 3 | - name: Cleanup RabbitMQ Docker setup 4 | hosts: all 5 | become: true 6 | vars_files: 7 | - vars.yml 8 | 9 | tasks: 10 | - name: Stop RabbitMQ container if it exists 11 | ansible.builtin.docker_container: 12 | name: "{{ container_name }}" 13 | state: stopped 14 | ignore_errors: yes 15 | failed_when: false 16 | 17 | - name: Remove RabbitMQ container if it exists 18 | ansible.builtin.docker_container: 19 | name: "{{ container_name }}" 20 | state: absent 21 | ignore_errors: yes 22 | failed_when: false 23 | 24 | - name: Remove RabbitMQ Docker image if it exists 25 | ansible.builtin.docker_image: 26 | name: "rabbitmq:{{ rabbitmq_image }}" 27 | state: absent 28 | ignore_errors: yes 29 | failed_when: false 30 | 31 | - name: Remove RabbitMQ data directory 32 | file: 33 | path: "{{ host_path }}" 34 | state: absent 35 | force: yes 36 | ignore_errors: yes 37 | failed_when: false 38 | 39 | - name: Display cleanup completion message 40 | debug: 41 | msg: "RabbitMQ cleanup completed successfully." 
42 | -------------------------------------------------------------------------------- /Ansible/rabbitmq/configure_rabbitmq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure RabbitMQ 3 | hosts: all 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | tasks: 8 | - name: Enable RabbitMQ management plugin 9 | community.rabbitmq.rabbitmq_plugin: 10 | name: rabbitmq_management 11 | state: enabled 12 | 13 | - name: Create RabbitMQ admin user 14 | community.rabbitmq.rabbitmq_user: 15 | user: "{{ rabbitmq_admin_user }}" 16 | password: "{{ rabbitmq_admin_password }}" 17 | tags: 18 | - administrator 19 | vhost: / 20 | configure_priv: ".*" 21 | read_priv: ".*" 22 | write_priv: ".*" 23 | state: present 24 | 25 | - name: Set RabbitMQ to listen on 0.0.0.0 26 | lineinfile: 27 | path: /etc/rabbitmq/rabbitmq.conf 28 | regexp: '^listeners.tcp.default' 29 | line: "listeners.tcp.default = 0.0.0.0" 30 | state: present 31 | create: yes 32 | notify: Restart RabbitMQ 33 | 34 | handlers: 35 | - name: Restart RabbitMQ 36 | service: 37 | name: rabbitmq-server 38 | state: restarted -------------------------------------------------------------------------------- /Ansible/rabbitmq/install_rabbitmq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install RabbitMQ 3 | hosts: rabbitmq_servers 4 | become: true 5 | roles: 6 | - name: geerlingguy.repo-epel 7 | when: ansible_os_family == 'RedHat' 8 | - geerlingguy.rabbitmq 9 | vars: 10 | - rabbitmq_daemon: rabbitmq-server 11 | - rabbitmq_enabled: true 12 | - rabbitmq_version: "3.10.0" 13 | - rabbitmq_rpm: "rabbitmq-server-{{ rabbitmq_version }}-1.el{{ ansible_distribution_major_version }}.noarch.rpm" 14 | - rabbitmq_rpm_url: "https://packagecloud.io/rabbitmq/rabbitmq-server/packages/el/{{ ansible_distribution_major_version }}/{{ rabbitmq_rpm }}/download" 15 | - rabbitmq_apt_repository: "deb [signed-by=/etc/apt/trusted.gpg.d/rabbitmq-9F4587F226208342.gpg] https://ppa1.novemberain.com/rabbitmq/rabbitmq-server/deb/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} main" 16 | - rabbitmq_apt_gpg_url: "https://ppa.novemberain.com/gpg.9F4587F226208342.key" 17 | - erlang_apt_repository: "deb [signed-by=/etc/apt/trusted.gpg.d/erlang-E495BB49CC4BBE5B.gpg] https://ppa2.novemberain.com/rabbitmq/rabbitmq-erlang/deb/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} main" 18 | - erlang_apt_gpg_url: "https://ppa.novemberain.com/gpg.E495BB49CC4BBE5B.key" -------------------------------------------------------------------------------- /Ansible/rabbitmq/inventory.ini: -------------------------------------------------------------------------------- 1 | [rabbitmq_servers] 2 | debian ansible_host=147.182.174.211 ansible_user=root 3 | ubuntu ansible_host=67.207.87.177 ansible_user=root 4 | rockylinux ansible_host=24.144.106.189 ansible_user=root -------------------------------------------------------------------------------- /Ansible/rabbitmq/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # RabbitMQ Docker 3 | 4 | rabbitmq_image: "4.0-management" 5 | container_name: "rabbitmq" 6 | host_port: 5672 7 | management_port: 15672 8 | admin_user: "admin" 9 | admin_password: "oh234lrfw($)" 10 | host_path: "/mnt/rabbitmq_data" -------------------------------------------------------------------------------- /Ansible/redis/README.md: 
-------------------------------------------------------------------------------- 1 | # 🚀 Redis Ansible Playbooks 2 | 3 | This repository contains two Ansible playbooks for installing and configuring Redis on multiple OS families (Debian/Ubuntu and RedHat/Rocky). 4 | 5 | ## 📂 Playbooks 6 | 7 | 1. **install_redis.yml** - Installs Redis on target hosts. 8 | 2. **configure_redis.yml** - Configures `protected-mode` and `bind` in `redis.conf` without altering other settings. 9 | 10 | ## 🔧 Playbook 1: Redis Installation 11 | 12 | Installs Redis and ensures it is running on both OS families. 13 | 14 | ### Run: 15 | ```bash 16 | ansible-playbook -i inventory.ini install_redis.yml 17 | ``` 18 | 19 | ### Key Variables: 20 | * Debian Package: `redis-server` 21 | * RedHat Package: `redis` 22 | 23 | ## 🔧 Playbook 2: Redis Configuration 24 | Modifies only `protected-mode` and `bind` in `redis.conf` and restarts the Redis service. 25 | ```bash 26 | ansible-playbook -i inventory.ini configure_redis.yml 27 | ``` 28 | ### Key Variables: 29 | * `protected-mode: no` 30 | * `bind: 0.0.0.0` -------------------------------------------------------------------------------- /Ansible/redis/configure_redis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Redis 3 | hosts: redis_servers 4 | become: yes 5 | vars: 6 | redis_protected_mode: "no" 7 | redis_bind: "0.0.0.0" 8 | debian_redis_package: "redis-server" 9 | redhat_redis_package: "redis" 10 | tasks: 11 | - name: Modify 'protected-mode' in redis.conf 12 | lineinfile: 13 | path: /etc/redis/redis.conf 14 | regexp: '^protected-mode' 15 | line: "protected-mode {{ redis_protected_mode }}" 16 | state: present 17 | backrefs: yes 18 | 19 | - name: Modify 'bind' in redis.conf 20 | lineinfile: 21 | path: /etc/redis/redis.conf 22 | regexp: '^bind' 23 | line: "bind {{ redis_bind }}" 24 | state: present 25 | backrefs: yes 26 | 27 | - name: Restart Redis service on Debian-based systems 28 | service: 29 | name: "{{ debian_redis_package }}" 30 | state: restarted 31 | when: ansible_os_family == "Debian" 32 | 33 | - name: Restart Redis service on RedHat-based systems 34 | service: 35 | name: "{{ redhat_redis_package }}" 36 | state: restarted 37 | when: ansible_os_family == "RedHat" -------------------------------------------------------------------------------- /Ansible/redis/inventory.ini: -------------------------------------------------------------------------------- 1 | [redis_servers] 2 | ubuntu ansible_host=146.190.73.184 ansible_user=root 3 | debian ansible_host=157.245.251.104 ansible_user=root 4 | rockylinux ansible_host=157.230.85.171 ansible_user=root -------------------------------------------------------------------------------- /Ansible/security/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | debian ansible_host=10.128.0.47 ansible_user=root 3 | ubuntu ansible_host=10.128.0.48 ansible_user=root 4 | rhel ansible_host=10.128.0.45 ansible_user=root -------------------------------------------------------------------------------- /Ansible/security/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # SSH security configurations 3 | security_ssh_port: 22 4 | security_ssh_password_authentication: "no" 5 | security_ssh_permit_root_login: "no" 6 | security_ssh_usedns: "no" 7 | security_ssh_permit_empty_password: "no" 8 | security_ssh_challenge_response_auth: "no" 9 | security_ssh_gss_api_authentication: "no" 10 |
security_ssh_x11_forwarding: "no" 11 | security_ssh_allowed_users: 12 | - johndoe 13 | - adminuser 14 | security_ssh_allowed_groups: 15 | - admin 16 | - devops 17 | 18 | # Sudoers configuration 19 | security_sudoers_passworded: 20 | - johndoe 21 | - deployacct 22 | 23 | # Automatic updates configuration 24 | security_autoupdate_enabled: true 25 | security_autoupdate_conf_path: "{{ '/etc/apt/apt.conf.d/50unattended-upgrades' if ansible_facts['os_family'] == 'Debian' else '/etc/yum/yum-cron.conf' }}" 26 | security_autoupdate_reboot: true 27 | security_autoupdate_reboot_time: "03:00" 28 | security_autoupdate_mail_to: "admin@example.com" 29 | security_autoupdate_mail_on_error: true 30 | 31 | # Fail2ban configuration 32 | security_fail2ban_enabled: true -------------------------------------------------------------------------------- /Ansible/semaphore-ui/installation_with_package.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ismoilovdevml/infra-as-code/82f52afdae3971556eac9e8d12efb59830837c14/Ansible/semaphore-ui/installation_with_package.yml -------------------------------------------------------------------------------- /Ansible/semaphore-ui/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | rockylinux ansible_host=137.184.143.123 ansible_user=root 3 | debian ansible_host=165.22.3.122 ansible_user=root 4 | ubuntu ansible_host=159.223.131.6 ansible_user=root -------------------------------------------------------------------------------- /Ansible/sonarqube/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | rockylinux ansible_host=24.144.106.189 ansible_user=root 3 | ; rockylinux ansible_host=137.184.143.123 ansible_user=root 4 | ; debian ansible_host=165.22.3.122 ansible_user=root 5 | ; ubuntu ansible_host=159.223.131.6 ansible_user=root -------------------------------------------------------------------------------- /Ansible/trivy/install_trivy_binary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Trivy using installation script 3 | hosts: trivy_servers 4 | become: yes 5 | tasks: 6 | 7 | - name: Install curl if not installed (Debian/Ubuntu) 8 | apt: 9 | name: curl 10 | state: present 11 | update_cache: yes 12 | when: ansible_facts['os_family'] == "Debian" 13 | 14 | - name: Install curl if not installed (RHEL/CentOS) 15 | yum: 16 | name: curl 17 | state: present 18 | when: ansible_facts['os_family'] == "RedHat" 19 | 20 | - name: Download and run Trivy installation script 21 | ansible.builtin.shell: | 22 | curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.18.3 23 | args: 24 | creates: /usr/local/bin/trivy 25 | 26 | - name: Verify Trivy installation 27 | ansible.builtin.command: trivy --version 28 | register: trivy_version 29 | changed_when: false 30 | 31 | - name: Print Trivy version 32 | ansible.builtin.debug: 33 | msg: "Trivy version installed: {{ trivy_version.stdout }}" -------------------------------------------------------------------------------- /Ansible/trivy/install_trivy_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Trivy using Docker 3 | hosts: trivy_servers 4 | become: yes 5 | vars: 6 | cache_dir: "/root/.cache" 7 | trivy_image_name: "python:3.4-alpine" # Specify the exact image to be scanned here
8 | 9 | tasks: 10 | 11 | - name: Check if Docker is installed 12 | ansible.builtin.shell: which docker 13 | register: docker_installed 14 | changed_when: false 15 | failed_when: false 16 | 17 | - name: Show info message if Docker is not installed 18 | ansible.builtin.debug: 19 | msg: "Docker is not installed, skipping the installation of Trivy." 20 | when: docker_installed.rc != 0 21 | 22 | - name: Skip tasks if Docker is not installed 23 | ansible.builtin.meta: end_play 24 | when: docker_installed.rc != 0 25 | 26 | - name: Pull Trivy Docker image if Docker is installed 27 | docker_image: 28 | name: aquasec/trivy 29 | tag: "0.18.3" 30 | source: pull 31 | when: docker_installed.rc == 0 32 | 33 | - name: Run Trivy scan (Linux) 34 | docker_container: 35 | name: trivy_scan 36 | image: aquasec/trivy:0.18.3 37 | command: "image {{ trivy_image_name }}" 38 | volumes: 39 | - "{{ cache_dir }}:/root/.cache/" 40 | - /var/run/docker.sock:/var/run/docker.sock 41 | state: started 42 | auto_remove: yes 43 | when: docker_installed.rc == 0 and ansible_facts['system'] == "Linux" 44 | 45 | - name: Show Trivy scan result 46 | ansible.builtin.shell: "docker logs trivy_scan" 47 | register: trivy_scan_result 48 | when: docker_installed.rc == 0 and ansible_facts['system'] == "Linux" 49 | 50 | - name: Print Trivy scan result 51 | debug: 52 | msg: "{{ trivy_scan_result.stdout }}" 53 | when: docker_installed.rc == 0 and ansible_facts['system'] == "Linux" -------------------------------------------------------------------------------- /Ansible/trivy/install_trivy_helm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Trivy using Helm 3 | hosts: all 4 | become: yes 5 | tasks: 6 | 7 | - name: Check if Helm is installed 8 | ansible.builtin.command: which helm 9 | register: helm_installed 10 | changed_when: false 11 | failed_when: false 12 | 13 | - name: Show info message if Helm is not installed 14 | ansible.builtin.debug: 15 | msg: "Helm is not installed, skipping the installation of Trivy."
16 | when: helm_installed.rc != 0 17 | 18 | - name: Skip tasks if Helm is not installed 19 | ansible.builtin.meta: end_play 20 | when: helm_installed.rc != 0 21 | 22 | - name: Add the AquaSecurity Helm repository 23 | ansible.builtin.command: 24 | cmd: helm repo add aquasecurity https://aquasecurity.github.io/helm-charts/ 25 | register: helm_repo_add 26 | changed_when: "'has been added to your repositories' in helm_repo_add.stdout" 27 | 28 | - name: Update the Helm repository 29 | ansible.builtin.command: 30 | cmd: helm repo update 31 | register: helm_repo_update 32 | changed_when: "'Update Complete' in helm_repo_update.stdout" 33 | 34 | - name: Search for Trivy in the Helm repository 35 | ansible.builtin.command: 36 | cmd: helm search repo trivy 37 | register: helm_search_trivy 38 | changed_when: false 39 | 40 | - name: Print the Helm search result for Trivy 41 | debug: 42 | msg: "{{ helm_search_trivy.stdout }}" 43 | 44 | - name: Install Trivy using Helm 45 | ansible.builtin.command: 46 | cmd: helm install my-trivy aquasecurity/trivy 47 | register: helm_install_trivy 48 | changed_when: "'STATUS: deployed' in helm_install_trivy.stdout" 49 | 50 | - name: Print the Trivy installation result 51 | debug: 52 | msg: "{{ helm_install_trivy.stdout }}" -------------------------------------------------------------------------------- /Ansible/trivy/inventory.ini: -------------------------------------------------------------------------------- 1 | [trivy_servers] 2 | rockylinux ansible_host=34.27.185.223 ansible_user=root 3 | debian ansible_host=35.193.131.71 ansible_user=root -------------------------------------------------------------------------------- /Ansible/update-upgrade/README.md: -------------------------------------------------------------------------------- 1 | # 📋 Update & Install Tools Playbook 2 | 3 | This Ansible playbook updates Linux servers and installs essential tools. 4 | 5 | ## 🛠️ Usage 6 | To run the playbook, use the following command: 7 | 8 | ```bash 9 | ansible-playbook -i inventory.ini ./update_upgrade_tools.yml 10 | ``` 11 | 12 | ## 💻 Supported Linux Operating Systems 13 | This playbook supports the following Linux distributions: 14 | * 🐧 **Debian:** 11,12 15 | * 🐧 **Ubuntu:** 20.04,22.04 16 | * 🐧 **RHEL:** 7,8 17 | * 🐧 **Rocky Linux:** 8,9 18 | 19 | ## ✅ Tested Operating Systems 20 | The playbook has been tested on the following OS versions: 21 | * ✅ **Debian:** 11,12 22 | * ✅ **Ubuntu:** 20.04,22.04 23 | * ✅ **RHEL:** 7,8 24 | * ✅ **Rocky Linux:** 8,9 25 | 26 | ## ⚙️ Supported Ansible Versions 27 | * ✅ ansible [core 2.16.3] 28 | * ❗️ ansible [core 2.17.3] (compatibility issues) 29 | 30 | > Note: The playbook assumes you are running Ansible as the `root` user.
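As a rough sketch of the cross-distribution pattern such a playbook follows (conditional `apt` on Debian-family hosts, `dnf` on RedHat-family hosts), an update play typically looks like the example below. The module arguments are illustrative assumptions, not the repository's exact tasks:

```yaml
# Illustrative cross-distro update pattern; NOT the repository's update_upgrade_tools.yml.
- name: Update and upgrade packages
  hosts: all
  become: true
  tasks:
    - name: Upgrade all packages (Debian/Ubuntu)
      ansible.builtin.apt:
        update_cache: true
        upgrade: dist
      when: ansible_os_family == "Debian"

    - name: Upgrade all packages (RHEL/Rocky)
      ansible.builtin.dnf:
        name: "*"
        state: latest
      when: ansible_os_family == "RedHat"
```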
-------------------------------------------------------------------------------- /Ansible/update-upgrade/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callbacks_enabled = profile_tasks -------------------------------------------------------------------------------- /Ansible/update-upgrade/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | debian ansible_host=157.245.130.56 ansible_user=root 3 | ubuntu ansible_host=134.209.77.220 ansible_user=root 4 | rockylinux ansible_host=142.93.56.4 ansible_user=root -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | use_default_rules: true 3 | parseable: true 4 | quiet: true 5 | verbosity: 1 6 | 7 | # State that naming for now should be a warning 8 | # 106: ansible role name does not conform to pattern [a-z][a-z0-9_]+$ 9 | warn_list: 10 | - '106' 11 | 12 | # This is for false positives 13 | # 504: Do not use 'local_action', use 'delegate_to: localhost' 14 | skip_list: 15 | - '504' -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | wazuh-agent.yml 3 | wazuh-elastic_stack-distributed.yml 4 | wazuh-elastic_stack-single.yml 5 | wazuh-elastic.yml 6 | wazuh-kibana.yml 7 | wazuh-manager.yml 8 | *.pyc 9 | .mypy_cache 10 | Pipfile.lock 11 | *.swp 12 | molecule/**/es_certs/ 13 | molecule/**/opendistro/ -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/inventory.ini: -------------------------------------------------------------------------------- 1 | # Inventory file for Wazuh cluster setup 2 | 3 | [manager] 4 | manager ansible_host=192.168.1.100 private_ip=192.168.1.100 5 | 6 | [indexer] 7 | indexer ansible_host=192.168.1.101 private_ip=192.168.1.101 8 | 9 | [dashboard] 10 | dashboard ansible_host=192.168.1.102 private_ip=192.168.1.102 11 | 12 | [agent] 13 | agent01 ansible_host=192.168.1.103 private_ip=192.168.1.103 14 | 15 | [all:vars] 16 | ansible_user=root 17 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/playbooks/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | hash_behaviour=merge 3 | -------------------------------------------------------------------------------- 
/Ansible/wazuh-ansible/playbooks/wazuh-dashboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: wi1 3 | roles: 4 | - role: ../roles/wazuh/wazuh-dashboard 5 | vars: 6 | ansible_shell_allow_world_readable_temp: true 7 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/playbooks/wazuh-indexer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: wi_cluster 3 | roles: 4 | - role: ../roles/wazuh/wazuh-indexer 5 | 6 | vars: 7 | instances: # A certificate will be generated for every node using the name as CN. 8 | node1: 9 | name: node-1 10 | ip: 11 | role: indexer 12 | node2: 13 | name: node-2 14 | ip: 15 | role: indexer 16 | node3: 17 | name: node-3 18 | ip: 19 | role: indexer 20 | 21 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/playbooks/wazuh-manager-oss-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Wazuh cluster without Filebeat 3 | - hosts: manager 4 | roles: 5 | - role: "../roles/wazuh/ansible-wazuh-manager" 6 | become: yes 7 | become_user: root 8 | vars: 9 | wazuh_manager_config: 10 | connection: 11 | - type: 'secure' 12 | port: '1514' 13 | protocol: 'tcp' 14 | queue_size: 131072 15 | api: 16 | https: 'yes' 17 | cluster: 18 | disable: 'no' 19 | node_name: 'master' 20 | node_type: 'master' 21 | key: 'c98b62a9b6169ac5f67dae55ae4a9088' 22 | nodes: 23 | - "{{ hostvars.manager.private_ip }}" 24 | hidden: 'no' 25 | wazuh_api_users: 26 | - username: custom-user 27 | password: SecretPassword1! 28 | 29 | - hosts: worker01 30 | roles: 31 | - role: "../roles/wazuh/ansible-wazuh-manager" 32 | become: yes 33 | become_user: root 34 | vars: 35 | wazuh_manager_config: 36 | connection: 37 | - type: 'secure' 38 | port: '1514' 39 | protocol: 'tcp' 40 | queue_size: 131072 41 | api: 42 | https: 'yes' 43 | cluster: 44 | disable: 'no' 45 | node_name: 'worker_01' 46 | node_type: 'worker' 47 | key: 'c98b62a9b6169ac5f67dae55ae4a9088' 48 | nodes: 49 | - "{{ hostvars.manager.private_ip }}" 50 | hidden: 'no' 51 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/playbooks/wazuh-manager-oss.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: managers 3 | roles: 4 | - role: ../roles/wazuh/ansible-wazuh-manager 5 | - role: ../roles/wazuh/ansible-filebeat-oss 6 | filebeat_output_indexer_hosts: 7 | - ":9200" 8 | - ":9200" 9 | - ":9200" 10 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/playbooks/wazuh-single.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Certificates generation 3 | - hosts: aio 4 | roles: 5 | - role: ../roles/wazuh/wazuh-indexer 6 | perform_installation: false 7 | become: no 8 | #become_user: root 9 | vars: 10 | indexer_node_master: true 11 | instances: 12 | node1: 13 | name: node-1 # Important: must be equal to indexer_node_name. 
14 | ip: 127.0.0.1 15 | role: indexer 16 | macos_localhost: false 17 | tags: 18 | - generate-certs 19 | # Single node 20 | - hosts: aio 21 | become: yes 22 | become_user: root 23 | roles: 24 | - role: ../roles/wazuh/wazuh-indexer 25 | - role: ../roles/wazuh/ansible-wazuh-manager 26 | - role: ../roles/wazuh/ansible-filebeat-oss 27 | - role: ../roles/wazuh/wazuh-dashboard 28 | vars: 29 | single_node: true 30 | minimum_master_nodes: 1 31 | indexer_node_master: true 32 | indexer_network_host: 127.0.0.1 33 | filebeat_node_name: node-1 34 | filebeat_output_indexer_hosts: 35 | - 127.0.0.1 36 | instances: 37 | node1: 38 | name: node-1 # Important: must be equal to indexer_node_name. 39 | ip: 127.0.0.1 40 | role: indexer 41 | ansible_shell_allow_world_readable_temp: true 42 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/elastic-stack/ansible-kibana/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kibana_node_name: node-1 3 | 4 | elasticsearch_http_port: "9200" 5 | elasticsearch_network_host: "127.0.0.1" 6 | kibana_server_host: "0.0.0.0" 7 | kibana_server_port: "5601" 8 | kibana_conf_path: /etc/kibana 9 | elastic_stack_version: 7.10.2 10 | wazuh_version: 4.4.1 11 | wazuh_app_url: https://packages.wazuh.com/4.x/ui/kibana/wazuh_kibana 12 | 13 | elasticrepo: 14 | apt: 'https://artifacts.elastic.co/packages/7.x/apt' 15 | yum: 'https://artifacts.elastic.co/packages/7.x/yum' 16 | gpg: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch' 17 | key_id: '46095ACC8548582C1A2699A9D27D666CD88E42B4' 18 | 19 | # API credentials 20 | wazuh_api_credentials: 21 | - id: "default" 22 | url: "https://localhost" 23 | port: 55000 24 | username: "wazuh" 25 | password: "wazuh" 26 | 27 | # Xpack Security 28 | kibana_xpack_security: false 29 | kibana_ssl_verification_mode: "full" 30 | 31 | elasticsearch_xpack_security_user: elastic 32 | elasticsearch_xpack_security_password: elastic_pass 33 | 34 | node_certs_destination: /etc/kibana/certs 35 | 36 | # CA Generation 37 | master_certs_path: "{{ playbook_dir }}/es_certs" 38 | generate_CA: true 39 | ca_cert_name: "" 40 | 41 | # Nodejs 42 | nodejs: 43 | repo_dict: 44 | debian: "deb" 45 | redhat: "rpm" 46 | repo_url_ext: "nodesource.com/setup_10.x" 47 | 48 | #Nodejs NODE_OPTIONS 49 | node_options: --no-warnings --max-old-space-size=2048 --max-http-header-size=65536 50 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/opendistro/opendistro-kibana/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Kibana configuration 4 | elasticsearch_http_port: 9200 5 | elastic_api_protocol: https 6 | kibana_conf_path: /etc/kibana 7 | kibana_node_name: node-1 8 | kibana_server_host: "0.0.0.0" 9 | kibana_server_port: "5601" 10 | kibana_server_name: "kibana" 11 | kibana_max_payload_bytes: 1048576 12 | elastic_stack_version: 7.10.2 13 | wazuh_version: 4.4.1 14 | wazuh_app_url: https://packages.wazuh.com/4.x/ui/kibana/wazuh_kibana 15 | 16 | # The OpenDistro package repository 17 | kibana_opendistro_version: 1.13.2-1 # Version includes the - for RedHat family compatibility, replace with = for Debian hosts 18 | 19 | package_repos: 20 | yum: 21 | opendistro: 22 | baseurl: 'https://packages.wazuh.com/4.x/yum/' 23 | gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH' 24 | apt: 25 | opendistro: 26 | baseurl: 'deb https://packages.wazuh.com/4.x/apt/ stable 
main' 27 | gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH' 28 | 29 | # API credentials 30 | wazuh_api_credentials: 31 | - id: "default" 32 | url: "https://localhost" 33 | port: 55000 34 | username: "wazuh" 35 | password: "wazuh" 36 | 37 | # opendistro Security 38 | kibana_opendistro_security: true 39 | kibana_newsfeed_enabled: "false" 40 | kibana_telemetry_optin: "false" 41 | kibana_telemetry_enabled: "false" 42 | 43 | opendistro_admin_password: changeme 44 | opendistro_kibana_user: kibanaserver 45 | opendistro_kibana_password: changeme 46 | local_certs_path: "{{ playbook_dir }}/opendistro/certificates" 47 | 48 | # Nodejs 49 | nodejs: 50 | repo_dict: 51 | debian: "deb" 52 | redhat: "rpm" 53 | repo_url_ext: "nodesource.com/setup_10.x" 54 | 55 | 56 | #Nodejs NODE_OPTIONS 57 | node_options: --no-warnings --max-old-space-size=2048 --max-http-header-size=65536 58 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Filebeat for Elastic Stack 2 | ------------------------------------ 3 | 4 | An Ansible Role that installs [Filebeat-oss](https://www.elastic.co/products/beats/filebeat), this can be used in conjunction with [ansible-wazuh-manager](https://github.com/wazuh/wazuh-ansible/ansible-wazuh-server). 5 | 6 | Requirements 7 | ------------ 8 | 9 | This role will work on: 10 | * Red Hat 11 | * CentOS 12 | * Fedora 13 | * Debian 14 | * Ubuntu 15 | 16 | Role Variables 17 | -------------- 18 | 19 | Available variables are listed below, along with default values (see `defaults/main.yml`): 20 | 21 | ``` 22 | filebeat_output_indexer_hosts: 23 | - "localhost:9200" 24 | 25 | ``` 26 | 27 | License and copyright 28 | --------------------- 29 | 30 | WAZUH Copyright (C) 2016, Wazuh Inc. (License GPLv3) 31 | 32 | ### Based on previous work from geerlingguy 33 | 34 | - https://github.com/geerlingguy/ansible-role-filebeat 35 | 36 | ### Modified by Wazuh 37 | 38 | The playbooks have been modified by Wazuh, including some specific requirements, templates and configuration to improve integration with Wazuh ecosystem. 
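For reference, applying the role from a playbook follows the same shape as the manager playbooks in this repository; the host group and indexer address below are placeholders:

```yaml
# Minimal usage example; "managers" and the indexer host/port are placeholder values.
- hosts: managers
  become: yes
  roles:
    - role: ../roles/wazuh/ansible-filebeat-oss
      filebeat_output_indexer_hosts:
        - "10.0.0.10:9200"
```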
39 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | filebeat_version: 7.10.2 3 | 4 | wazuh_template_branch: v5.0.0 5 | 6 | filebeat_node_name: node-1 7 | 8 | filebeat_output_indexer_hosts: 9 | - "localhost" 10 | 11 | filebeat_module_package_name: wazuh-filebeat-0.4.tar.gz 12 | filebeat_module_package_path: /tmp/ 13 | filebeat_module_destination: /usr/share/filebeat/module 14 | filebeat_module_folder: /usr/share/filebeat/module/wazuh 15 | indexer_security_user: admin 16 | indexer_security_password: changeme 17 | # Security plugin 18 | filebeat_security: true 19 | filebeat_ssl_dir: /etc/pki/filebeat 20 | 21 | # Local path to store the generated certificates (Opensearch security plugin) 22 | local_certs_path: "{{ playbook_dir }}/indexer/certificates" 23 | 24 | filebeatrepo: 25 | keyring_path: '/usr/share/keyrings/wazuh.gpg' 26 | apt: "deb [signed-by=/usr/share/keyrings/wazuh.gpg] https://packages.wazuh.com/5.x/apt/ stable main" 27 | yum: 'https://packages.wazuh.com/5.x/yum/' 28 | gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH' 29 | path: '/tmp/WAZUH-GPG-KEY' -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart filebeat 3 | service: name=filebeat state=restarted 4 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | 4 | galaxy_info: 5 | author: Wazuh 6 | description: Installing and maintaining Filebeat-oss. 7 | company: wazuh.com 8 | license: license (GPLv3) 9 | min_ansible_version: 2.0 10 | platforms: 11 | - name: EL 12 | versions: 13 | - 6 14 | - 7 15 | - name: Fedora 16 | versions: 17 | - all 18 | - name: Debian 19 | versions: 20 | - jessie 21 | - name: Ubuntu 22 | versions: 23 | - precise 24 | - trusty 25 | - xenial 26 | galaxy_tags: 27 | - web 28 | - system 29 | - monitoring 30 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Debian/Ubuntu | Install apt-transport-https, ca-certificates and acl 3 | apt: 4 | name: 5 | - apt-transport-https 6 | - ca-certificates 7 | - acl 8 | state: present 9 | register: filebeat_ca_packages_install 10 | until: filebeat_ca_packages_install is succeeded 11 | 12 | - name: Debian/Ubuntu | Download Filebeat apt key. 13 | get_url: 14 | url: "{{ filebeatrepo.gpg }}" 15 | dest: "{{ filebeatrepo.path }}" 16 | 17 | - name: Import Filebeat GPG key 18 | command: "gpg --no-default-keyring --keyring gnupg-ring:{{ filebeatrepo.keyring_path }} --import {{ filebeatrepo.path }}" 19 | args: 20 | creates: "{{ filebeatrepo.keyring_path }}" 21 | 22 | - name: Set permissions for Filebeat GPG key 23 | file: 24 | path: "{{ filebeatrepo.keyring_path }}" 25 | mode: '0644' 26 | 27 | - name: Debian/Ubuntu | Add Filebeat-oss repository. 
28 | apt_repository: 29 | repo: "{{ filebeatrepo.apt }}" 30 | state: present 31 | update_cache: true 32 | changed_when: false 33 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/tasks/RMDebian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Debian/Ubuntu | Remove Filebeat repository (and clean up left-over metadata) 3 | apt_repository: 4 | repo: "{{ filebeatrepo.apt }}" 5 | state: absent 6 | changed_when: false 7 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/tasks/RMRedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: RedHat/CentOS/Fedora | Remove Filebeat repository (and clean up left-over metadata) 3 | yum_repository: 4 | name: wazuh_repo 5 | state: absent 6 | changed_when: false 7 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: RedHat/CentOS/Fedora/Amazon Linux | Install Filebeats repo 3 | yum_repository: 4 | name: wazuh_repo 5 | description: Wazuh Repo 6 | baseurl: "{{ filebeatrepo.yum }}" 7 | gpgkey: "{{ filebeatrepo.gpg }}" 8 | gpgcheck: true 9 | changed_when: false 10 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | - name: Copy Filebeat configuration. 4 | template: 5 | src: filebeat.yml.j2 6 | dest: "/etc/filebeat/filebeat.yml" 7 | owner: root 8 | group: root 9 | mode: 0400 10 | notify: restart filebeat 11 | 12 | - name: Fetch latest Wazuh alerts template 13 | get_url: 14 | url: https://raw.githubusercontent.com/wazuh/wazuh/{{ wazuh_template_branch }}/extensions/elasticsearch/7.x/wazuh-template.json 15 | dest: "/etc/filebeat/wazuh-template.json" 16 | owner: root 17 | group: root 18 | mode: 0400 19 | notify: restart filebeat 20 | 21 | tags: 22 | - configure 23 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/tasks/security_actions.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | 3 | - name: Ensure Filebeat SSL key pair directory exists. 
4 | file: 5 | path: "{{ filebeat_ssl_dir }}" 6 | state: directory 7 | owner: root 8 | group: root 9 | mode: 500 10 | 11 | - name: Copy the certificates from local to the Manager instance 12 | copy: 13 | src: "{{ local_certs_path }}/wazuh-certificates/{{ item }}" 14 | dest: "{{ filebeat_ssl_dir }}" 15 | owner: root 16 | group: root 17 | mode: 400 18 | with_items: 19 | - "{{ filebeat_node_name }}-key.pem" 20 | - "{{ filebeat_node_name }}.pem" 21 | - "root-ca.pem" 22 | 23 | tags: 24 | - security 25 | when: 26 | - filebeat_security 27 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat-oss/templates/filebeat.yml.j2: -------------------------------------------------------------------------------- 1 | # Wazuh - Filebeat configuration file 2 | filebeat.modules: 3 | - module: wazuh 4 | alerts: 5 | enabled: true 6 | archives: 7 | enabled: false 8 | 9 | setup.template.json.enabled: true 10 | setup.template.json.path: '/etc/filebeat/wazuh-template.json' 11 | setup.template.json.name: 'wazuh' 12 | setup.template.overwrite: true 13 | setup.ilm.enabled: false 14 | 15 | # Send events directly to Wazuh indexer 16 | output.elasticsearch: 17 | hosts: 18 | {% for item in filebeat_output_indexer_hosts %} 19 | - {{ item }} 20 | {% endfor %} 21 | 22 | {% if filebeat_security %} 23 | username: {{ indexer_security_user }} 24 | password: "{{ indexer_security_password }}" 25 | protocol: https 26 | ssl.certificate_authorities: 27 | - {{ filebeat_ssl_dir }}/root-ca.pem 28 | ssl.certificate: "{{ filebeat_ssl_dir }}/{{ filebeat_node_name }}.pem" 29 | ssl.key: "{{ filebeat_ssl_dir }}/{{ filebeat_node_name }}-key.pem" 30 | {% endif %} 31 | 32 | # Optional. Send events to Logstash instead of Wazuh indexer 33 | #output.logstash.hosts: ["YOUR_LOGSTASH_SERVER_IP:5000"] 34 | 35 | logging.metrics.enabled: false 36 | 37 | seccomp: 38 | default_action: allow 39 | syscalls: 40 | - action: allow 41 | names: 42 | - rseq 43 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-filebeat/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | filebeat_version: 7.10.2 3 | 4 | wazuh_template_branch: v4.4.1 5 | 6 | filebeat_create_config: true 7 | 8 | filebeat_node_name: node-1 9 | 10 | filebeat_output_elasticsearch_hosts: 11 | - "localhost:9200" 12 | 13 | filebeat_module_package_url: https://packages.wazuh.com/4.x/filebeat 14 | filebeat_module_package_name: wazuh-filebeat-0.1.tar.gz 15 | filebeat_module_package_path: /tmp/ 16 | filebeat_module_destination: /usr/share/filebeat/module 17 | filebeat_module_folder: /usr/share/filebeat/module/wazuh 18 | 19 | # Xpack Security 20 | filebeat_xpack_security: false 21 | 22 | elasticsearch_xpack_security_user: elastic 23 | elasticsearch_xpack_security_password: elastic_pass 24 | 25 | node_certs_destination: /etc/filebeat/certs 26 | 27 | # CA Generation 28 | master_certs_path: "{{ playbook_dir }}/es_certs" 29 | generate_CA: true 30 | ca_cert_name: "" 31 | 32 | elasticrepo: 33 | apt: 'https://artifacts.elastic.co/packages/7.x/apt' 34 | yum: 'https://artifacts.elastic.co/packages/7.x/yum' 35 | gpg: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch' 36 | key_id: '46095ACC8548582C1A2699A9D27D666CD88E42B4' 37 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/README.md: 
-------------------------------------------------------------------------------- 1 | Ansible Playbook - Wazuh agent 2 | ============================== 3 | 4 | This role will install and configure a Wazuh Agent. 5 | 6 | OS Requirements 7 | ---------------- 8 | 9 | This role is compatible with: 10 | * Red Hat 11 | * CentOS 12 | * Fedora 13 | * Debian 14 | * Ubuntu 15 | * Windows 16 | * macOS 17 | 18 | 19 | Role Variables 20 | -------------- 21 | 22 | * `wazuh_managers`: Collection of Wazuh Managers' IP address, port, and protocol used by the agent 23 | * `wazuh_agent_authd`: Collection with the settings to register an agent using authd. 24 | 25 | Playbook example 26 | ---------------- 27 | 28 | The following is an example of how this role can be used: 29 | 30 | - hosts: all:!wazuh-manager 31 | roles: 32 | - ansible-wazuh-agent 33 | vars: 34 | wazuh_managers: 35 | - address: 127.0.0.1 36 | port: 1514 37 | protocol: tcp 38 | api_port: 55000 39 | api_proto: 'http' 40 | api_user: 'ansible' 41 | wazuh_agent_authd: 42 | registration_address: 127.0.0.1 43 | enable: true 44 | port: 1515 45 | ssl_agent_ca: null 46 | ssl_auto_negotiate: 'no' 47 | 48 | 49 | License and copyright 50 | --------------------- 51 | 52 | WAZUH Copyright (C) 2016, Wazuh Inc. (License GPLv3) 53 | 54 | ### Based on previous work from dj-wasabi 55 | 56 | - https://github.com/dj-wasabi/ansible-ossec-server 57 | 58 | ### Modified by Wazuh 59 | 60 | The playbooks have been modified by Wazuh, including some specific requirements, templates and configuration to improve integration with Wazuh ecosystem. 61 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart wazuh-agent 3 | service: name=wazuh-agent state=restarted enabled=yes 4 | 5 | - name: Windows | Restart Wazuh Agent 6 | win_service: name=WazuhSvc start_mode=auto state=restarted 7 | 8 | - name: macOS | Restart Wazuh Agent 9 | command: /Library/Ossec/bin/wazuh-control restart -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Wazuh 4 | description: Installing, deploying and configuring Wazuh Agent. 
5 | company: wazuh.com 6 | license: license (GPLv3) 7 | min_ansible_version: 2.0 8 | platforms: 9 | - name: EL 10 | versions: 11 | - all 12 | - name: Ubuntu 13 | versions: 14 | - all 15 | - name: Debian 16 | versions: 17 | - all 18 | - name: Fedora 19 | versions: 20 | - all 21 | galaxy_tags: 22 | - monitoring 23 | dependencies: [] 24 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/tasks/RMDebian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove Wazuh repository (and clean up left-over metadata) 3 | apt_repository: 4 | repo: "{{ wazuh_agent_config.repo.apt }}" 5 | state: absent 6 | changed_when: false 7 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/tasks/RMRedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove Wazuh repository (and clean up left-over metadata) 3 | yum_repository: 4 | name: wazuh_repo 5 | state: absent 6 | changed_when: false 7 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/tasks/installation_from_custom_packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Wazuh Agent from .deb packages 3 | apt: 4 | deb: "{{ wazuh_custom_packages_installation_agent_deb_url }}" 5 | state: present 6 | when: 7 | - ansible_os_family|lower == "debian" 8 | - wazuh_custom_packages_installation_agent_enabled 9 | 10 | - name: Install Wazuh Agent from .rpm packages | yum 11 | yum: 12 | name: "{{ wazuh_custom_packages_installation_agent_rpm_url }}" 13 | state: present 14 | when: 15 | - ansible_os_family|lower == "redhat" 16 | - wazuh_custom_packages_installation_agent_enabled 17 | - not (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") 18 | - not (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") 19 | 20 | - name: Install Wazuh Agent from .rpm packages | dnf 21 | dnf: 22 | name: "{{ wazuh_custom_packages_installation_agent_rpm_url }}" 23 | state: present 24 | when: 25 | - ansible_os_family|lower == "redhat" 26 | - wazuh_custom_packages_installation_agent_enabled 27 | - (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") or 28 | (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_vars: ../../vars/repo_vars.yml 4 | 5 | - include_vars: ../../vars/repo.yml 6 | when: packages_repository == 'production' 7 | 8 | - include_vars: ../../vars/repo_pre-release.yml 9 | when: packages_repository == 'pre-release' 10 | 11 | - include_vars: ../../vars/repo_staging.yml 12 | when: packages_repository == 'staging' 13 | 14 | - name: Overlay wazuh_agent_config on top of defaults 15 | set_fact: 16 | wazuh_agent_config: '{{ wazuh_agent_config_defaults | combine(config_layer, recursive=True) }}' 17 | vars: 18 | config_layer: '{{ wazuh_agent_config | default({}) }}' 19 | when: wazuh_agent_config_overlay | bool 20 | 21 | - include_tasks: "Windows.yml" 22 | when: 
ansible_os_family == "Windows" 23 | 24 | - include_tasks: "Linux.yml" 25 | when: ansible_system == "Linux" 26 | 27 | - include_tasks: "macOS.yml" 28 | when: ansible_system == "Darwin" -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/templates/authd_pass.j2: -------------------------------------------------------------------------------- 1 | {{ authd_pass }} -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-agent/templates/var-ossec-etc-local-internal-options.conf.j2: -------------------------------------------------------------------------------- 1 | # local_internal_options.conf 2 | # 3 | # This file should be handled with care. It contains 4 | # run time modifications that can affect the use 5 | # of OSSEC. Only change it if you know what you 6 | # are doing. Look first at ossec.conf 7 | # for most of the things you want to change. 8 | # 9 | # This file will not be overwritten during upgrades. 10 | 11 | # This is the template of Ansible for the file local_internal_options.conf 12 | # In this file you could include the configuration settings for your agents 13 | 14 | # Logcollector - If it should accept remote commands from the manager 15 | logcollector.remote_commands=1 16 | 17 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/files/custom_ruleset/decoders/sample_custom_decoders.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 22 | 23 | 24 | sample_custom_decoder 25 | 26 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/files/custom_ruleset/rules/sample_custom_rules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 11 | 12 | 5716 13 | 1.1.1.1 14 | sshd: authentication failed from IP 1.1.1.1. 15 | authentication_failed,pci_dss_10.2.4,pci_dss_10.2.5, 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart wazuh-manager 3 | service: 4 | name: wazuh-manager 5 | state: restarted 6 | enabled: true 7 | 8 | - name: restart wazuh-api 9 | service: 10 | name: wazuh-api 11 | state: restarted 12 | enabled: true 13 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Wazuh 4 | description: Installing, deploying and configuring Wazuh Manager. 
5 | company: wazuh.com 6 | license: license (GPLv3) 7 | min_ansible_version: 2.0 8 | platforms: 9 | - name: EL 10 | versions: 11 | - all 12 | - name: Ubuntu 13 | versions: 14 | - all 15 | - name: Debian 16 | versions: 17 | - all 18 | - name: Fedora 19 | versions: 20 | - all 21 | galaxy_tags: 22 | - monitoring 23 | dependencies: [] 24 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/tasks/install_cmake.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Vars 4 | # cmake_download_url: http://packages.wazuh.com/utils/cmake/cmake-3.18.3.tar.gz 5 | # cmake_version: 3.18.3 6 | # 7 | - name: Include CMake install vars 8 | include_vars: install_cmake.yml 9 | 10 | - name: Download CMake sources 11 | get_url: 12 | url: "{{ cmake_download_url }}" 13 | dest: "/tmp/cmake-{{ cmake_version }}.tar.gz" 14 | register: cmake_download 15 | 16 | - name: Unpack CMake 17 | unarchive: 18 | copy: no 19 | dest: /tmp/ 20 | src: "{{ cmake_download.dest }}" 21 | when: cmake_download.changed 22 | register: cmake_unpack 23 | 24 | - name: Configure CMake 25 | command: "./bootstrap" 26 | args: 27 | chdir: "/tmp/cmake-{{ cmake_version }}" 28 | when: cmake_unpack.changed 29 | register: cmake_configure 30 | 31 | - name: Install CMake 32 | shell: make && make install 33 | args: 34 | chdir: "/tmp/cmake-{{ cmake_version }}" 35 | when: cmake_configure.changed 36 | 37 | - name: Delete installation files 38 | file: 39 | state: absent 40 | path: "/tmp/cmake-{{ cmake_version }}" -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/tasks/installation_from_custom_packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | - name: Install Wazuh Manager from .deb packages 4 | apt: 5 | deb: "{{ wazuh_custom_packages_installation_manager_deb_url }}" 6 | state: present 7 | when: 8 | - wazuh_custom_packages_installation_manager_enabled 9 | when: 10 | - ansible_os_family|lower == "debian" 11 | 12 | - block: 13 | - name: Install Wazuh Manager from .rpm packages | yum 14 | yum: 15 | name: "{{ wazuh_custom_packages_installation_manager_rpm_url }}" 16 | state: present 17 | when: 18 | - wazuh_custom_packages_installation_manager_enabled 19 | - not (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") 20 | - not (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") 21 | 22 | - name: Install Wazuh Manager from .rpm packages | dnf 23 | dnf: 24 | name: "{{ wazuh_custom_packages_installation_manager_rpm_url }}" 25 | state: present 26 | when: 27 | - wazuh_custom_packages_installation_manager_enabled 28 | - (ansible_distribution|lower == "centos" and ansible_distribution_major_version >= "8") or 29 | (ansible_distribution|lower == "redhat" and ansible_distribution_major_version >= "8") 30 | when: 31 | - ansible_os_family|lower == "redhat" -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/tasks/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Debian/Ubuntu | Remove Wazuh repository. 
4 | apt_repository: 5 | repo: "{{ wazuh_manager_config.repo.apt }}" 6 | state: absent 7 | changed_when: false 8 | when: ansible_os_family == "Debian" 9 | 10 | - name: RedHat/CentOS/Fedora | Remove Wazuh repository (and clean up left-over metadata) 11 | yum_repository: 12 | name: wazuh_repo 13 | state: absent 14 | changed_when: false 15 | when: ansible_os_family == "RedHat" or ansible_os_family == "Amazon" 16 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/templates/admin.json.j2: -------------------------------------------------------------------------------- 1 | 2 | {% for api in wazuh_api_users %} 3 | {"username":"{{ api['username'] }}", "password": "{{ api['password'] }}"} 4 | {% endfor %} -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/templates/agentless.j2: -------------------------------------------------------------------------------- 1 | {% for agentless in agentless_creds %} 2 | {{ agentless.host }}|{{ agentless.passwd }} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/templates/authd_pass.j2: -------------------------------------------------------------------------------- 1 | {{ authd_pass }} -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/templates/cdb_lists.j2: -------------------------------------------------------------------------------- 1 | {{ item.content }} 2 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/templates/var-ossec-etc-local-internal-options.conf.j2: -------------------------------------------------------------------------------- 1 | # local_internal_options.conf 2 | # 3 | # This file should be handled with care. It contains 4 | # run time modifications that can affect the use 5 | # of OSSEC. Only change it if you know what you 6 | # are doing. Look first at ossec.conf 7 | # for most of the things you want to change. 8 | # 9 | # This file will not be overwritten during upgrades. 10 | 11 | # This is the template of Ansible for the file local_internal_options.conf 12 | # In this file you could include the configuration settings for your manager 13 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/templates/var-ossec-rules-local_decoder.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 22 | 23 | 24 | local_decoder_example 25 | 26 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/templates/var-ossec-rules-local_rules.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | 13 | 5716 14 | 1.1.1.1 15 | sshd: authentication failed from IP 1.1.1.1. 
16 | authentication_failed,pci_dss_10.2.4,pci_dss_10.2.5, 17 | 18 | 19 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/vars/agentless_creds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # agentless_creds: 3 | # - type: ssh_integrity_check_linux 4 | # frequency: 3600 5 | # host: root@example.net 6 | # state: periodic 7 | # arguments: '/bin /etc/ /sbin' 8 | # passwd: qwerty 9 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/vars/authd_pass.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # authd_pass: foobar 3 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/ansible-wazuh-manager/vars/install_cmake.yml: -------------------------------------------------------------------------------- 1 | # Install cmake vars 2 | 3 | cmake_version: 3.18.3 4 | cmake_download_url: "http://packages.wazuh.com/utils/cmake/cmake-{{ cmake_version }}.tar.gz" -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/check-packages/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | wazuh_version: 5.0.0 3 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/check-packages/files/packages_uri.txt: -------------------------------------------------------------------------------- 1 | yum/wazuh-manager-VERSION-1.x86_64.rpm 2 | apt/pool/main/w/wazuh-manager/wazuh-manager_VERSION-1_amd64.deb 3 | yum/wazuh-dashboard-VERSION-1.x86_64.rpm 4 | yum/wazuh-indexer-VERSION-1.x86_64.rpm 5 | apt/pool/main/w/wazuh-agent/wazuh-agent_VERSION-1_amd64.deb 6 | yum/wazuh-agent-VERSION-1.x86_64.rpm -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/check-packages/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check packages 3 | shell: | 4 | ./check_packages.sh {{ wazuh_version }} 5 | args: 6 | warn: false 7 | executable: /bin/bash 8 | chdir: "{{ role_path }}/scripts/" 9 | delegate_to: localhost 10 | become: no 11 | 12 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/vars/repo.yml: -------------------------------------------------------------------------------- 1 | wazuh_repo: 2 | keyring_path: '/usr/share/keyrings/wazuh.gpg' 3 | apt: 'deb [signed-by=/usr/share/keyrings/wazuh.gpg] https://packages.wazuh.com/5.x/apt/ stable main' 4 | yum: 'https://packages.wazuh.com/5.x/yum/' 5 | gpg: 'https://packages.wazuh.com/key/GPG-KEY-WAZUH' 6 | path: '/tmp/WAZUH-GPG-KEY' 7 | wazuh_winagent_config_url: "https://packages.wazuh.com/5.x/windows/wazuh-agent-{{ wazuh_agent_version }}-1.msi" 8 | wazuh_winagent_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.msi" 9 | wazuh_winagent_sha512_url: "https://packages.wazuh.com/5.x/checksums/wazuh/{{ wazuh_agent_version }}/wazuh-agent-{{ wazuh_agent_version }}-1.msi.sha512" 10 | filebeat_module_package_url: https://packages.wazuh.com/5.x/filebeat 11 | 12 | wazuh_macos_intel_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.intel64.pkg" 13 | wazuh_macos_arm_package_name: 
"wazuh-agent-{{ wazuh_agent_version }}-1.arm64.pkg" 14 | wazuh_macos_intel_package_url: "https://packages.wazuh.com/5.x/macos/{{ wazuh_macos_intel_package_name }}" 15 | wazuh_macos_arm_package_url: "https://packages.wazuh.com/5.x/macos/{{ wazuh_macos_arm_package_name }}" 16 | 17 | certs_gen_tool_version: 5.0 18 | 19 | # Url of certificates generator tool 20 | certs_gen_tool_url: "https://packages.wazuh.com/{{ certs_gen_tool_version }}/wazuh-certs-tool.sh" 21 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/vars/repo_pre-release.yml: -------------------------------------------------------------------------------- 1 | wazuh_repo: 2 | keyring_path: '/usr/share/keyrings/wazuh.gpg' 3 | apt: 'deb [signed-by=/usr/share/keyrings/wazuh.gpg] https://packages-dev.wazuh.com/pre-release/apt/ unstable main' 4 | yum: 'https://packages-dev.wazuh.com/pre-release/yum/' 5 | gpg: 'https://packages-dev.wazuh.com/key/GPG-KEY-WAZUH' 6 | path: '/tmp/WAZUH-GPG-KEY' 7 | wazuh_winagent_config_url: "https://packages-dev.wazuh.com/pre-release/windows/wazuh-agent-{{ wazuh_agent_version }}-1.msi" 8 | wazuh_winagent_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.msi" 9 | wazuh_winagent_sha512_url: "https://packages-dev.wazuh.com/pre-release/checksums/wazuh/{{ wazuh_agent_version }}/wazuh-agent-{{ wazuh_agent_version }}-1.msi.sha512" 10 | filebeat_module_package_url: https://packages-dev.wazuh.com/pre-release/filebeat 11 | 12 | wazuh_macos_intel_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.intel64.pkg" 13 | wazuh_macos_arm_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.arm64.pkg" 14 | wazuh_macos_intel_package_url: "https://packages-dev.wazuh.com/pre-release/{{ wazuh_macos_intel_package_name }}" 15 | wazuh_macos_arm_package_url: "https://packages-dev.wazuh.com/pre-release/macos/{{ wazuh_macos_arm_package_name }}" 16 | 17 | certs_gen_tool_version: 5.0 18 | 19 | # Url of certificates generator tool 20 | certs_gen_tool_url: "https://packages-dev.wazuh.com/{{ certs_gen_tool_version }}/wazuh-certs-tool.sh" 21 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/vars/repo_staging.yml: -------------------------------------------------------------------------------- 1 | wazuh_repo: 2 | keyring_path: '/usr/share/keyrings/wazuh.gpg' 3 | apt: 'deb [signed-by=/usr/share/keyrings/wazuh.gpg] https://packages-dev.wazuh.com/staging/apt/ unstable main' 4 | yum: 'https://packages-dev.wazuh.com/staging/yum/' 5 | gpg: 'https://packages-dev.wazuh.com/key/GPG-KEY-WAZUH' 6 | path: '/tmp/WAZUH-GPG-KEY' 7 | wazuh_winagent_config_url: "https://packages-dev.wazuh.com/staging/windows/wazuh-agent-{{ wazuh_agent_version }}-1.msi" 8 | wazuh_winagent_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.msi" 9 | wazuh_winagent_sha512_url: "https://packages-dev.wazuh.com/staging/checksums/wazuh/{{ wazuh_agent_version }}/wazuh-agent-{{ wazuh_agent_version }}-1.msi.sha512" 10 | check_sha512: False 11 | filebeat_module_package_url: https://packages-dev.wazuh.com/staging/filebeat 12 | 13 | wazuh_macos_intel_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.intel64.pkg" 14 | wazuh_macos_arm_package_name: "wazuh-agent-{{ wazuh_agent_version }}-1.arm64.pkg" 15 | wazuh_macos_intel_package_url: "https://packages-dev.wazuh.com/staging/macos/{{ wazuh_macos_intel_package_name }}" 16 | wazuh_macos_arm_package_url: "https://packages-dev.wazuh.com/staging/macos/{{ wazuh_macos_arm_package_name }}" 17 | 18 | 
certs_gen_tool_version: 5.0 19 | 20 | # Url of certificates generator tool 21 | certs_gen_tool_url: "https://packages-dev.wazuh.com/{{ certs_gen_tool_version }}/wazuh-certs-tool.sh" 22 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/vars/repo_vars.yml: -------------------------------------------------------------------------------- 1 | packages_repository: production -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Dashboard configuration 4 | indexer_http_port: 9200 5 | indexer_api_protocol: https 6 | dashboard_conf_path: /etc/wazuh-dashboard/ 7 | dashboard_node_name: node-1 8 | dashboard_server_host: "0.0.0.0" 9 | dashboard_server_port: "443" 10 | dashboard_server_name: "dashboard" 11 | wazuh_version: 5.0.0 12 | indexer_cluster_nodes: 13 | - 127.0.0.1 14 | 15 | # The Wazuh dashboard package repository 16 | dashboard_version: "5.0.0" 17 | 18 | # API credentials 19 | wazuh_api_credentials: 20 | - id: "default" 21 | url: "https://127.0.0.1" 22 | port: 55000 23 | username: "wazuh-wui" 24 | password: "wazuh-wui" 25 | 26 | # Dashboard Security 27 | dashboard_security: true 28 | indexer_admin_password: changeme 29 | dashboard_user: kibanaserver 30 | dashboard_password: changeme 31 | local_certs_path: "{{ playbook_dir }}/indexer/certificates" 32 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart wazuh-dashboard 3 | service: name=wazuh-dashboard state=restarted 4 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | 4 | - include_vars: debian.yml 5 | - name: Download apt repository signing key 6 | get_url: 7 | url: "{{ wazuh_repo.gpg }}" 8 | dest: "{{ wazuh_repo.path }}" 9 | 10 | - name: Import Wazuh repository GPG key 11 | command: "gpg --no-default-keyring --keyring gnupg-ring:{{ wazuh_repo.keyring_path }} --import {{ wazuh_repo.path }}" 12 | args: 13 | creates: "{{ wazuh_repo.keyring_path }}" 14 | 15 | - name: Set permissions for Wazuh repository GPG key 16 | file: 17 | path: "{{ wazuh_repo.keyring_path }}" 18 | mode: '0644' 19 | 20 | - name: Debian systems | Add Wazuh dashboard repo 21 | apt_repository: 22 | repo: "{{ wazuh_repo.apt }}" 23 | state: present 24 | update_cache: yes 25 | 26 | - name: Install Wazuh dashboard 27 | apt: 28 | name: "wazuh-dashboard={{ dashboard_version }}-*" 29 | state: present 30 | update_cache: yes 31 | register: install 32 | 33 | tags: 34 | - install -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/tasks/RMRedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove Wazuh dashboard repository (and clean up left-over metadata) 3 | yum_repository: 4 | name: wazuh_repo 5 | state: absent 6 | changed_when: false 7 | -------------------------------------------------------------------------------- 
/Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | 4 | - name: RedHat/CentOS/Fedora | Add Wazuh dashboard repo 5 | yum_repository: 6 | name: wazuh_repo 7 | description: Wazuh yum repository 8 | baseurl: "{{ wazuh_repo.yum }}" 9 | gpgkey: "{{ wazuh_repo.gpg }}" 10 | gpgcheck: true 11 | 12 | - name: Install Wazuh dashboard 13 | package: 14 | name: "wazuh-dashboard-{{ dashboard_version }}" 15 | state: present 16 | update_cache: yes 17 | register: install 18 | 19 | tags: 20 | - install 21 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/tasks/security_actions.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | 3 | - name: Ensure Dashboard certificates directory permissions. 4 | file: 5 | path: "/etc/wazuh-dashboard/certs/" 6 | state: directory 7 | owner: wazuh-dashboard 8 | group: wazuh-dashboard 9 | mode: 500 10 | 11 | - name: Copy the certificates from local to the Wazuh dashboard instance 12 | copy: 13 | src: "{{ local_certs_path }}/wazuh-certificates/{{ item }}" 14 | dest: /etc/wazuh-dashboard/certs/ 15 | owner: wazuh-dashboard 16 | group: wazuh-dashboard 17 | mode: 0400 18 | with_items: 19 | - "root-ca.pem" 20 | - "{{ dashboard_node_name }}-key.pem" 21 | - "{{ dashboard_node_name }}.pem" 22 | tags: 23 | - security 24 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/templates/opensearch_dashboards.yml.j2: -------------------------------------------------------------------------------- 1 | server.host: {{ dashboard_server_host }} 2 | server.port: {{ dashboard_server_port }} 3 | opensearch.hosts: 4 | {% for item in indexer_cluster_nodes %} 5 | - https://{{ item }}:{{ indexer_http_port }} 6 | {% endfor %} 7 | opensearch.ssl.verificationMode: certificate 8 | opensearch.requestHeadersWhitelist: ["securitytenant","Authorization"] 9 | opensearch_security.multitenancy.enabled: false 10 | opensearch_security.readonly_mode.roles: ["kibana_read_only"] 11 | server.ssl.enabled: true 12 | server.ssl.key: "/etc/wazuh-dashboard/certs/{{ dashboard_node_name }}-key.pem" 13 | server.ssl.certificate: "/etc/wazuh-dashboard/certs/{{ dashboard_node_name }}.pem" 14 | opensearch.ssl.certificateAuthorities: ["/etc/wazuh-dashboard/certs/root-ca.pem"] 15 | uiSettings.overrides.defaultRoute: /app/wz-home 16 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-dashboard/vars/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dashboard_version: 5.0.0 3 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Cluster Settings 3 | indexer_version: 5.0.0 4 | 5 | single_node: false 6 | indexer_node_name: node-1 7 | indexer_cluster_name: wazuh 8 | indexer_network_host: '0.0.0.0' 9 | 10 | indexer_node_master: true 11 | indexer_node_data: true 12 | indexer_node_ingest: true 13 | indexer_start_timeout: 90 14 | 15 | indexer_cluster_nodes: 16 | - 127.0.0.1 17 | indexer_discovery_nodes: 18 | - 127.0.0.1 19 | 20 | local_certs_path: "{{ playbook_dir }}/indexer/certificates" 21 | 22 | # 
Minimum master nodes in cluster, 2 for 3 nodes Wazuh indexer cluster 23 | minimum_master_nodes: 2 24 | 25 | # Configure hostnames for Wazuh indexer nodes 26 | # Example es1.example.com, es2.example.com 27 | domain_name: wazuh.com 28 | 29 | indexer_sec_plugin_conf_path: /etc/wazuh-indexer/opensearch-security 30 | indexer_sec_plugin_tools_path: /usr/share/wazuh-indexer/plugins/opensearch-security/tools 31 | indexer_conf_path: /etc/wazuh-indexer 32 | indexer_index_path: /var/lib/wazuh-indexer/ 33 | 34 | # Security password 35 | indexer_custom_user: "" 36 | indexer_custom_user_role: "admin" 37 | 38 | # Set JVM memory limits 39 | indexer_jvm_xms: null 40 | 41 | indexer_http_port: 9200 42 | 43 | indexer_admin_password: changeme 44 | dashboard_password: changeme 45 | 46 | # Deployment settings 47 | generate_certs: true 48 | perform_installation: true 49 | 50 | indexer_nolog_sensible: true 51 | 52 | # Docker image for certificates generation on macOS 53 | wazuh_certs_tool_docker: "wazuh/wazuh-cert-tool:{{ indexer_version }}" 54 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart wazuh-indexer 3 | service: 4 | name: wazuh-indexer 5 | state: restarted 6 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Wazuh 4 | description: Installing and maintaining Wazuh indexer. 5 | company: wazuh.com 6 | license: license (GPLv3) 7 | min_ansible_version: 2.0 8 | platforms: 9 | - name: EL 10 | versions: 11 | - all 12 | - name: Ubuntu 13 | versions: 14 | - all 15 | - name: Debian 16 | versions: 17 | - all 18 | - name: Fedora 19 | versions: 20 | - all 21 | galaxy_tags: 22 | - web 23 | - system 24 | - monitoring 25 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | 4 | - name: Update cache 5 | apt: 6 | update_cache: yes 7 | 8 | - name: Debian 9 (Stretch) 9 | when: (ansible_facts['distribution'] == "Debian" and ansible_facts['distribution_major_version'] == "9") 10 | block: 11 | 12 | - name: Install Wazuh indexer dependencies 13 | apt: 14 | name: [ 15 | 'unzip', 'wget', 'curl', 'apt-transport-https', software-properties-common 16 | ] 17 | state: present 18 | 19 | - name: Add Wazuh indexer repository 20 | block: 21 | - name: Add apt repository signing key 22 | get_url: 23 | url: "{{ wazuh_repo.gpg }}" 24 | dest: "{{ wazuh_repo.path }}" 25 | 26 | - name: Import Wazuh repository GPG key 27 | command: "gpg --no-default-keyring --keyring gnupg-ring:{{ wazuh_repo.keyring_path }} --import {{ wazuh_repo.path }}" 28 | args: 29 | creates: "{{ wazuh_repo.keyring_path }}" 30 | 31 | - name: Set permissions for Wazuh repository GPG key 32 | file: 33 | path: "{{ wazuh_repo.keyring_path }}" 34 | mode: '0644' 35 | 36 | - name: Add Wazuh indexer repository 37 | apt_repository: 38 | repo: "{{ wazuh_repo.apt }}" 39 | state: present 40 | filename: 'wazuh-indexer' 41 | update_cache: yes 42 | 43 | - name: Install Wazuh indexer 44 | apt: 45 | name: wazuh-indexer={{ indexer_version }}-1 46 | state: present 47 | register: install 48 | 
tags: install -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/tasks/RMRedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: RedHat/CentOS/Fedora | Remove Wazuh indexer repository (and clean up left-over metadata) 3 | yum_repository: 4 | name: wazuh_repo 5 | state: absent 6 | changed_when: false 7 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | 4 | - name: RedHat/CentOS/Fedora | Add Wazuh indexer repo 5 | yum_repository: 6 | name: wazuh_repo 7 | description: Wazuh yum repository 8 | baseurl: "{{ wazuh_repo.yum }}" 9 | gpgkey: "{{ wazuh_repo.gpg }}" 10 | gpgcheck: true 11 | changed_when: false 12 | 13 | 14 | 15 | - name: Amazon Linux | Configure system settings 16 | block: 17 | - name: Install Amazon extras in Amazon Linux 2 18 | yum: 19 | name: amazon-linux-extras 20 | state: present 21 | when: 22 | - ansible_distribution == 'Amazon' 23 | - ansible_distribution_major_version == '2' 24 | 25 | - name: Configure vm.max_map_count 26 | lineinfile: 27 | line: "vm.max_map_count=262144" 28 | dest: "/etc/sysctl.conf" 29 | insertafter: EOF 30 | create: true 31 | become: yes 32 | 33 | - name: Update vm.max_map_count 34 | shell: sysctl -p 35 | become: yes 36 | 37 | when: 38 | - ansible_distribution == 'Amazon' 39 | 40 | - name: RedHat/CentOS/Fedora | Install Indexer dependencies 41 | yum: 42 | name: "{{ packages }}" 43 | vars: 44 | packages: 45 | - wget 46 | - unzip 47 | 48 | - name: Install Wazuh indexer 49 | package: 50 | name: wazuh-indexer-{{ indexer_version }} 51 | state: present 52 | register: install 53 | tags: install 54 | 55 | tags: 56 | - install 57 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/templates/config.yml.j2: -------------------------------------------------------------------------------- 1 | nodes: 2 | # Indexer server nodes 3 | indexer: 4 | {% for (key,value) in instances.items() %} 5 | {% if (value.role is defined and value.role == 'indexer') %} 6 | - name: {{ value.name }} 7 | ip: {{ value.ip }} 8 | {% endif %} 9 | {% endfor %} 10 | 11 | # Wazuh server nodes 12 | # Use node_type only with more than one Wazuh manager 13 | server: 14 | {% for (key,value) in instances.items() %} 15 | {% if (value.role is defined and value.role == 'wazuh') %} 16 | - name: {{ value.name }} 17 | ip: {{ value.ip }} 18 | {% endif %} 19 | {% if (value.node_type is defined and value.node_type == 'master') %} 20 | node_type: master 21 | {% elif (value.node_type is defined and value.node_type == 'worker') %} 22 | node_type: worker 23 | {% endif %} 24 | {% endfor %} 25 | 26 | # Dashboard node 27 | dashboard: 28 | {% for (key,value) in instances.items() %} 29 | {% if (value.role is defined and value.role == 'dashboard') %} 30 | - name: {{ value.name }} 31 | ip: {{ value.ip }} 32 | {% endif %} 33 | {% endfor %} 34 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/templates/disabledlog4j.options.j2: -------------------------------------------------------------------------------- 1 | ## JVM configuration 2 | 3 | ## Disable log4j 4 | -Dlog4j2.formatMsgNoLookups=true 
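The config.yml.j2 template above (and tlsconfig.yml.j2 further below) builds the certificate configuration by iterating over an `instances` dictionary, but that variable is not defined anywhere in this listing. A minimal sketch of the shape it is expected to have is shown here; every hostname, IP address and role below is an illustrative assumption, not part of the repository.

```yaml
# Hypothetical example: the `instances` dictionary consumed by config.yml.j2
# and tlsconfig.yml.j2. Keys are arbitrary labels; name, ip, role and
# node_type are the fields the templates actually read.
instances:
  node1:
    name: node-1          # assumed indexer node
    ip: 192.168.1.10
    role: indexer
  node2:
    name: manager-1       # assumed Wazuh manager
    ip: 192.168.1.20
    role: wazuh
    node_type: master     # only meaningful with more than one manager
  node3:
    name: dashboard-1     # assumed dashboard node
    ip: 192.168.1.30
    role: dashboard
```

With a dictionary like this, the template emits one entry per indexer, Wazuh server and dashboard node, and `node_type` is rendered only for instances that define it.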
-------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/templates/internal_users.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | # This is the internal user database 3 | # The hash value is a bcrypt hash and can be generated with plugin/tools/hash.sh 4 | 5 | _meta: 6 | type: "internalusers" 7 | config_version: 2 8 | 9 | # Define your internal users here 10 | 11 | admin: 12 | hash: "{{ indexer_admin_password }}" 13 | reserved: true 14 | backend_roles: 15 | - "admin" 16 | description: "admin user" 17 | 18 | kibanaserver: 19 | hash: "{{ dashboard_password }}" 20 | reserved: true 21 | description: "kibanaserver user" 22 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/roles/wazuh/wazuh-indexer/templates/tlsconfig.yml.j2: -------------------------------------------------------------------------------- 1 | ca: 2 | root: 3 | dn: CN=root.ca.{{ domain_name }},OU=CA,O={{ domain_name }}\, Inc.,DC={{ domain_name }} 4 | keysize: 2048 5 | validityDays: 730 6 | pkPassword: none 7 | file: root-ca.pem 8 | 9 | ### Default values and global settings 10 | defaults: 11 | validityDays: 730 12 | pkPassword: none 13 | # Set this to true in order to generate config and certificates for 14 | # the HTTP interface of nodes 15 | httpsEnabled: true 16 | reuseTransportCertificatesForHttp: false 17 | verifyHostnames: false 18 | resolveHostnames: false 19 | 20 | ### 21 | ### Nodes 22 | ### 23 | # 24 | # Specify the nodes of your ES cluster here 25 | # 26 | nodes: 27 | {% for (key,value) in instances.items() %} 28 | {% if (value.ip is defined and value.ip | length > 0) %} 29 | - name: {{ value.name }} 30 | dn: CN={{ value.name }}.{{ domain_name }},OU=Ops,O={{ domain_name }}\, Inc.,DC={{ domain_name }} 31 | dns: {{ value.name }}.{{ domain_name }} 32 | ip: {{ value.ip }} 33 | {% endif %} 34 | {% endfor %} 35 | ### 36 | ### Clients 37 | ### 38 | # 39 | # Specify the clients that shall access your ES cluster with certificate authentication here 40 | # 41 | # At least one client must be an admin user (i.e., a super-user). 
Admin users can 42 | # be specified with the attribute admin: true 43 | # 44 | clients: 45 | - name: admin 46 | dn: CN=admin.{{ domain_name }},OU=Ops,O={{ domain_name }}\, Inc.,DC={{ domain_name }} 47 | admin: true 48 | -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/wazuh-agent-install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Wazuh Agent installation playbook 3 | 4 | - hosts: agent 5 | become: yes 6 | roles: 7 | - role: wazuh-ansible/roles/ansible-wazuh-agent 8 | vars: 9 | wazuh_agent_config: 10 | server: 11 | - "{{ hostvars.manager.private_ip }}" -------------------------------------------------------------------------------- /Ansible/wazuh-ansible/wazuh-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Certificates generation 3 | - hosts: indexer 4 | roles: 5 | - role: wazuh-ansible/roles/wazuh-indexer 6 | indexer_network_host: "{{ private_ip }}" 7 | indexer_cluster_nodes: 8 | - "{{ hostvars.indexer.private_ip }}" 9 | indexer_discovery_nodes: 10 | - "{{ hostvars.indexer.private_ip }}" 11 | become: yes 12 | become_user: root 13 | vars: 14 | indexer_node_master: true 15 | 16 | # Wazuh Manager installation 17 | - hosts: manager 18 | roles: 19 | - role: wazuh-ansible/roles/ansible-wazuh-manager 20 | - role: wazuh-ansible/roles/ansible-filebeat-oss # Filebeat ni o'rnatish 21 | become: yes 22 | become_user: root 23 | vars: 24 | wazuh_manager_config: 25 | connection: 26 | - type: 'secure' 27 | port: '1514' 28 | protocol: 'tcp' 29 | queue_size: 131072 30 | api: 31 | https: 'yes' 32 | cluster: 33 | disable: 'no' 34 | node_name: 'master' 35 | node_type: 'master' 36 | key: 'your-cluster-key' 37 | nodes: 38 | - "{{ hostvars.manager.private_ip }}" 39 | hidden: 'no' 40 | wazuh_api_users: 41 | - username: custom-user 42 | password: SecretPassword1! 43 | filebeat_output_indexer_hosts: 44 | - "{{ hostvars.indexer.private_ip }}" 45 | 46 | # Wazuh Dashboard installation 47 | - hosts: dashboard 48 | roles: 49 | - role: wazuh-ansible/roles/wazuh-dashboard 50 | become: yes 51 | become_user: root 52 | vars: 53 | wazuh_api_credentials: 54 | - id: default 55 | url: https://{{ hostvars.manager.private_ip }} 56 | port: 55000 57 | username: custom-user 58 | password: SecretPassword1! 
59 | dashboard_node_name: 'dashboard-node' -------------------------------------------------------------------------------- /Ansible/zabbix/README.md: -------------------------------------------------------------------------------- 1 | How to Run These Playbooks: 2 | Install Zabbix Server: 3 | 4 | ```bash 5 | ansible-galaxy collection install ansible.posix 6 | ansible-galaxy collection install community.general 7 | ansible-galaxy collection install ansible.netcommon 8 | ansible-galaxy collection install community.zabbix 9 | ``` 10 | 11 | ```bash 12 | ansible-playbook -i inventory.ini zabbix_server.yml 13 | ``` 14 | Install Zabbix Agents: 15 | 16 | ```bash 17 | ansible-playbook -i inventory.ini zabbix_agent.yml 18 | ``` -------------------------------------------------------------------------------- /Ansible/zabbix/inventory.ini: -------------------------------------------------------------------------------- 1 | [zabbix_server] 2 | zabbix-server ansible_host=167.172.139.139 ansible_user=root 3 | 4 | [zabbix_agents] 5 | agent1 ansible_host=167.71.105.192 ansible_user=root 6 | agent2 ansible_host=161.35.3.173 ansible_user=root 7 | agent3 ansible_host=167.172.139.139 ansible_user=root 8 | -------------------------------------------------------------------------------- /Ansible/zabbix/vars.yml: -------------------------------------------------------------------------------- 1 | zabbix_server_db: mysql 2 | zabbix_server_dbhost: localhost 3 | zabbix_server_dbname: zabbix 4 | zabbix_server_dbuser: zabbix 5 | zabbix_server_dbpassword: kshak92scka 6 | zabbix_agent_server: 167.172.139.139 7 | zabbix_agent_interfaces: 8 | - type: "agent" 9 | main: 1 10 | useip: 1 11 | ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /Ansible/zabbix/zabbix-hosts-management.yml: -------------------------------------------------------------------------------- 1 | - name: Zabbix Hosts Management 2 | hosts: localhost 3 | vars_files: 4 | - vars.yml 5 | tasks: 6 | - name: Create host groups 7 | community.zabbix.zabbix_group: 8 | host_group: "Production Servers" 9 | 10 | - name: Create hosts in Zabbix 11 | community.zabbix.zabbix_host: 12 | host_name: "{{ inventory_hostname }}" 13 | host_groups: "Production Servers" 14 | interfaces: 15 | - type: 1 16 | main: 1 17 | useip: true 18 | ip: "{{ ansible_host }}" 19 | dns: "" 20 | port: 10050 -------------------------------------------------------------------------------- /Ansible/zabbix/zabbix-templates-management.yml: -------------------------------------------------------------------------------- 1 | - name: Zabbix Templates Management 2 | hosts: localhost 3 | vars_files: 4 | - vars.yml 5 | tasks: 6 | - name: Link Zabbix template 7 | community.zabbix.zabbix_host: 8 | host_name: "{{ inventory_hostname }}" 9 | link_templates: "Linux by Zabbix agent" -------------------------------------------------------------------------------- /Ansible/zabbix/zabbix_agent.yml: -------------------------------------------------------------------------------- 1 | - name: Install and Configure Zabbix Agents 2 | hosts: zabbix_agents 3 | become: true 4 | vars_files: 5 | - vars.yml 6 | tasks: 7 | - name: Install Zabbix agent 8 | community.zabbix.zabbix_agent: 9 | zabbix_agent_server: "{{ zabbix_server_ip }}" 10 | zabbix_agent_listenport: "{{ zabbix_agent_listenport }}" 11 | zabbix_agent_tlsconnect: "{{ zabbix_agent_tlsconnect }}" 12 | zabbix_agent_tlsaccept: "{{ zabbix_agent_tlsaccept }}" 13 | 14 | - name: Start Zabbix agent service 15 | 
ansible.builtin.service: 16 | name: zabbix-agent 17 | state: started 18 | enabled: true -------------------------------------------------------------------------------- /Ansible/zabbix/zabbix_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Zabbix Server 3 | hosts: zabbix_server 4 | become: yes 5 | vars_files: 6 | - vars.yml 7 | roles: 8 | - role: community.zabbix.zabbix_server -------------------------------------------------------------------------------- /Ansible/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | ansible-galaxy role install sleighzy.zookeeper 3 | ``` -------------------------------------------------------------------------------- /Ansible/zookeeper/install.yml: -------------------------------------------------------------------------------- 1 | - hosts: zookeeper-nodes 2 | become: true 3 | vars: 4 | zookeeper_mirror: "https://dlcdn.apache.org/zookeeper" 5 | zookeeper_version: "3.9.2" 6 | zookeeper_package: "apache-zookeeper-{{ zookeeper_version }}-bin.tar.gz" 7 | zookeeper_group: "zookeeper" 8 | zookeeper_user: "zookeeper" 9 | zookeeper_root_dir: "/usr/share" 10 | zookeeper_install_dir: "{{ zookeeper_root_dir }}/apache-zookeeper-{{zookeeper_version}}" 11 | zookeeper_dir: "{{ zookeeper_root_dir }}/zookeeper" 12 | zookeeper_data_dir: "/var/lib/zookeeper" 13 | zookeeper_log_dir: "/var/log/zookeeper" 14 | zookeeper_data_log_dir: "/var/lib/zookeeper" 15 | zookeeper_client_port: 2181 16 | zookeeper_leader_port: 2888 17 | zookeeper_election_port: 3888 18 | zookeeper_firewalld: false 19 | zookeeper_config_params: 20 | tickTime: 2000 21 | initLimit: 10 22 | syncLimit: 5 23 | autopurge.snapRetainCount: 3 24 | autopurge.purgeInterval: 1 25 | clientPortAddress: 0.0.0.0 26 | zookeeper_servers_use_inventory_hostname: true 27 | 28 | roles: 29 | - sleighzy.zookeeper -------------------------------------------------------------------------------- /Ansible/zookeeper/inventory.ini: -------------------------------------------------------------------------------- 1 | [zookeeper-nodes] 2 | rockylinux ansible_host=137.184.143.123 ansible_user=root zookeeper_id=1 3 | debian ansible_host=165.22.3.122 ansible_user=root zookeeper_id=2 4 | ubuntu ansible_host=159.223.131.6 ansible_user=root zookeeper_id=3 -------------------------------------------------------------------------------- /Ansible/zookeeper/java_8_11.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: java_servers 3 | become: yes 4 | tasks: 5 | - name: Install Java 8 or Java 11 on RHEL-based systems 6 | include_role: 7 | name: geerlingguy.java 8 | vars: 9 | java_packages: 10 | - java-1.8.0-openjdk 11 | - java-11-openjdk 12 | when: ansible_os_family == 'RedHat' 13 | 14 | - name: Install Java 8 or Java 11 on Debian-based systems 15 | include_role: 16 | name: geerlingguy.java 17 | vars: 18 | java_packages: 19 | - openjdk-11-jdk 20 | when: ansible_os_family == 'Debian' 21 | 22 | - name: Install Java 8 or Java 11 on Ubuntu systems 23 | include_role: 24 | name: geerlingguy.java 25 | vars: 26 | java_packages: 27 | - openjdk-8-jdk 28 | - openjdk-11-jdk 29 | when: ansible_distribution == 'Ubuntu' -------------------------------------------------------------------------------- /Terraform/GCP/enable-gcp-services/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.project_id 3 
| region = var.region 4 | } 5 | 6 | resource "google_project_service" "enable_services" { 7 | for_each = { for service, enabled in var.gcp_services : service => enabled if enabled } 8 | project = var.project_id 9 | service = each.key 10 | disable_on_destroy = true 11 | disable_dependent_services = true 12 | } 13 | 14 | output "enabled_services" { 15 | value = [for service in google_project_service.enable_services : service.service] 16 | } -------------------------------------------------------------------------------- /Terraform/GCP/enable-gcp-services/terraform.tfvars: -------------------------------------------------------------------------------- 1 | project_id = "labaratoriya" 2 | region = "us-central1" 3 | 4 | gcp_services = { 5 | "compute.googleapis.com" = true, 6 | "container.googleapis.com" = true, 7 | "containerregistry.googleapis.com" = true, 8 | "bigquery.googleapis.com" = true, 9 | "sqladmin.googleapis.com" = true 10 | } -------------------------------------------------------------------------------- /Terraform/GCP/enable-gcp-services/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | description = "The ID of the GCP project" 3 | type = string 4 | } 5 | 6 | variable "region" { 7 | description = "The region of the GCP project" 8 | type = string 9 | default = "us-central1" 10 | } 11 | 12 | variable "gcp_services" { 13 | description = "List of GCP services to enable or disable" 14 | type = map(bool) 15 | default = { 16 | "compute.googleapis.com" = true, # Compute Engine 17 | "container.googleapis.com" = true, # Kubernetes Engine 18 | "containerregistry.googleapis.com" = true, # Container Registry 19 | "bigquery.googleapis.com" = true, # BigQuery 20 | "sqladmin.googleapis.com" = true # Cloud SQL 21 | } 22 | } -------------------------------------------------------------------------------- /Terraform/GCP/firewall/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.project_id 3 | region = var.region 4 | } 5 | 6 | # Using existing VPC network (no need to create a new one) 7 | data "google_compute_network" "existing_vpc_network" { 8 | name = var.network_name # Your existing network name 9 | } 10 | 11 | # Create a firewall rule for HTTP/HTTPS ports (using dynamic ports) 12 | resource "google_compute_firewall" "allow_http_https" { 13 | name = "allow-http-https" 14 | network = data.google_compute_network.existing_vpc_network.self_link 15 | 16 | allow { 17 | protocol = "tcp" 18 | ports = var.allowed_ports 19 | } 20 | 21 | source_ranges = var.source_ranges 22 | target_tags = ["web-server"] # Only for specific VM instances 23 | } 24 | 25 | # Create a firewall rule for SSH (using dynamic SSH ports) 26 | resource "google_compute_firewall" "allow_ssh" { 27 | name = "allow-ssh" 28 | network = data.google_compute_network.existing_vpc_network.self_link 29 | 30 | allow { 31 | protocol = "tcp" 32 | ports = var.ssh_ports 33 | } 34 | 35 | source_ranges = var.source_ranges 36 | target_tags = ["ssh-access"] # Specific tag for SSH access 37 | } 38 | 39 | # # Create a Compute Engine VM instance 40 | # resource "google_compute_instance" "default" { 41 | # name = var.instance_name 42 | # machine_type = var.instance_type 43 | # zone = var.zone 44 | 45 | # boot_disk { 46 | # initialize_params { 47 | # image = "debian-cloud/debian-11" # Hardcoded OS image 48 | # } 49 | # } 50 | 51 | # network_interface { 52 | # network = 
data.google_compute_network.existing_vpc_network.self_link 53 | 54 | # access_config { 55 | # # This provides a public IP for the VM 56 | # } 57 | # } 58 | 59 | # tags = ["web-server", "ssh-access"] # Static tags 60 | # } 61 | -------------------------------------------------------------------------------- /Terraform/GCP/firewall/terraform.tfvars: -------------------------------------------------------------------------------- 1 | project_id = "labaratoriya" 2 | region = "us-central1" 3 | zone = "us-central1-a" 4 | network_name = "default" 5 | allowed_ports = ["8080", "443"] # Allowed ports for firewall rules 6 | ssh_ports = ["22"] 7 | source_ranges = ["0.0.0.0/0"] # Allow from all sources 8 | # instance_name = "custom-vm" 9 | # instance_type = "n1-standard-1" 10 | # os_image = "ubuntu-os-cloud/ubuntu-2004-lts" 11 | # instance_tags = ["web-server", "ssh-access"] 12 | -------------------------------------------------------------------------------- /Terraform/GCP/gcp-project-creation/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | credentials = file(var.credentials_file) 3 | project = var.project_id 4 | region = var.region 5 | } 6 | 7 | # GCP loyihasini yaratish 8 | resource "google_project" "my_project" { 9 | name = var.project_name 10 | project_id = var.project_id 11 | # org_id = var.organization_id 12 | billing_account = var.billing_account 13 | } 14 | 15 | # Xizmatlarni yoqish (optional) 16 | resource "google_project_service" "enable_services" { 17 | for_each = toset(var.services_to_enable) 18 | project = google_project.my_project.project_id 19 | service = each.key 20 | } 21 | -------------------------------------------------------------------------------- /Terraform/GCP/gcp-project-creation/outputs.tf: -------------------------------------------------------------------------------- 1 | output "project_id" { 2 | description = "The project ID of the newly created project" 3 | value = google_project.my_project.project_id 4 | } 5 | 6 | output "enabled_services" { 7 | value = [for service in google_project_service.enable_services : service.id] 8 | } -------------------------------------------------------------------------------- /Terraform/GCP/gcp-project-creation/terraform.tfvars: -------------------------------------------------------------------------------- 1 | credentials_file = "/home/ismoilovdev/Downloads/labaratoriya-a339e7260ed3.json" 2 | project_name = "expriment2" 3 | project_id = "expriment2" 4 | # organization_id = "0" 5 | billing_account = "01CDC2-DE3C4B-CD9E77" 6 | region = "us-central1" 7 | services_to_enable = [ 8 | "compute.googleapis.com", 9 | "container.googleapis.com" 10 | ] -------------------------------------------------------------------------------- /Terraform/GCP/gcp-project-creation/variables.tf: -------------------------------------------------------------------------------- 1 | variable "credentials_file" { 2 | description = "Path to the service account credentials file" 3 | type = string 4 | } 5 | 6 | variable "project_name" { 7 | description = "The name of the GCP project" 8 | type = string 9 | } 10 | 11 | variable "project_id" { 12 | description = "The unique ID for the GCP project" 13 | type = string 14 | } 15 | 16 | # variable "organization_id" { 17 | # description = "The organization ID where the project will be created" 18 | # type = string 19 | # } 20 | 21 | variable "billing_account" { 22 | description = "The billing account ID to link with the project" 23 | type = string 24 | } 25 | 
26 | variable "region" { 27 | description = "The GCP region" 28 | type = string 29 | default = "us-central1" 30 | } 31 | 32 | variable "services_to_enable" { 33 | description = "List of services to enable after project creation" 34 | type = list(string) 35 | default = [ 36 | "compute.googleapis.com", # Compute Engine 37 | "container.googleapis.com" # Kubernetes Engine 38 | ] 39 | } -------------------------------------------------------------------------------- /Terraform/GCP/vm-template/README.md: -------------------------------------------------------------------------------- 1 | # GCP VM Terraform Template 2 | 3 | This repository contains a Terraform template to create 3 Virtual Machines (VMs) on Google Cloud Platform (GCP). 4 | 5 | ## Features 🌟 6 | 7 | - **Creates 3 VMs** - Dynamically assigns names, disk sizes, and machine types to each VM. 8 | - **Ubuntu OS** - Uses Ubuntu 20.04 Focal OS image for the VMs. 9 | - **Auto Disk Delete** - Disks are automatically deleted when the VM is deleted. 10 | - **SSH Keys** - Connect to the VMs via SSH keys. 11 | 12 | ## Usage 📘 13 | 14 | Follow these steps to use this template in your project: 15 | 16 | **Initialize Terraform:** 17 | 18 | ```bash 19 | terraform init 20 | ``` 21 | **Review the Terraform plan:** 22 | 23 | ```bash 24 | terraform plan 25 | ``` 26 | Apply the Terraform configuration to create the resources: 27 | 28 | ```bash 29 | terraform apply 30 | ``` 31 | 32 | Destroy 33 | 34 | ```bash 35 | terraform destroy 36 | ``` -------------------------------------------------------------------------------- /Terraform/GCP/vm-template/main.tf: -------------------------------------------------------------------------------- 1 | provider "google" { 2 | project = var.project 3 | region = var.region 4 | } 5 | 6 | resource "google_compute_instance" "custom-vm" { 7 | count = 3 8 | name = var.vm_names[count.index] 9 | 10 | machine_type = var.machine_types[count.index] 11 | 12 | boot_disk { 13 | auto_delete = true 14 | initialize_params { 15 | image = var.os_image 16 | size = var.disk_size[count.index] 17 | type = "pd-balanced" 18 | } 19 | } 20 | 21 | network_interface { 22 | access_config { 23 | network_tier = "PREMIUM" 24 | } 25 | 26 | subnetwork = var.subnetwork 27 | } 28 | 29 | metadata = { 30 | ssh-keys = var.ssh_keys 31 | } 32 | 33 | zone = var.zone 34 | } -------------------------------------------------------------------------------- /Terraform/GCP/vm-template/terraform.tfvars: -------------------------------------------------------------------------------- 1 | project = "labaratoriya" 2 | region = "us-central1" 3 | vm_names = ["config-server1", "config-server2", "config-server3"] 4 | machine_types = ["e2-medium", "e2-medium", "e2-medium"] 5 | disk_size = [50, 50, 50] 6 | zone = "us-central1-a" 7 | subnetwork = "projects/labaratoriya/regions/us-central1/subnetworks/default" 8 | ssh_keys = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOsX4I3rwJr+/NL3aPA7rIS/4/XtlnJRIpn/0C9T3os0 ismoilovdev@vivobook" 9 | os_image = "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20240830" -------------------------------------------------------------------------------- /Terraform/GCP/vm-template/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project" { 2 | type = string 3 | default = "labaratoriya" 4 | } 5 | 6 | variable "region" { 7 | type = string 8 | default = "us-central1" 9 | } 10 | 11 | variable "vm_names" { 12 | type = list(string) 13 | default = ["config-server1", "config-server2", 
"config-server3"] 14 | } 15 | 16 | variable "machine_types" { 17 | type = list(string) 18 | default = ["e2-medium", "e2-medium", "e2-medium"] 19 | } 20 | 21 | variable "disk_size" { 22 | type = list(number) 23 | default = [50, 50, 50] 24 | } 25 | 26 | variable "zone" { 27 | type = string 28 | default = "us-central1-a" 29 | } 30 | 31 | variable "subnetwork" { 32 | type = string 33 | default = "projects/labaratoriya/regions/us-central1/subnetworks/default" 34 | } 35 | 36 | variable "ssh_keys" { 37 | type = string 38 | description = "SSH public key for accessing VMs" 39 | default = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOsX4I3rwJr+/NL3aPA7rIS/4/XtlnJRIpn/0C9T3os0 ismoilovdev@vivobook" 40 | } 41 | 42 | variable "os_image" { 43 | type = string 44 | description = "The OS image to use for the VMs" 45 | default = "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20240830" 46 | } -------------------------------------------------------------------------------- /Terraform/GCP/vpc-network/README.md: -------------------------------------------------------------------------------- 1 | # GCP VPC Network Setup with Terraform 2 | 3 | ![Terraform](https://img.shields.io/badge/Terraform-0.14%2B-brightgreen) ![GCP](https://img.shields.io/badge/GCP-Google%20Cloud-orange) 4 | 5 | ## 📋 Prerequisites 6 | 7 | Before you begin, ensure you have the following: 8 | 9 | - **Terraform**: Install [Terraform](https://www.terraform.io/downloads.html) version 0.14 or later. 10 | - **GCP Account**: A Google Cloud Platform account with permissions to create resources. 11 | - **gcloud CLI**: Install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) and authenticate using: 12 | 13 | ```bash 14 | gcloud auth login 15 | ``` 16 | 17 | ## 🚀 Usage 18 | ### 1.Update Variables: 19 | Edit the `terraform.tfvars` file to set your project ID, region, and other parameters. 20 | 21 | ### 2.Initialize Terraform: 22 | Run the following command to initialize the Terraform configuration: 23 | ```bash 24 | terraform init 25 | ``` 26 | ### 3.Plan the Deployment: 27 | Check what resources Terraform will create: 28 | 29 | ```bash 30 | terraform plan 31 | ``` 32 | ### 4.Apply the Configuration: 33 | Deploy the resources to GCP: 34 | 35 | ```bash 36 | terraform apply 37 | ``` 38 | Confirm the action by typing `yes` when prompted. 39 | 40 | ### 3 Verify Resources: 41 | Log in to the Google Cloud Console and navigate to the VPC network section to verify that your VPC and subnets have been created. 42 | 43 | ## 📄 Resources 44 | * **VPC Network:** A custom VPC network created with specified MTU and routing mode. 45 | * **Subnets:** One public subnet configured with CIDR range `10.0.0.0/24`. 46 | * **Firewall Rules:** 47 | * **Allow Custom Protocols:** Allows all traffic from `10.0.0.0/24.` 48 | * **Allow ICMP:** Allows ICMP traffic from anywhere. 49 | * **Allow RDP:** Allows RDP traffic (TCP 3389) from anywhere. 50 | * **Allow SSH:** Allows SSH traffic (TCP 22) from anywhere. 
-------------------------------------------------------------------------------- /Terraform/GCP/vpc-network/terraform.tfvars: -------------------------------------------------------------------------------- 1 | project_id = "labaratoriya" 2 | region = "asia-east1" 3 | vpc_name = "my-vpc" 4 | public_subnet_cidr = "10.0.0.0/24" 5 | custom_source_range = "10.0.0.0/24" 6 | icmp_source_range = "0.0.0.0/0" 7 | rdp_source_range = "0.0.0.0/0" 8 | ssh_source_range = "0.0.0.0/0" 9 | -------------------------------------------------------------------------------- /Terraform/GCP/vpc-network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | description = "GCP project ID" 3 | type = string 4 | } 5 | 6 | variable "region" { 7 | description = "The GCP region" 8 | type = string 9 | } 10 | 11 | variable "vpc_name" { 12 | description = "Name of the VPC network" 13 | type = string 14 | } 15 | 16 | variable "public_subnet_cidr" { 17 | description = "CIDR range for the public subnet" 18 | type = string 19 | } 20 | 21 | variable "mtu" { 22 | description = "MTU size for the VPC network" 23 | type = number 24 | default = 1460 25 | } 26 | 27 | variable "routing_mode" { 28 | description = "Routing mode for the VPC network" 29 | type = string 30 | default = "REGIONAL" 31 | } 32 | 33 | variable "firewall_priority" { 34 | description = "Priority for firewall rules" 35 | type = number 36 | default = 65534 37 | } 38 | 39 | variable "custom_source_range" { 40 | description = "Source range for custom firewall rule" 41 | type = string 42 | } 43 | 44 | variable "icmp_source_range" { 45 | description = "Source range for ICMP firewall rule" 46 | type = string 47 | } 48 | 49 | variable "rdp_source_range" { 50 | description = "Source range for RDP firewall rule" 51 | type = string 52 | } 53 | 54 | variable "ssh_source_range" { 55 | description = "Source range for SSH firewall rule" 56 | type = string 57 | } --------------------------------------------------------------------------------
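The vpc-network module's main.tf does not appear in this listing; only the README, terraform.tfvars and variables.tf above are shown. As a rough orientation, a minimal sketch of how those variables could be wired into the resources the README describes might look like the following. Resource names and the exact rule set are assumptions, not the module's actual code.

```hcl
# Hypothetical sketch based on the README and variables.tf; the real
# vpc-network main.tf is not included in this listing.
provider "google" {
  project = var.project_id
  region  = var.region
}

# Custom-mode VPC using the MTU and routing-mode variables
resource "google_compute_network" "vpc" {
  name                    = var.vpc_name
  auto_create_subnetworks = false
  mtu                     = var.mtu
  routing_mode            = var.routing_mode
}

# Public subnet (10.0.0.0/24 in terraform.tfvars)
resource "google_compute_subnetwork" "public" {
  name          = "${var.vpc_name}-public"
  ip_cidr_range = var.public_subnet_cidr
  region        = var.region
  network       = google_compute_network.vpc.id
}

# SSH rule; the ICMP, RDP and custom-protocol rules follow the same pattern
resource "google_compute_firewall" "allow_ssh" {
  name          = "${var.vpc_name}-allow-ssh"
  network       = google_compute_network.vpc.name
  priority      = var.firewall_priority
  source_ranges = [var.ssh_source_range]

  allow {
    protocol = "tcp"
    ports    = ["22"]
  }
}

# ICMP from anywhere, mirroring the README's "Allow ICMP" rule
resource "google_compute_firewall" "allow_icmp" {
  name          = "${var.vpc_name}-allow-icmp"
  network       = google_compute_network.vpc.name
  priority      = var.firewall_priority
  source_ranges = [var.icmp_source_range]

  allow {
    protocol = "icmp"
  }
}
```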