├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── SECURITY.md ├── docs ├── files │ ├── Picture 9.jpg │ ├── Picture1.jpg │ ├── Picture10.jpg │ ├── Picture11.jpg │ ├── Picture12.jpg │ ├── Picture13.jpg │ ├── Picture14.jpg │ ├── Picture15.jpg │ ├── Picture16.jpg │ ├── Picture17.jpg │ ├── Picture19.png │ ├── Picture2.jpg │ ├── Picture20.png │ ├── Picture21.png │ ├── Picture22.png │ ├── Picture23.png │ ├── Picture24.png │ ├── Picture25.png │ ├── Picture3.jpg │ ├── Picture4.jpg │ ├── Picture5.jpg │ ├── Picture6.jpg │ ├── Picture7.jpg │ ├── Picture8.jpg │ ├── Picture9.jpg │ ├── ocne01.png │ ├── ocne02.png │ ├── ocne03.png │ ├── ocne04.png │ ├── ocne05.png │ ├── test │ ├── vault1.png │ └── vault2.png ├── index.html └── test ├── playbooks ├── .gitkeep ├── AIDE │ ├── Readme.md │ ├── aide_create_new_baseline.yml │ ├── checkaide.yaml │ ├── installaide.yaml │ └── remove_aide.yaml ├── BTRFS │ ├── Readme.txt │ ├── create_adhoc_btrfs_snapshot.yaml │ ├── create_btrfs_snapshot_and_update.yaml │ ├── delete_btrfs_snapshot.yaml │ ├── list_btrfs_snapshots.yaml │ ├── rename_subvolume.yaml │ └── rollback_btrfs_snapshot.yaml ├── KSPLICE │ ├── README.md │ ├── collections │ │ └── requirements.yml │ ├── known-exploit-detection.yml │ ├── ksplice-uptrack-check.yml │ ├── ksplice-uptrack-offline.yml │ └── ksplice-uptrack-online.yml ├── Leapp │ ├── README.md │ ├── group_vars │ │ └── vars.yml │ ├── leapp_prepare.yml │ ├── leapp_preupgrade.yml │ ├── leapp_upgrade.yml │ └── post_upgrade.yml ├── OCI │ ├── Readme.txt │ ├── create_always_free_autonomous_database.yaml │ ├── list_buckets.yaml │ ├── list_oci_compartment.yaml │ └── oci_create_instance.yaml ├── OCNE │ ├── README.md │ ├── collections │ │ └── requirements.yml │ ├── deploy-mod-istio.yml │ ├── deploy-mod-metallb.yml │ ├── deploy-mod-ociccm-example.yml │ ├── deploy-mod-olm.yml │ ├── files │ │ ├── metallb-config.yaml │ │ ├── ocne-environment-example.yaml │ │ └── ocne-environment-ha-example.yaml │ ├── group_vars │ │ └── all-example.yml │ ├── inventories │ │ └── hosts-example.ini │ ├── ocne-downscale-cluster.yml │ ├── ocne-quick-install.yml │ └── ocne-upscale-cluster.yml ├── OLAM │ ├── .gitignore │ ├── cluster-plus-hop-node │ │ ├── .gitignore │ │ ├── README.md │ │ ├── ansible.cfg │ │ ├── group_vars │ │ │ └── all.yml.example │ │ ├── install.yml │ │ ├── inventory │ │ │ ├── hosts.ini.example │ │ │ ├── hosts.ini.example-4nodes │ │ │ └── local.ini │ │ ├── pingtest.yml │ │ ├── requirements.yml │ │ └── templates │ │ │ ├── nginx.conf.tpl │ │ │ └── receptor.conf.tpl │ └── single-node │ │ ├── .gitignore │ │ ├── README.md │ │ ├── ansible.cfg │ │ ├── group_vars │ │ └── all.yml.example │ │ ├── install.yml │ │ ├── inventory │ │ ├── hosts.ini.example │ │ └── local.ini │ │ ├── pingtest.yml │ │ ├── requirements.yml │ │ └── templates │ │ ├── nginx.conf.tpl │ │ └── receptor.conf.tpl ├── OLVM │ ├── README.md │ ├── default_vars.yml │ ├── inventory │ │ └── hosts-example.ini │ ├── olvm_clone_vm.yml │ ├── olvm_create_multiple_vms.yml │ ├── olvm_create_one_vm.yml │ ├── olvm_delete_vm.yml │ ├── olvm_list_vminfo.yml │ ├── olvm_migrate_vm.yml │ ├── olvm_rename_vm.yml │ └── ovirt_list_resources.yaml ├── OL_Admin │ ├── Readme.txt │ ├── adduser.yml │ ├── auditd.yml │ ├── collections │ │ └── requirements.yml │ ├── hello-world.yml │ ├── iptables-httpd.yaml │ ├── update_ol.yml │ └── vnc_install_configure.yaml └── STIG │ ├── README.md │ ├── collections │ └── requirements.yml │ ├── ol8-playbook-stig.yml │ └── openscap.yml └── templates └── vncserver-service.j2 /CONTRIBUTING.md: 
-------------------------------------------------------------------------------- 1 | # Contributing to this repository 2 | 3 | We welcome your contributions! There are multiple ways to contribute. 4 | 5 | ## Opening issues 6 | 7 | For bugs or enhancement requests, please file a GitHub issue unless it's 8 | security related. When filing a bug, remember that the better written the bug is, 9 | the more likely it is to be fixed. If you think you've found a security 10 | vulnerability, do not raise a GitHub issue; instead, follow the instructions in our 11 | [security policy](./SECURITY.md). 12 | 13 | ## Contributing code 14 | 15 | We welcome your code contributions. Before submitting code via a pull request, 16 | you will need to have signed the [Oracle Contributor Agreement][OCA] (OCA) and 17 | your commits need to include the following line using the name and e-mail 18 | address you used to sign the OCA: 19 | 20 | ```text 21 | Signed-off-by: Your Name 22 | ``` 23 | 24 | This can be automatically added to pull requests by committing with `--signoff` 25 | or `-s`, e.g. 26 | 27 | ```text 28 | git commit --signoff 29 | ``` 30 | 31 | Only pull requests from committers that can be verified as having signed the OCA 32 | can be accepted. 33 | 34 | ## Pull request process 35 | 36 | 1. Ensure there is an issue created to track and discuss the fix or enhancement 37 | you intend to submit. 38 | 1. Fork this repository. 39 | 1. Create a branch in your fork to implement the changes. We recommend using 40 | the issue number as part of your branch name, e.g. `1234-fixes`. 41 | 1. Ensure that any documentation is updated with the changes that are required 42 | by your change. 43 | 1. Ensure that any samples are updated if the base image has been changed. 44 | 1. Submit the pull request. *Do not leave the pull request blank*. Explain exactly 45 | what your changes are meant to do and provide simple steps on how to validate 46 | your changes. Ensure that you reference the issue you created as well. 47 | 1. We will assign the pull request to 2-3 people for review before it is merged. 48 | 49 | ## Code of conduct 50 | 51 | Follow the [Golden Rule](https://en.wikipedia.org/wiki/Golden_Rule). If you'd 52 | like more specific guidelines, see the [Contributor Covenant Code of Conduct][COC]. 53 | 54 | [OCA]: https://oca.opensource.oracle.com 55 | [COC]: https://www.contributor-covenant.org/version/1/4/code-of-conduct/ 56 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021, 2023 Oracle and/or its affiliates.
2 | 3 | The Universal Permissive License (UPL), Version 1.0 4 | 5 | Subject to the condition set forth below, permission is hereby granted to any 6 | person obtaining a copy of this software, associated documentation and/or data 7 | (collectively the "Software"), free of charge and under any and all copyright 8 | rights in the Software, and any and all patent rights owned or freely 9 | licensable by each licensor hereunder covering either (i) the unmodified 10 | Software as contributed to or provided by such licensor, or (ii) the Larger 11 | Works (as defined below), to deal in both 12 | 13 | (a) the Software, and 14 | (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if 15 | one is included with the Software (each a "Larger Work" to which the Software 16 | is contributed by such licensors), 17 | 18 | without restriction, including without limitation the rights to copy, create 19 | derivative works of, display, perform, and distribute the Software and make, 20 | use, sell, offer for sale, import, export, have made, and have sold the 21 | Software and the Larger Work(s), and to sublicense the foregoing rights on 22 | either these or other terms. 23 | 24 | This license is subject to the following condition: 25 | The above copyright notice and either this complete permission notice or at 26 | a minimum a reference to the UPL must be included in all copies or 27 | substantial portions of the Software. 28 | 29 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 30 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 31 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 32 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 33 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 34 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 35 | SOFTWARE. 36 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible Playbooks for Oracle Linux 2 | 3 | This repository is a collection of Ansible playbooks and code examples that help automate time-consuming processes and operations usually executed manually. 4 | The code examples focus mainly on automation for Oracle products, with the option to leverage them with Oracle Linux Automation Manager and Oracle Linux Automation Engine. 5 | 6 | # What is Oracle Linux Automation Manager/Engine? 7 | 8 | Oracle Linux Automation Manager and Oracle Linux Automation Engine are the latest additions to the Oracle Linux operating environment. 9 | Together, these new capabilities provide a cost-effective, powerful, web-based automation engine with reporting, credentialed role-based access control, workflow automation, and a job scheduling framework for Oracle Linux customers who need modern provisioning, deployment, configuration management, and task automation. 10 | Oracle Linux Automation Manager and Engine, based upon the open source AWX and Ansible projects, respectively, are included with an Oracle Linux Premier Support subscription. Oracle Linux Premier Support customers already using or evaluating AWX or Ansible can run these technologies fully supported, at no additional cost, on Oracle Linux. 11 | 12 | This repository contains example Ansible playbook code.
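The following is a minimal sketch of how one of the included playbooks might be run with the Oracle Linux Automation Engine command line, assuming the repository has been cloned (see Getting Started below) and that `inventory.ini` is an inventory file you supply for your target hosts:

```
# Hypothetical invocation; inventory.ini is your own inventory file
# listing the managed Oracle Linux hosts.
ansible-playbook -i inventory.ini playbooks/OL_Admin/update_ol.yml
```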
13 | 14 | ## Getting Started 15 | 16 | Clone this repository: `git clone https://github.com/oracle-samples/ansible-playbooks.git` 17 | 18 | - [Check the Oracle Linux Automation Manager getting started documentation](https://docs.oracle.com/en/operating-systems/oracle-linux-automation-manager/index.html). 19 | 20 | ## Contributing 21 | 22 | This project welcomes contributions from the community. Before submitting a pull 23 | request, please [review our contribution guide](./CONTRIBUTING.md). 24 | 25 | ## Help 26 | 27 | - Project Owner: Avi Miller ([@Djelibeybi](https://github.com/Djelibeybi)) 28 | - Project Admin: Monica S ([@Monicashivakumar](https://github.com/Monicashivakumar)) 29 | 30 | ## Security 31 | 32 | Please consult the [security guide](./SECURITY.md) for our responsible security 33 | vulnerability disclosure process. 34 | 35 | ## License 36 | 37 | Copyright (c) 2021, 2023 Oracle and/or its affiliates. 38 | Released under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl/. 39 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Reporting security vulnerabilities 2 | 3 | Oracle values the independent security research community and believes that 4 | responsible disclosure of security vulnerabilities helps us ensure the security 5 | and privacy of all our users. 6 | 7 | Please do NOT raise a GitHub Issue to report a security vulnerability. If you 8 | believe you have found a security vulnerability, please submit a report to 9 | [secalert_us@oracle.com][1] preferably with a proof of concept. Please review 10 | some additional information on [how to report security vulnerabilities to Oracle][2]. 11 | We encourage people who contact Oracle Security to use email encryption using 12 | [our encryption key][3]. 13 | 14 | We ask that you do not use other channels or contact the project maintainers 15 | directly. 16 | 17 | Non-vulnerability related security issues, including ideas for new or improved 18 | security features, are welcome on GitHub Issues. 19 | 20 | ## Security updates, alerts and bulletins 21 | 22 | Security updates will be released on a regular cadence. Many of our projects 23 | will typically release security fixes in conjunction with the 24 | Oracle Critical Patch Update program. Additional 25 | information, including past advisories, is available on our [security alerts][4] 26 | page. 27 | 28 | ## Security-related information 29 | 30 | We will provide security related information such as a threat model, considerations 31 | for secure use, or any known security issues in our documentation. Please note 32 | that labs and sample code are intended to demonstrate a concept and may not be 33 | sufficiently hardened for production use.
34 | 35 | [1]: mailto:secalert_us@oracle.com 36 | [2]: https://www.oracle.com/corporate/security-practices/assurance/vulnerability/reporting.html 37 | [3]: https://www.oracle.com/security-alerts/encryptionkey.html 38 | [4]: https://www.oracle.com/security-alerts/ 39 | -------------------------------------------------------------------------------- /docs/files/Picture 9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture 9.jpg -------------------------------------------------------------------------------- /docs/files/Picture1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture1.jpg -------------------------------------------------------------------------------- /docs/files/Picture10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture10.jpg -------------------------------------------------------------------------------- /docs/files/Picture11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture11.jpg -------------------------------------------------------------------------------- /docs/files/Picture12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture12.jpg -------------------------------------------------------------------------------- /docs/files/Picture13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture13.jpg -------------------------------------------------------------------------------- /docs/files/Picture14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture14.jpg -------------------------------------------------------------------------------- /docs/files/Picture15.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture15.jpg -------------------------------------------------------------------------------- /docs/files/Picture16.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture16.jpg -------------------------------------------------------------------------------- /docs/files/Picture17.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture17.jpg 
-------------------------------------------------------------------------------- /docs/files/Picture19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture19.png -------------------------------------------------------------------------------- /docs/files/Picture2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture2.jpg -------------------------------------------------------------------------------- /docs/files/Picture20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture20.png -------------------------------------------------------------------------------- /docs/files/Picture21.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture21.png -------------------------------------------------------------------------------- /docs/files/Picture22.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture22.png -------------------------------------------------------------------------------- /docs/files/Picture23.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture23.png -------------------------------------------------------------------------------- /docs/files/Picture24.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture24.png -------------------------------------------------------------------------------- /docs/files/Picture25.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture25.png -------------------------------------------------------------------------------- /docs/files/Picture3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture3.jpg -------------------------------------------------------------------------------- /docs/files/Picture4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture4.jpg -------------------------------------------------------------------------------- /docs/files/Picture5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture5.jpg 
-------------------------------------------------------------------------------- /docs/files/Picture6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture6.jpg -------------------------------------------------------------------------------- /docs/files/Picture7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture7.jpg -------------------------------------------------------------------------------- /docs/files/Picture8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture8.jpg -------------------------------------------------------------------------------- /docs/files/Picture9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/Picture9.jpg -------------------------------------------------------------------------------- /docs/files/ocne01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/ocne01.png -------------------------------------------------------------------------------- /docs/files/ocne02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/ocne02.png -------------------------------------------------------------------------------- /docs/files/ocne03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/ocne03.png -------------------------------------------------------------------------------- /docs/files/ocne04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/ocne04.png -------------------------------------------------------------------------------- /docs/files/ocne05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/ocne05.png -------------------------------------------------------------------------------- /docs/files/test: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/files/vault1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/vault1.png -------------------------------------------------------------------------------- /docs/files/vault2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/docs/files/vault2.png -------------------------------------------------------------------------------- /docs/test: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /playbooks/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/playbooks/.gitkeep -------------------------------------------------------------------------------- /playbooks/AIDE/Readme.md: -------------------------------------------------------------------------------- 1 | # AIDE - Advanced Intrusion Detection Environment 2 | 3 | Advanced Intrusion Detection Environment (AIDE) is an application that uses various tools to detect changes to particular files on a system and report on them so that you can maintain baseline file integrity and detect unauthorized changes and potential rootkits. 4 | AIDE takes a "snapshot" of the state of the system; this "snapshot" is used to build a database. When an administrator wants to run an integrity test, AIDE compares the database against the current status of the system. Should a change have happened to the system between the snapshot creation and the test, AIDE will detect it and report it. 5 | 6 | # Playbooks 7 | 8 | `installaide.yaml`: 9 | 10 | The install and configure playbook will perform the following: 11 | - Become the superuser. 12 | - Check if the AIDE database exists. This is needed for idempotency, and the playbook will fail if the AIDE database exists. 13 | - Install the AIDE package. 14 | - Initialize AIDE, create a baseline, and enable the database. 15 | 16 | `checkaide.yaml`: 17 | 18 | The check playbook, which runs on an Oracle Linux 8 host, will perform the following: 19 | - Become the superuser. 20 | - Run the aide check command and report the result. 21 | - If no differences are found, then report no differences and pass the job. 22 | - If differences are found, then report differences and fail the job. 23 | 24 | `aide_create_new_baseline.yml`: 25 | 26 | A new service requirement may need multiple hosts to have additional software installed and configured; following this installation and configuration, any subsequent AIDE checks will fail. The create new baseline playbook will perform the following: 27 | - Become the superuser. 28 | - Run the initialize aide command. 29 | - Re-enable the database. 30 | 31 | `remove_aide.yaml`: 32 | 33 | If it is necessary to remove the AIDE configuration from multiple hosts, then the remove aide playbook, which runs on an Oracle Linux 8 host, will perform the following: 34 | - Become the superuser. 35 | - Remove the AIDE package. 36 | - Clean up the AIDE file system. 37 | 38 | For more details on Advanced Intrusion Detection Environment with Oracle Linux Automation Manager, refer to the [Technical Paper](https://www.oracle.com/a/ocom/docs/linux/using-advanced-intrusion-detection-environment.pdf). 39 | -------------------------------------------------------------------------------- /playbooks/AIDE/aide_create_new_baseline.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to create a new AIDE baseline 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | tasks: 17 | - name: Initialise aide 18 | command: aide --init 19 | - name: Enable Database for aide 20 | command: mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz 21 | -------------------------------------------------------------------------------- /playbooks/AIDE/checkaide.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to run an AIDE integrity check 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | tasks: 17 | - name: Gather aide state 18 | block: 19 | - name: Run aide check 20 | command: aide --check 21 | register: result 22 | - name: Report Aide OK 23 | debug: 24 | msg: AIDE found NO differences between database and filesystem. Looks okay!! 25 | when: result.rc == 0 26 | rescue: 27 | - name: Report Aide Error 28 | debug: 29 | msg: AIDE found differences between database and filesystem!! 30 | - name: Force a failure 31 | command: /bin/false 32 | -------------------------------------------------------------------------------- /playbooks/AIDE/installaide.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to install AIDE 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | tasks: 17 | - name: Check if the aide database exists 18 | stat: 19 | path: /var/lib/aide/aide.db.gz 20 | register: p 21 | - name: Fail if aide database exists 22 | fail: 23 | msg: The aide database exists, therefore aide is installed, we need to exit 24 | when: p.stat.exists 25 | - name: Install aide package 26 | yum: 27 | name: aide 28 | state: present 29 | - name: Initialise aide 30 | command: aide --init 31 | register: command_output 32 | - debug: var=command_output 33 | - name: Enable Database for aide 34 | command: mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz 35 | -------------------------------------------------------------------------------- /playbooks/AIDE/remove_aide.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to remove AIDE 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | tasks: 17 | - name: Remove aide package 18 | yum: 19 | name: aide 20 | state: absent 21 | - name: Clean up aide filesystem 22 | file: 23 | path: /var/lib/aide 24 | state: absent 25 | -------------------------------------------------------------------------------- /playbooks/BTRFS/Readme.txt: -------------------------------------------------------------------------------- 1 | This directory contains playbooks for BTRFS snapshots and OLAM. 2 | -------------------------------------------------------------------------------- /playbooks/BTRFS/create_adhoc_btrfs_snapshot.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to create an adhoc BTRFS snapshot 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | 17 | tasks: 18 | - name: Gather uname output to determine if system is running Oracle Linux 8 UEK 19 | command: uname -r 20 | register: uname_contents 21 | 22 | - name: Fail if system is not running Oracle Linux 8 UEK 23 | fail: 24 | msg: System is not running Oracle Linux 8 UEK, we need to exit 25 | when: uname_contents.stdout.find('el8uek') == -1 26 | 27 | - name: Gather btrfs output for the root filesystem to determine if system is running btrfs 28 | command: stat -f --format="%T" / 29 | register: btrfs_contents 30 | 31 | - name: Fail if system is not using btrfs for the root filesystem 32 | fail: 33 | msg: System is not using btrfs for the root filesystem, we need to exit 34 | when: btrfs_contents.stdout.find('btrfs') == -1 35 | 36 | - name: Check and fail if /mnt is already mounted 37 | shell: mountpoint /mnt 38 | register: mount 39 | ignore_errors: True 40 | 41 | - name: Fail if /mnt is already mounted 42 | fail: 43 | msg: The /mnt filesystem is already mounted, we need to exit 44 | when: mount.rc == 0 45 | 46 | - name: Check that the /mnt filesystem exists, if not we will create it 47 | file: 48 | path: /mnt 49 | state: directory 50 | mode: '0755' 51 | 52 | - name: Get the subvolume id for the root filesystem 53 | shell: btrfs subvolume list / | awk 'NR == 1 {print $7}' 54 | register: subvolid 55 | 56 | - set_fact: 57 | subvolid_string: "{{ subvolid.stdout }}" 58 | 59 | - debug: 60 | msg: "Current subvolume id for root = {{ subvolid_string }}" 61 | 62 | - name: Get the disk device that is supporting the root filesystem 63 | shell: df -hT | grep /$ | awk '{print $1}' 64 | register: diskdevice 65 | 66 | - set_fact: 67 | diskdevice_string: "{{ diskdevice.stdout }}" 68 | 69 | - debug: 70 | msg: "Current disk device = {{ diskdevice_string }}" 71 | 72 | - name: Mount the root filesystem on /mnt 73 | shell: mount -o subvolid="{{ subvolid_string }}" "{{ diskdevice_string }}" /mnt 74 | args: 75 | warn: false # set warn=false to prevent warning 76 | ignore_errors: True 77 | 78 | - name: Check the /mnt/snapshots directory exists, if not we will create it 79 | file: 80 | path: /mnt/snapshots 81 | state: directory 82 | mode: '0755' 83 | 84 | - name: Take a snapshot of the root filesystem with a date and time stamp 85 | shell: | 86 | cd /mnt 87 | btrfs subvolume snapshot root snapshots/"{{ snapshot_name }}" 88 | 89 | - name: Unmount /mnt 90 | mount: 91 | path: /mnt 92 | state:
unmounted 93 | -------------------------------------------------------------------------------- /playbooks/BTRFS/create_btrfs_snapshot_and_update.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to create a BTRFS snapshot then dnf update 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | 17 | tasks: 18 | - name: Gather uname output to determine if system is running Oracle Linux 8 UEK 19 | command: uname -r 20 | register: uname_contents 21 | 22 | - name: Fail if system is not running Oracle Linux 8 UEK 23 | fail: 24 | msg: System is not running Oracle Linux 8 UEK, we need to exit 25 | when: uname_contents.stdout.find('el8uek') == -1 26 | 27 | - name: Gather btrfs output for the root filesystem to determine if system is running btrfs 28 | command: stat -f --format="%T" / 29 | register: btrfs_contents 30 | 31 | - name: Fail if system is not using btrfs for the root filesystem 32 | fail: 33 | msg: System is not using btrfs for the root filesystem, we need to exit 34 | when: btrfs_contents.stdout.find('btrfs') == -1 35 | 36 | - name: Check and fail if /mnt is already mounted 37 | shell: mountpoint /mnt 38 | register: mount 39 | ignore_errors: True 40 | 41 | - name: Fail if /mnt is already mounted 42 | fail: 43 | msg: The /mnt filesystem is already mounted, we need to exit 44 | when: mount.rc == 0 45 | 46 | - name: Check that the /mnt filesystem exists, if not we will create it 47 | file: 48 | path: /mnt 49 | state: directory 50 | mode: '0755' 51 | 52 | - name: Get the top level subvolume id for the root filesystem 53 | shell: btrfs subvolume list / | awk 'NR == 1 {print $7}' 54 | register: top_subvolid 55 | 56 | - set_fact: 57 | top_subvolid_string: "{{ top_subvolid.stdout }}" 58 | 59 | - debug: 60 | msg: "Current subvolume id for root = {{ top_subvolid_string }}" 61 | 62 | - name: Get the disk device that is supporting the root filesystem 63 | shell: df -hT | grep /$ | awk '{print $1}' 64 | register: diskdevice 65 | 66 | - set_fact: 67 | diskdevice_string: "{{ diskdevice.stdout }}" 68 | 69 | - debug: 70 | msg: "Current disk device = {{ diskdevice_string }}" 71 | 72 | - name: Mount the root filesystem on /mnt 73 | shell: mount -o subvolid="{{ top_subvolid_string }}" "{{ diskdevice_string }}" /mnt 74 | args: 75 | warn: false # set warn=false to prevent warning 76 | ignore_errors: True 77 | 78 | - name: Check the /mnt/snapshots directory exists, if not we will create it 79 | file: 80 | path: /mnt/snapshots 81 | state: directory 82 | mode: '0755' 83 | 84 | - name: Take a snapshot of the root filesystem 85 | shell: | 86 | cd /mnt 87 | btrfs subvolume snapshot root snapshots/"{{ snapshot_name }}" 88 | 89 | - name: Unmount /mnt 90 | mount: 91 | path: /mnt 92 | state: unmounted 93 | 94 | - name: Install yum-utils if not present as we need it for auto reboot 95 | ansible.builtin.dnf: 96 | name: yum-utils 97 | state: latest 98 | when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '>=')) 99 | 100 | - name: update all packages for Oracle Linux 8 101 | ansible.builtin.dnf: 102 | name: "*" 103 | state: latest 104 | when: ansible_distribution == 'OracleLinux' and
(ansible_facts['distribution_version'] is version('8', '>=')) 105 | 106 | - name: check if reboot required 107 | command: /usr/bin/needs-restarting -r 108 | register: reboot_required 109 | ignore_errors: yes 110 | changed_when: false 111 | failed_when: reboot_required.rc == 2 112 | when: ansible_distribution == 'OracleLinux' 113 | 114 | - debug: 115 | msg: "Checking if reboot is required: {{ reboot_required.stdout }}" 116 | 117 | - name: reboot (if needed) to apply latest kernel and updates 118 | reboot: 119 | when: ansible_distribution == 'OracleLinux' and reboot_required.rc == 1 120 | -------------------------------------------------------------------------------- /playbooks/BTRFS/delete_btrfs_snapshot.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to delete a BTRFS snapshot 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | 17 | tasks: 18 | - name: Gather uname output to determine if system is running Oracle Linux 8 UEK 19 | command: uname -r 20 | register: uname_contents 21 | 22 | - name: Fail if system is not running Oracle Linux 8 UEK 23 | fail: 24 | msg: System is not running Oracle Linux 8 UEK, we need to exit 25 | when: uname_contents.stdout.find('el8uek') == -1 26 | 27 | - name: Gather btrfs output for the root filesystem to determine if system is running btrfs 28 | command: stat -f --format="%T" / 29 | register: btrfs_contents 30 | 31 | - name: Fail if system is not using btrfs for the root filesystem 32 | fail: 33 | msg: System is not using btrfs for the root filesystem, we need to exit 34 | when: btrfs_contents.stdout.find('btrfs') == -1 35 | 36 | - name: Check and fail if /mnt is already mounted 37 | shell: mountpoint /mnt 38 | register: mount 39 | ignore_errors: True 40 | 41 | - name: Fail if /mnt is already mounted 42 | fail: 43 | msg: The /mnt filesystem is already mounted, we need to exit 44 | when: mount.rc == 0 45 | 46 | - name: Check the default subvolume 47 | command: btrfs subvolume get-default / 48 | register: snapshot_output 49 | 50 | - name: Fail if the default subvolume is currently booted 51 | fail: 52 | msg: System is booted using the target snapshot for deletion, we need to exit 53 | when: 'snapshot_name | string in snapshot_output.stdout' 54 | 55 | - name: Check that the /mnt filesystem exists, if not we will create it 56 | file: 57 | path: /mnt 58 | state: directory 59 | mode: '0755' 60 | 61 | - name: Get the subvolume id for the root filesystem 62 | shell: btrfs subvolume list / | awk 'NR == 1 {print $7}' 63 | register: subvolid 64 | 65 | - set_fact: 66 | subvolid_string: "{{ subvolid.stdout }}" 67 | 68 | - debug: 69 | msg: "Current subvolume id for root = {{ subvolid_string }}" 70 | 71 | - name: Get the disk device that is supporting the root filesystem 72 | shell: df -hT | grep /$ | awk '{print $1}' 73 | register: diskdevice 74 | 75 | - set_fact: 76 | diskdevice_string: "{{ diskdevice.stdout }}" 77 | 78 | - debug: 79 | msg: "Current disk device = {{ diskdevice_string }}" 80 | 81 | - name: Mount the root filesystem on /mnt 82 | shell: mount -o subvolid="{{ subvolid_string }}" "{{ diskdevice_string }}" /mnt 83 | args: 84 | warn: false # set warn=false to prevent warning 85 | ignore_errors:
True 86 | 87 | - name: Delete the snapshot 88 | shell: | 89 | cd /mnt 90 | btrfs subvolume delete snapshots/"{{ snapshot_name }}" 91 | 92 | - name: Unmount /mnt 93 | mount: 94 | path: /mnt 95 | state: unmounted 96 | -------------------------------------------------------------------------------- /playbooks/BTRFS/list_btrfs_snapshots.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to list BTRFS snapshot and subvolume IDs 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | 17 | tasks: 18 | - name: Gather uname output to determine if system is running Oracle Linux 8 UEK 19 | command: uname -r 20 | register: uname_contents 21 | 22 | - name: Fail if system is not running Oracle Linux 8 UEK 23 | fail: 24 | msg: System is not running Oracle Linux 8 UEK, we need to exit 25 | when: uname_contents.stdout.find('el8uek') == -1 26 | 27 | - name: Gather btrfs output for the root filesystem to determine if system is running btrfs 28 | command: stat -f --format="%T" / 29 | register: btrfs_contents 30 | 31 | - name: Fail if system is not using btrfs for the root filesystem 32 | fail: 33 | msg: System is not using btrfs for the root filesystem, we need to exit 34 | when: btrfs_contents.stdout.find('btrfs') == -1 35 | 36 | - name: Gather the btrfs subvolume output for the snapshot IDs 37 | command: btrfs subvolume list / 38 | register: btrfs_subvol_output 39 | 40 | - name: Print out the btrfs subvolumes for gathering the IDs 41 | debug: 42 | msg: "{{ item }}" 43 | loop: "{{ btrfs_subvol_output.stdout_lines }}" 44 | 45 | - name: Print out the current btrfs default subvolume id 46 | shell: btrfs subvolume get-default / 47 | register: btrfs_subvol_output 48 | 49 | - debug: 50 | msg: "Current btrfs default subvolume (current boot subvolume) details {{ btrfs_subvol_output.stdout_lines }}" 51 | -------------------------------------------------------------------------------- /playbooks/BTRFS/rename_subvolume.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to rename a BTRFS snapshot 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
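# Note: this playbook expects two extra vars: 'existing' (the current snapshot
# or subvolume name under /mnt) and 'new' (the new name), e.g.
#   -e existing=root_snapshot -e new=root_snapshot_old   (example values only)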
11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | 17 | tasks: 18 | - name: Gather uname output to determine if system is running Oracle Linux 8 UEK 19 | command: uname -r 20 | register: uname_contents 21 | 22 | - name: Fail if system is not running Oracle Linux 8 UEK 23 | fail: 24 | msg: System is not running Oracle Linux 8 UEK, we need to exit 25 | when: uname_contents.stdout.find('el8uek') == -1 26 | 27 | - name: Gather btrfs output for the root filesystem to determine if system is running btrfs 28 | command: stat -f --format="%T" / 29 | register: btrfs_contents 30 | 31 | - name: Fail if system is not using btrfs for the root filesystem 32 | fail: 33 | msg: System is not using btrfs for the root filesystem, we need to exit 34 | when: btrfs_contents.stdout.find('btrfs') == -1 35 | 36 | - name: Check and fail if /mnt is already mounted 37 | shell: mountpoint /mnt 38 | register: mount 39 | ignore_errors: True 40 | 41 | - name: Fail if /mnt is already mounted 42 | fail: 43 | msg: The /mnt filesystem is already mounted, we need to exit 44 | when: mount.rc == 0 45 | 46 | - name: Check that the /mnt filesystem exists, if not we will create it 47 | file: 48 | path: /mnt 49 | state: directory 50 | mode: '0755' 51 | 52 | - name: Get the subvolume id for the root filesystem 53 | shell: btrfs subvolume list / | awk 'NR == 1 {print $7}' 54 | register: subvolid 55 | 56 | - set_fact: 57 | subvolid_string: "{{ subvolid.stdout }}" 58 | 59 | - debug: 60 | msg: "Current subvolume id for root = {{ subvolid_string }}" 61 | 62 | - name: Get the disk device that is supporting the root filesystem 63 | shell: df -hT | grep /$ | awk '{print $1}' 64 | register: diskdevice 65 | 66 | - set_fact: 67 | diskdevice_string: "{{ diskdevice.stdout }}" 68 | 69 | - debug: 70 | msg: "Current disk device = {{ diskdevice_string }}" 71 | 72 | - name: Mount the root filesystem on /mnt 73 | shell: mount -o subvolid="{{ subvolid_string }}" "{{ diskdevice_string }}" /mnt 74 | args: 75 | warn: false # set warn=false to prevent warning 76 | ignore_errors: True 77 | 78 | - name: Check if the existing directory exists 79 | stat: path=/mnt/{{ existing }} 80 | register: existing_string 81 | 82 | - name: Fail if the existing directory does not exist 83 | fail: 84 | msg: The chosen directory to rename does not exist, we need to exit 85 | when: existing_string.stat.exists == false 86 | 87 | - name: Rename the snapshot or subvolume 88 | shell: | 89 | cd /mnt 90 | mv "{{ existing }}" "{{ new }}" 91 | 92 | - name: Unmount /mnt 93 | mount: 94 | path: /mnt 95 | state: unmounted 96 | -------------------------------------------------------------------------------- /playbooks/BTRFS/rollback_btrfs_snapshot.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to rollback / boot from a BTRFS snapshot 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
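# Note: this playbook expects the extra var 'id', the subvolume ID to set as the
# default root subvolume, as listed by list_btrfs_snapshots.yaml, e.g. -e id=260
# (the value shown is illustrative).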
11 | # 12 | 13 | --- 14 | - hosts: all 15 | become: yes 16 | 17 | tasks: 18 | - name: Gather uname output to determine if system is running Oracle Linux 8 UEK 19 | command: uname -r 20 | register: uname_contents 21 | 22 | - name: Fail if system is not running Oracle Linux 8 UEK 23 | fail: 24 | msg: System is not running Oracle Linux 8 UEK, we need to exit 25 | when: uname_contents.stdout.find('el8uek') == -1 26 | 27 | - name: Gather btrfs output for the root filesystem to determine if system is running btrfs 28 | command: stat -f --format="%T" / 29 | register: btrfs_contents 30 | 31 | - name: Fail if system is not using btrfs for the root filesystem 32 | fail: 33 | msg: System is not using btrfs for the root filesystem, we need to exit 34 | when: btrfs_contents.stdout.find('btrfs') == -1 35 | 36 | - name: Set the alternative root subvolume as default 37 | command: btrfs subvolume set-default "{{ id }}" / 38 | register: remount_command_output 39 | - debug: var=remount_command_output 40 | 41 | - name: Prepare to boot from the alternative root subvolume 42 | shell: | 43 | current_grub_kernel=$(sudo grubby --default-kernel); 44 | grubby --remove-args="rootflags=subvol=root" --update-kernel $current_grub_kernel 45 | 46 | - name: reboot to apply alternative root snapshot 47 | reboot: 48 | -------------------------------------------------------------------------------- /playbooks/KSPLICE/README.md: -------------------------------------------------------------------------------- 1 | # Ksplice - Oracle Linux Ksplice Ansible Playbooks 2 | 3 | 4 | A collection of playbooks to install Ksplice zero downtime patching on Oracle Linux servers. Ksplice is an Oracle Linux Premier Support service. 5 | 6 | Ksplice is available in online or offline deployment scenarios for kernel patching and/or userland patching. Ksplice Uptrack is the kernel patching client, while Ksplice Enhanced is the kernel and userland patching client. 7 | 8 | # Ksplice Uptrack check 9 | 10 | Playbook: `ksplice-uptrack-check.yml` 11 | 12 | Ksplice Uptrack check is a playbook to scan for Common Vulnerabilities and Exposures (CVE). By using the option `save_output == "yes"` in the Job template, the playbook saves the output in HTML format in the `/tmp` directory of the target server. If you run the playbook with the CLI, the `-e` or `--extra-vars` command line parameter for ansible-playbook should be used. 13 | 14 | # Ksplice Uptrack offline client 15 | 16 | Playbook: `ksplice-uptrack-offline.yml` 17 | 18 | The offline version of the Ksplice Uptrack Client removes the requirement that a server on your intranet has a direct connection to the Oracle Uptrack server or ULN. Prior to configuring an offline client, you must set up a local ULN mirror with Ksplice repositories synced from ULN. 19 | 20 | The URL of the local Ksplice repository on the ULN mirror must be configured as a variable in the playbook. 21 | 22 | When the playbook is finished, Ksplice is configured to do daily checks for new Ksplice updates (`/etc/cron.daily/uptrack-updates`). When new updates are available, they are applied automatically. Configure `skip_apply_after_pkg_install` to `true` to turn off automatic patch upgrades.
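As a sketch, the offline playbook can be launched from the command line like this; the mirror URL is an assumed example and the variables are described in the table below:

```
# hosts is your own inventory file; the mirror hostname is illustrative.
ansible-playbook -i hosts ksplice-uptrack-offline.yml \
  -e baseurl_ksplice=http://localyum.example.com/repo/ol8_x86_64_ksplice \
  -e skip_apply_after_pkg_install=true
```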
23 | 24 | ## Variables in Ksplice Uptrack offline playbook 25 | 26 | | Variable | Default | Description | 27 | | -------- | -------- | ----------- | 28 | | baseurl_ksplice | | URL to the local ULN mirror, e.g. http://localyum.example.com/repo/ol{{ ol_version }}_x86_64_ksplice 29 | | install_on_reboot | yes | Automatically install earlier applied updates at boot time 30 | | upgrade_on_reboot | yes | Automatically install all (earlier applied and new) available updates at boot time 31 | | skip_apply_after_pkg_install | false | Set to `true` to avoid automatically running `uptrack-upgrade --all -y` when the uptrack-updates are installed through yum/dnf 32 | 33 | # Ksplice Uptrack online client 34 | 35 | Playbook: `ksplice-uptrack-online.yml` 36 | 37 | The Ksplice Uptrack online playbook requires a server to be registered with ULN. The server also needs to be subscribed to the Ksplice for Oracle Linux ULN channel (e.g. `ol7_x86_64_ksplice` for Oracle Linux 7). 38 | 39 | ## Variables in Ksplice Uptrack online playbook 40 | 41 | | Variable | Default | Description | 42 | | -------- | -------- | ----------- | 43 | | install_on_reboot | yes | Automatically install earlier applied updates at boot time 44 | | upgrade_on_reboot | yes | Automatically install all (earlier applied and new) available updates at boot time 45 | | autoinstall | yes | Enables automatic installation of Ksplice updates, set to `no` for manual Ksplice updates 46 | | https_proxy | None | Proxy to use when accessing the Ksplice Uptrack server, the proxy must support making HTTPS connections 47 | | first_update | true | Runs the first `uptrack-upgrade` at the end of the playbook, set to false when you want to run the first `uptrack-upgrade` manually 48 | 49 | 50 | -------------------------------------------------------------------------------- /playbooks/KSPLICE/collections/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: community.general 4 | -------------------------------------------------------------------------------- /playbooks/KSPLICE/known-exploit-detection.yml: -------------------------------------------------------------------------------- 1 | - name: Known exploit detection 2 | hosts: all 3 | 4 | tasks: 5 | - name: Installing ksplice-known-exploit-detection package 6 | package: 7 | name: ksplice-known-exploit-detection 8 | state: present 9 | 10 | - name: Editing /etc/uptrack/uptrack.conf 11 | shell: grep -q "^enabled = yes$" /etc/uptrack/uptrack.conf || echo -e "[Known-Exploit-Detection]\nenabled = yes" >> /etc/uptrack/uptrack.conf 12 | 13 | - name: Activating the feature. 14 | shell: uptrack-upgrade -y 15 | 16 | - name: Scan /var/log/messages for attempts 17 | shell: "grep -E '\\' /var/log/messages | wc -l" 18 | register: exploit_count 19 | ignore_errors: yes 20 | 21 | 22 | 23 | - name: Print count to output 24 | debug: 25 | msg: "Detected {{ exploit_count.stdout }} attempts to breach the system" 26 | when: exploit_count.stdout|int > 0 27 | 28 | - name: Print details 29 | shell: "grep -E '\\' /var/log/messages" 30 | register: cve_msgs 31 | ignore_errors: yes 32 | 33 | - debug: 34 | msg: "{{ cve_msgs.stdout_lines }}" 35 | -------------------------------------------------------------------------------- /playbooks/KSPLICE/ksplice-uptrack-check.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2023 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # Description: Playbook to scan for Common Vulnerabilities and Exposures (CVE) 9 | # 10 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 11 | # 12 | 13 | --- 14 | - name: Uptrack Check 15 | hosts: all 16 | vars_prompt: 17 | - name: "save_output" 18 | prompt: "Do you want to save the output? (yes/no)" 19 | private: no 20 | 21 | tasks: 22 | - name: Scanning for vulnerabilities. 23 | shell: 24 | cmd: "(uname -s; uname -m; uname -r; uname -v) | \ 25 | curl https://api-ksplice.oracle.com/api/1/update-list/ \ 26 | -L -H 'Accept: text/text' --data-binary @- " 27 | register: command_output 28 | when: save_output == "no" 29 | 30 | - name: Listing the Vulnerabilities 31 | debug: 32 | msg: "{{ command_output.stdout_lines }}" 33 | when: save_output == "no" 34 | 35 | - name: Scanning for vulnerabilities. The report will be available at /tmp/uptrack-check.html on the remote systems 36 | shell: 37 | cmd: "(uname -s; uname -m; uname -r; uname -v) | \ 38 | curl https://api-ksplice.oracle.com/api/1/update-list/ \ 39 | -L -H 'Accept: text/text' --data-binary @- > /tmp/uptrack-check.txt" 40 | register: command_output 41 | when: save_output == "yes" 42 | 43 | 44 | - name: Formatting the file 45 | replace: 46 | path: /tmp/uptrack-check.txt 47 | regexp: 'CVE-(\d+)-(\d+)' 48 | replace: '
CVE-\1-\2' 49 | backup: yes 50 | when: save_output == "yes" 51 | 52 | 53 | - name: Save to HTML file 54 | command: mv /tmp/uptrack-check.txt /tmp/uptrack-check.html 55 | when: save_output == "yes" 56 | 57 | - name: Deleting txt file 58 | command: find /tmp -name 'uptrack-check.txt*' -delete 59 | 60 | -------------------------------------------------------------------------------- /playbooks/KSPLICE/ksplice-uptrack-offline.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 9 | # 10 | # Description: Ansible playbook to deploy the offline uptrack client 11 | # 12 | # yamllint disable 13 | --- 14 | 15 | - name: Deploy Ksplice uptrack offline client 16 | hosts: all 17 | become: yes 18 | become_method: sudo 19 | gather_facts: yes 20 | 21 | vars: 22 | ol_version: "{{ ansible_facts['distribution_major_version'] }}" 23 | i_o_r: "{{ install_on_reboot | default('yes') }}" 24 | u_o_r: "{{ upgrade_on_reboot | default('yes') }}" 25 | skip_a_a_p_i: "{{ skip_apply_after_pkg_install | default('false') }}" 26 | 27 | tasks: 28 | 29 | - name: Create repo file for local ksplice repository 30 | yum_repository: 31 | file: "local-ksplice-ol{{ ol_version }}" 32 | name: "ol{{ ol_version }}_local_ksplice" 33 | description: Ksplice for Oracle Linux $releasever ($basearch) 34 | baseurl: "{{ baseurl_ksplice }}" 35 | gpgkey: "file:///etc/pki/rpm-gpg/RPM-GPG-KEY" 36 | gpgcheck: yes 37 | enabled: yes 38 | 39 | - name: Make sure ksplice online packages are removed 40 | package: name=ksplice-tools,uptrack state=absent 41 | 42 | - name: Install ksplice uptrack offline package 43 | package: name=uptrack-offline state=present 44 | 45 | - name: Adjust install_on_reboot in uptrack.conf configuration file 46 | community.general.ini_file: 47 | dest: /etc/uptrack/uptrack.conf 48 | section: Settings 49 | option: install_on_reboot 50 | value: "{{ i_o_r }}" 51 | 52 | - name: Adjust upgrade_on_reboot in uptrack.conf configuration file 53 | ini_file: 54 | dest: /etc/uptrack/uptrack.conf 55 | section: Settings 56 | option: upgrade_on_reboot 57 | value: "{{ u_o_r }}" 58 | 59 | - name: Adjust skip_apply_after_pkg_install in uptrack.conf configuration file 60 | ini_file: 61 | dest: /etc/uptrack/uptrack.conf 62 | section: Settings 63 | option: skip_apply_after_pkg_install 64 | value: "{{ skip_a_a_p_i }}" 65 | 66 | - name: Create daily cronjob to download uptrack-updates 67 | copy: 68 | dest: "/etc/cron.daily/uptrack-updates" 69 | mode: 0755 70 | content: | 71 | #!/bin/sh 72 | yum -y install uptrack-updates-`uname -r` 73 | exit 0 74 | 75 | - name: Check for latest uptrack updates 76 | shell: /bin/sh /etc/cron.daily/uptrack-updates 77 | 78 | -------------------------------------------------------------------------------- /playbooks/KSPLICE/ksplice-uptrack-online.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
9 | #
10 | # Description: Ansible playbook to deploy the online uptrack client
11 | # Prerequisites:
12 | #  - server must be registered in ULN
13 | #  - olX_x86_64_ksplice must be added to subscribed ULN channels,
14 | #    where X is 7, 8 or 9
15 | #
16 | # yamllint disable
17 | ---
18 | 
19 | - name: Deploy Ksplice uptrack online client
20 |   hosts: all
21 |   become: yes
22 |   become_method: sudo
23 |   gather_facts: yes
24 | 
25 |   vars:
26 |     i_o_r: "{{ install_on_reboot | default('yes') }}"
27 |     u_o_r: "{{ upgrade_on_reboot | default('yes') }}"
28 |     autoinst: "{{ autoinstall | default('yes') }}"
29 |     https_pr: "{{ https_proxy | default('None') }}"
30 |     first_updt: "{{ first_update | default('true') }}"
31 | 
32 |   tasks:
33 | 
34 |     - name: Make sure ksplice offline packages are removed
35 |       package: name=ksplice-tools,uptrack-offline state=absent
36 | 
37 |     - name: Install ksplice uptrack online package
38 |       package: name=uptrack state=present
39 | 
40 |     - name: Adjust install_on_reboot in uptrack.conf configuration file
41 |       ini_file:
42 |         dest: /etc/uptrack/uptrack.conf
43 |         section: Settings
44 |         option: install_on_reboot
45 |         value: "{{ i_o_r }}"
46 | 
47 |     - name: Adjust upgrade_on_reboot in uptrack.conf configuration file
48 |       ini_file:
49 |         dest: /etc/uptrack/uptrack.conf
50 |         section: Settings
51 |         option: upgrade_on_reboot
52 |         value: "{{ u_o_r }}"
53 | 
54 |     - name: Adjust autoinstall in uptrack.conf configuration file
55 |       ini_file:
56 |         dest: /etc/uptrack/uptrack.conf
57 |         section: Settings
58 |         option: autoinstall
59 |         value: "{{ autoinst }}"
60 | 
61 |     - name: Adjust https_proxy in uptrack.conf configuration file
62 |       ini_file:
63 |         dest: /etc/uptrack/uptrack.conf
64 |         section: Network
65 |         option: https_proxy
66 |         value: "{{ https_pr }}"
67 | 
68 |     - name: Update to latest Ksplice uptrack updates
69 |       ansible.builtin.command: /usr/sbin/uptrack-upgrade --all -y
70 |       when: first_updt | bool
71 | 
-------------------------------------------------------------------------------- /playbooks/Leapp/README.md: --------------------------------------------------------------------------------
1 | 
2 | # Automating Leapp Upgrades Using Oracle Linux Automation Manager
3 | 
4 | The following playbooks show how the Leapp upgrade process from Oracle Linux 7 to Oracle Linux 8, as documented in https://docs.oracle.com/en/operating-systems/oracle-linux/8/leapp/leapp-AboutLeapp.html#about-leapp, can be automated:
5 | 
6 | * leapp_prepare.yml: Enables the Leapp repositories, installs the Leapp-related packages, performs a yum update, and prepares the system for the Leapp upgrade.
7 | * leapp_preupgrade.yml: Runs the Leapp preupgrade phase and displays the inhibitors and the answer file.
8 | * leapp_upgrade.yml: Performs the upgrade and reboots the machine.
9 | * post_upgrade.yml: Checks the system and removes any residual Oracle Linux 7 packages.
10 | 
11 | ## Variables
12 | 
13 | | Variable | Required | Description |
14 | | -------- | -------- | ----------- |
15 | | proxy | Yes | Values: true or false. Specifies if a proxy needs to be used.
16 | | my_https_proxy | Optional | If proxy is set to true, this variable holds the proxy URL.
17 | | leapp_switch | Yes | Values: --oraclelinux or --oci
18 | 
19 | ## Steps
20 | 
21 | ### Oracle Linux Automation Manager
22 | 
23 | * Ensure the Credentials, Inventories, and Projects are created.
24 | * Create a template for each of the four playbooks mentioned above.
To simplify the process further, create a Workflow Template in the order leapp_prepare --> leapp_preupgrade --> leapp_upgrade --> post_upgrade.
25 | * Add the extra variables as required to each template. Example: {"leapp_switch":"--oraclelinux"}
26 | 
27 | ### Ansible
28 | 
29 | * Ensure the inventory file is created or updated to point at the target hosts.
30 | * Execute each of the playbooks with commands similar to the following:
31 | ```
32 | #ansible-playbook leapp_prepare.yml -e '{"proxy":"true","my_https_proxy":"http://proxy:proxyport"}'
33 | #ansible-playbook leapp_preupgrade.yml -e '{"leapp_switch":"--oraclelinux","my_https_proxy":"http://proxy:proxyport"}'
34 | #ansible-playbook leapp_upgrade.yml -e '{"leapp_switch":"--oraclelinux"}'
35 | ```
36 | 
37 | 
38 | 
39 | 
40 | 
-------------------------------------------------------------------------------- /playbooks/Leapp/group_vars/vars.yml: --------------------------------------------------------------------------------
1 | # Variable to specify if the proxy is being used.
2 | proxy: false
3 | 
4 | # Proxy to be specified if the above variable proxy is set to true.
5 | my_https_proxy: ""
6 | 
7 | # --oci for OCI Instances
8 | leapp_switch: --oraclelinux
9 | 
10 | 
-------------------------------------------------------------------------------- /playbooks/Leapp/leapp_prepare.yml: --------------------------------------------------------------------------------
1 | # Oracle Linux Automation Manager
2 | #
3 | # Copyright (c) 2023 Oracle and/or its affiliates.
4 | # Licensed under the Universal Permissive License v 1.0 as shown at
5 | # https://oss.oracle.com/licenses/upl.
6 | #
7 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
8 | #
9 | # Description: Ansible playbook to prepare the host for the leapp upgrade.
10 | # Example: ansible-playbook leapp_prepare.yml -e '{"proxy":"true","my_https_proxy":"http://proxy"}'
11 | 
12 | 
13 | ---
14 | - name: Preparing the hosts for leapp Upgrade
15 |   hosts: all
16 |   become: yes  # Enable privilege escalation
17 |   vars:
18 |     proxy: ""
19 |     my_https_proxy: ""
20 | 
21 |   tasks:
22 | 
23 |     - name: Display current release
24 |       ansible.builtin.shell: |
25 |         cat /etc/*release
26 |         uname -r
27 |       register: system_info
28 | 
29 |     - name: Print current release and kernel
30 |       ansible.builtin.debug:
31 |         var: system_info.stdout_lines
32 | 
33 |     - name: Check if Oracle Linux
34 |       set_fact:
35 |         is_oraclelinux: "{{ 'Oracle Linux' in system_info.stdout }}"
36 | 
37 |     - name: Gather package facts
38 |       ansible.builtin.package_facts:
39 | 
40 |     - name: Display message based on package presence
41 |       debug:
42 |         msg: "rhn-setup package is installed.
Please ensure that the system is currently not registered with ULN" 43 | when: "'rhn-setup' in ansible_facts.packages" 44 | 45 | - name: Get the date of the last system update 46 | shell: grep Updated /var/log/yum.log | tail -1 | cut -d' ' -f 1-2 47 | register: last_update_date 48 | changed_when: false 49 | 50 | - name: Display last update date and days since last update 51 | debug: 52 | msg: "The system was last updated on {{ last_update_date.stdout }}" 53 | 54 | - name: Get a list of recently updated packages 55 | shell: rpm -qa --last | head -20 56 | register: recent_updates 57 | changed_when: false 58 | 59 | - name: Display recently updated packages 60 | debug: 61 | msg: "Recently updated packages:\n{{ recent_updates.stdout }}" 62 | 63 | - name: Add proxy config to yum.conf 64 | lineinfile: 65 | path: /etc/yum.conf 66 | line: "proxy={{ my_https_proxy }}" 67 | state: present 68 | when: 69 | - proxy == "true" 70 | 71 | - name: Install mokutil 72 | become: yes 73 | yum: 74 | name: mokutil 75 | state: present 76 | 77 | - name: Install yum versionlock plugin 78 | become: yes 79 | yum: 80 | name: yum-plugin-versionlock 81 | state: present 82 | 83 | - name: Check Secure Boot status 84 | command: mokutil --sb-state 85 | register: secure_boot_output 86 | changed_when: false 87 | ignore_errors: true 88 | 89 | - name: Disable Secure Boot if enabled 90 | command: mokutil --disable-validation 91 | when: "'enabled' in secure_boot_output.stdout" 92 | changed_when: true 93 | 94 | - name: Display status after attempting to disable Secure Boot 95 | debug: 96 | msg: "Secure Boot status: {{ 'disabled' if 'enabled' in secure_boot_output.stdout else 'already disabled' }}" 97 | 98 | - name: Set locale using localectl 99 | command: localectl set-locale LANG=en_US.UTF-8 100 | 101 | 102 | - name: Clear version locks using yum 103 | command: yum versionlock clear 104 | 105 | - name: Get /var disk space usage 106 | shell: df -h /var | awk 'NR==2{print $5}' | sed 's/%//' 107 | register: var_space 108 | 109 | - name: Displaying the usage of /var 110 | debug: 111 | msg: "/var disk space usage: {{ var_space.stdout }}%" 112 | 113 | - name: Send warning if /var space usage is over 90% 114 | debug: 115 | msg: "/var disk space usage is over 90% - Current usage: {{ var_space.stdout }}%" 116 | when: var_space.stdout|int > 90 117 | 118 | - name: Performing update 119 | command: yum update -y 120 | 121 | - name: Install Leapp 122 | command: yum install -y leapp --enablerepo=ol7_leapp,ol7_latest 123 | 124 | - name: Install Leapp 125 | become: yes 126 | yum: 127 | name: leapp-upgrade 128 | state: present 129 | 130 | - name: Permit root login via ssh 131 | block: 132 | - name: Configure sshd 133 | lineinfile: 134 | path: /etc/ssh/sshd_config 135 | regexp: '^PermitRootLogin' 136 | line: PermitRootLogin yes 137 | register: permit_root_login 138 | 139 | - name: Install Cockpit packages 140 | yum: 141 | name: 142 | - cockpit 143 | state: present 144 | 145 | - name: Reboot system 146 | reboot: 147 | reboot_timeout: 300 148 | 149 | - name: Start and enable Cockpit service 150 | command: "systemctl enable --now cockpit" 151 | 152 | 153 | - name: Ensure the Cockpit port is open in the firewall 154 | command: firewall-cmd --permanent --add-service=cockpit 155 | 156 | - name: Reload Firewall Rules 157 | command: firewall-cmd --reload 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 
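# Note: the /var disk-space tasks above only print a warning when usage is
# over 90%. A stricter variant (a sketch, not part of the original playbook)
# could abort the run instead with ansible.builtin.assert, reusing the
# var_space result registered above:
#
#     - name: Fail when /var usage is over 90%
#       ansible.builtin.assert:
#         that: var_space.stdout | int <= 90
#         fail_msg: "/var is at {{ var_space.stdout }}% - free up space before running leapp"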
-------------------------------------------------------------------------------- /playbooks/Leapp/leapp_preupgrade.yml: -------------------------------------------------------------------------------- 1 | # Oracle Linux Automation Manager 2 | # 3 | # Copyright (c) 2023 Oracle and/or its affiliates. 4 | # Licensed under the Universal Permissive License v 1.0 as shown at 5 | # https://oss.oracle.com/licenses/upl. 6 | # 7 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 8 | # 9 | # Description: Performs leapp upgrade and displays the Inhibitors. 10 | # If the environment is behind proxy, performs the remediation of KM Note 2820209.1 11 | 12 | 13 | --- 14 | - name: Leapp Preupgrade stage 15 | hosts: all 16 | become: yes 17 | vars: 18 | leapp_switch: "" 19 | my_https_proxy: "" 20 | 21 | 22 | tasks: 23 | 24 | - name: Run leapp pre upgrade 25 | ansible.builtin.shell: > 26 | leapp preupgrade {{ leapp_switch }} 27 | register: leapp_result 28 | failed_when: leapp_result.rc != 0 29 | changed_when: false 30 | ignore_errors: yes 31 | 32 | - name: Collect human readable report results 33 | ansible.builtin.slurp: 34 | src: /var/log/leapp/leapp-report.txt 35 | register: results_txt 36 | 37 | - name: Collect JSON report results 38 | ansible.builtin.slurp: 39 | src: /var/log/leapp/leapp-report.json 40 | register: results_json 41 | 42 | - name: Parse report results 43 | ansible.builtin.set_fact: 44 | leapp_report_txt: "{{ results_txt.content | b64decode | split('\n') }}" 45 | leapp_report_json: "{{ results_json.content | b64decode | from_json }}" 46 | 47 | - name: Check for inhibitors 48 | ansible.builtin.set_fact: 49 | upgrade_inhibited: true 50 | when: "'inhibitor' in item.flags" 51 | loop: "{{ leapp_report_json.entries }}" 52 | 53 | - name: Print lines 54 | ansible.builtin.shell: grep -i -A5 "Risk Factor" /var/log/leapp/leapp-report.txt 55 | register: risk_factor_lines 56 | 57 | - name: Display lines 58 | ansible.builtin.debug: 59 | var: risk_factor_lines.stdout_lines 60 | 61 | - name: Fetch files from remote host using fetch 62 | ansible.builtin.fetch: 63 | src: "/var/log/leapp/leapp-report.txt" 64 | dest: "/tmp/test/" 65 | flat: yes 66 | 67 | - name: Answer file 68 | ansible.builtin.command: mv /var/log/leapp/answerfile.userchoices /var/log/leapp/answerfile 69 | 70 | - name: Display contents of answerfile 71 | ansible.builtin.shell: cat /var/log/leapp/answerfile 72 | register: answerfile_content 73 | 74 | - name: Print answerfile content 75 | ansible.builtin.debug: 76 | var: answerfile_content.stdout_lines 77 | 78 | - name: Check if proxy is present in /etc/yum.conf 79 | command: grep -q '^proxy=' /etc/yum.conf 80 | ignore_errors: yes 81 | register: proxy_check 82 | 83 | - name: Comment out proxy in /etc/yum.conf 84 | replace: 85 | path: /etc/yum.conf 86 | regexp: '^(proxy=.*)$' 87 | replace: '#\1' 88 | when: proxy_check.rc == 0 89 | 90 | 91 | - name: Modify leapp-upgrade-repos-ol8.repo 92 | command: sudo sed -i '/^enabled=0.*/a proxy={{ my_https_proxy }}' /etc/yum.repos.d/leapp-upgrade-repos-ol8.repo 93 | when: proxy_check.rc == 0 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /playbooks/Leapp/leapp_upgrade.yml: -------------------------------------------------------------------------------- 1 | # Oracle Linux Automation Manager 2 | # 3 | # Copyright (c) 2023 Oracle and/or its affiliates. 4 | # Licensed under the Universal Permissive License v 1.0 as shown at 5 | # https://oss.oracle.com/licenses/upl. 
6 | #
7 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
8 | #
9 | # Description: Performs the leapp upgrade and reboots the host on success.
10 | # The reboot timeout can be altered.
11 | 
12 | 
13 | ---
14 | - name: Leapp Upgrade stage
15 |   hosts: all
16 |   become: yes
17 |   vars:
18 |     leapp_switch: ""
19 | 
20 | 
21 |   tasks:
22 | 
23 |     - name: Remove_pam_pkcs11_module_check
24 |       ansible.builtin.command: sudo leapp answer --section remove_pam_pkcs11_module_check.confirm=True
25 | 
26 |     - name: Run leapp upgrade
27 |       ansible.builtin.shell: >
28 |         leapp upgrade {{ leapp_switch }}
29 |       register: leapp_result
30 |       failed_when: leapp_result.rc != 0
31 |       changed_when: false
32 |       ignore_errors: yes
33 | 
34 |     - name: Reboot if Leapp upgrade was successful
35 |       ansible.builtin.reboot:
36 |         reboot_timeout: 5000  # You can adjust the timeout as needed
37 |       when: leapp_result.rc == 0
38 | 
39 | 
40 | 
-------------------------------------------------------------------------------- /playbooks/Leapp/post_upgrade.yml: --------------------------------------------------------------------------------
1 | # Oracle Linux Automation Manager
2 | #
3 | # Copyright (c) 2023 Oracle and/or its affiliates.
4 | # Licensed under the Universal Permissive License v 1.0 as shown at
5 | # https://oss.oracle.com/licenses/upl.
6 | #
7 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
8 | #
9 | # Description: Removes residual Oracle Linux 7 packages
10 | #
11 | 
12 | 
13 | - name: Post Upgrade
14 |   hosts: all
15 |   become: yes  # Enable privilege escalation
16 | 
17 | 
18 |   tasks:
19 | 
20 |     - name: Display current release
21 |       ansible.builtin.shell: |
22 |         cat /etc/*release
23 |         uname -r
24 |       register: system_info
25 | 
26 |     - name: Print current release and kernel
27 |       ansible.builtin.debug:
28 |         var: system_info.stdout_lines
29 | 
30 |     - name: Check for residual Oracle Linux 7 packages
31 |       shell: "rpm -qa | grep el7 "
32 |       register: ol7_packages
33 |       changed_when: false
34 |       failed_when: ol7_packages.rc > 1
35 | 
36 |     - name: Remove residual Oracle Linux 7 packages
37 |       command: "dnf remove -y {{ item }}"
38 |       with_items: "{{ ol7_packages.stdout_lines }}"
39 |       when: ol7_packages.stdout_lines | length > 0
-------------------------------------------------------------------------------- /playbooks/OCI/Readme.txt: --------------------------------------------------------------------------------
1 | This directory holds playbooks for use with Oracle Cloud Infrastructure (OCI)
-------------------------------------------------------------------------------- /playbooks/OCI/create_always_free_autonomous_database.yaml: --------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Oracle Linux Automation Manager
4 | #
5 | # Copyright (c) 2022 Oracle and/or its affiliates.
6 | # Licensed under the Universal Permissive License v 1.0 as shown at
7 | # https://oss.oracle.com/licenses/upl.
8 | #
9 | # Description: Playbook to create an Always Free Autonomous Database within OCI
10 | #
11 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
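# Note: the tasks below reference the variable compartment_ocid, so supply
# compartment_ocid (an OCID such as the instance_compartment example listed
# further down) as an extra variable when launching this playbook.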
12 | # 13 | # This playbook uses the following additional variables which should be configured at run time for the template, these are examples values used 14 | # in the lab: 15 | # rescue_test: false 16 | # display name: OLAM-ADB 17 | # admin_password: BEstr0ng_#11 18 | # instance_compartment: ocid1.compartment.oc1..aaaaaaaazxd6wrvcuicrgbi6cbaob4pyrr3i7i33xudheqhx6pbcesobdazq 19 | # db_name: OLAM-DB 20 | 21 | - name: OCI Autonomous Database 22 | hosts: localhost 23 | collections: 24 | - oracle.oci 25 | connection: local 26 | vars: 27 | # the following settings are inherent to Always Free ADBs and cannot be scaled 28 | data_storage_size_in_tbs: 1 # always free ADB will have 0.02 TB (20 GB) of storage but API requires value of >= 1 TB 29 | cpu_core_count: 1 30 | 31 | tasks: 32 | - block: 33 | - name: Create a new Always Free Autonomous Database 34 | oci_database_autonomous_database: 35 | compartment_id: "{{ compartment_ocid }}" 36 | cpu_core_count: "{{ cpu_core_count }}" 37 | display_name: "{{ display_name }}" 38 | admin_password: "{{ admin_password }}" 39 | db_name: "{{ db_name }}" 40 | data_storage_size_in_tbs: "{{ data_storage_size_in_tbs }}" 41 | is_free_tier: true 42 | state: 'present' 43 | register: result 44 | 45 | - set_fact: 46 | autonomous_database_id: "{{ result.autonomous_database.id }}" 47 | 48 | - assert: 49 | that: 50 | - result.autonomous_database.lifecycle_state == "AVAILABLE" 51 | 52 | - name: List All Autonomous Databases in our compartment, filtered by Display Name 53 | oci_database_autonomous_database_facts: 54 | compartment_id: "{{ compartment_ocid }}" 55 | display_name: '{{ display_name }}' 56 | register: result 57 | 58 | - name: Assert that specified Autonomous Database is listed 59 | assert: 60 | that: 61 | - result.autonomous_databases[0].display_name == display_name 62 | 63 | 64 | - name: Get our Autonomous Database's facts 65 | oci_database_autonomous_database_facts: 66 | autonomous_database_id: "{{ autonomous_database_id }}" 67 | register: result 68 | - debug: # print facts retrieved about the Autonomous Database 69 | msg: "{{result}}" 70 | - name: Assert that specified Autonomous Database is listed 71 | assert: 72 | that: 73 | - result.autonomous_databases[0].display_name == display_name 74 | 75 | - name: Fail task when variable rescue_test is set to true (for testing the rescue section) 76 | fail: 77 | msg: "Rescue debug is enabled, we will fail to test the rescue steps" 78 | when: "( rescue_test )" 79 | 80 | 81 | rescue: 82 | 83 | # Delete Autonomous Database 84 | - name: Delete Autonomous Database 85 | oci_database_autonomous_database: 86 | autonomous_database_id: "{{ autonomous_database_id }}" 87 | state: 'absent' 88 | register: result 89 | 90 | - name: Assert that specified Autonomous Database is deleted 91 | assert: 92 | that: 93 | - result.changed == True 94 | - result.autonomous_database.display_name == display_name 95 | 96 | - fail: 97 | msg: "The sample execution failed." 98 | -------------------------------------------------------------------------------- /playbooks/OCI/list_buckets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Oracle Linux Automation Manager 4 | # 5 | # Copyright (c) 2022 Oracle and/or its affiliates. 6 | # Licensed under the Universal Permissive License v 1.0 as shown at 7 | # https://oss.oracle.com/licenses/upl. 8 | # 9 | # Description: Playbook to list object storage buckets within OCI 10 | # 11 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
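# Note: the task below uses a hard-coded Object Storage namespace
# (namespace_name: ovm). Replace it with your own tenancy namespace, which
# you can look up with the OCI CLI: oci os ns get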
12 | # 13 | # This playbook uses the following additional variables which should be configured at run time for the template, these are examples values used 14 | # in the lab: 15 | # instance_compartment: ocid1.compartment.oc1..aaaaaaaazxd6wrvcuicrgbi6cbaob4pyrr3i7i33xudheqhx6pbcesobdazq 16 | 17 | - name : List summary of existing buckets in OCI object storage 18 | collections: 19 | - oracle.oci 20 | connection: local 21 | hosts: localhost 22 | tasks: 23 | - name: List bucket facts 24 | oci_object_storage_bucket_facts: 25 | namespace_name: ovm 26 | compartment_id: "{{ instance_compartment }}" 27 | register: result 28 | - name: Dump result 29 | debug: 30 | msg: '{{result}}' 31 | -------------------------------------------------------------------------------- /playbooks/OCI/list_oci_compartment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Oracle Linux Automation Manager 4 | # 5 | # Copyright (c) 2022 Oracle and/or its affiliates. 6 | # Licensed under the Universal Permissive License v 1.0 as shown at 7 | # https://oss.oracle.com/licenses/upl. 8 | # 9 | # Description: Playbook to list a compartment within OCI 10 | # 11 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 12 | # 13 | # This playbook uses the following additional variables which should be configured at run time for the template, these are examples values used 14 | # in the lab: 15 | # instance_compartment: ocid1.compartment.oc1..aaaaaaaazxd6wrvcuicrgbi6cbaob4pyrr3i7i33xudheqhx6pbcesobdazq 16 | 17 | - name: List Compartment Details 18 | collections: 19 | - oracle.oci 20 | connection: local 21 | hosts: localhost 22 | tasks: 23 | - name: Get a specific compartment 24 | oci_identity_compartment_facts: 25 | compartment_id: "{{ instance_compartment }}" 26 | register: output 27 | - name: Print compartment details 28 | debug: 29 | msg: "{{ output }}" 30 | -------------------------------------------------------------------------------- /playbooks/OCI/oci_create_instance.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Oracle Linux Automation Manager 4 | # 5 | # Copyright (c) 2022 Oracle and/or its affiliates. 6 | # Licensed under the Universal Permissive License v 1.0 as shown at 7 | # https://oss.oracle.com/licenses/upl. 8 | # 9 | # Description: Playbook to create an OL Instance within OCI 10 | # 11 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 12 | # 13 | # This playbook uses the following additional variables which should be configured at run time for the template, these are examples values used 14 | # in the lab: 15 | # rescue_test: false 16 | # instance_shape: VM.Standard2.1 17 | # instance_hostname: OLAMinstance 18 | # instance_image: ocid1.image.oc1.iad.aaaaaaaanxbmz7rm7tkopukbbahtcbcx45v5omusafwhjaenzf7tkcoq56qa 19 | # instance_ad: ergw:US-ASHBURN-AD-1 20 | # instance_compartment: ocid1.compartment.oc1..aaaaaaaazxd6wrvcuicrgbi6cbaob4pyrr3i7i33xudheqhx6pbcesobdazq 21 | # 22 | # Also, two template files are needed which exist in the templates directory, these are to enable the ingress and egress configuration for the OCI security list 23 | # Within the lab OLAM server I placed these files in: /var/lib/awx/projects/templates with awx:awx permissions for both file and directory. 
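# For reference, a minimal ingress_security_rules.yaml.j2 could look like the
# sketch below (illustrative only; the real templates are site-specific). It
# reuses the quad_zero_route, TCP_protocol and SSH_port variables defined in
# this playbook to allow inbound SSH:
#
#   instance_ingress_security_rules:
#     - source: "{{ quad_zero_route }}"
#       protocol: "{{ TCP_protocol }}"
#       tcp_options:
#         destination_port_range:
#           min: "{{ SSH_port }}"
#           max: "{{ SSH_port }}"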
24 | # These files are: egress_security_rules.yaml.j2 & ingress_security_rules.yaml.j2 25 | # An ssh directory is also needed with the public key which you should have the matching private one in order to connect to the instance as user opc 26 | 27 | - name: Launch a compute instance 28 | hosts: localhost 29 | collections: 30 | - oracle.oci 31 | vars: 32 | # common networking definitions 33 | quad_zero_route: "0.0.0.0/0" 34 | TCP_protocol: "6" 35 | SSH_port: "22" 36 | 37 | vcn_name: "OLAM-vcn" 38 | vcn_cidr_block: "10.0.0.0/16" 39 | vcn_dns_label: "OLAMlcs" 40 | 41 | ig_name: "OLAMinternetgateway" 42 | 43 | route_table_name: "OLAMroutetable" 44 | # route all internet access to our Internet Gateway 45 | route_table_rules: 46 | - cidr_block: "{{ quad_zero_route }}" 47 | network_entity_id: "{{ ig_id }}" 48 | 49 | 50 | subnet_cidr: "10.0.0.48/28" 51 | subnet_name: "OLAMsubnet" 52 | subnet_dns_label: "OLAMsubnet" 53 | securitylist_name: "OLAMsecuritylist" 54 | 55 | tasks: 56 | - block: 57 | - set_fact: 58 | SSH_AUTHORIZED_KEYS: "{{ lookup('file', './ssh/oci_id_rsa.pub') }}" 59 | 60 | - name: Create a VCN 61 | oci_network_vcn: 62 | compartment_id: "{{ instance_compartment }}" 63 | display_name: "{{ vcn_name }}" 64 | cidr_block: "{{ vcn_cidr_block }}" 65 | dns_label: "{{ vcn_dns_label }}" 66 | register: result 67 | - set_fact: 68 | vcn_id: "{{ result.vcn.id }}" 69 | 70 | - name: Create a new Internet Gateway 71 | oci_network_internet_gateway: 72 | compartment_id: "{{ instance_compartment }}" 73 | vcn_id: "{{ vcn_id }}" 74 | name: "{{ ig_name }}" 75 | is_enabled: 'yes' 76 | state: 'present' 77 | register: result 78 | - set_fact: 79 | ig_id: "{{ result.internet_gateway.id }}" 80 | 81 | - name: Create route table to connect internet gateway to the VCN 82 | oci_network_route_table: 83 | compartment_id: "{{ instance_compartment }}" 84 | vcn_id: "{{ vcn_id }}" 85 | name: "{{ route_table_name }}" 86 | route_rules: "{{ route_table_rules }}" 87 | state: 'present' 88 | register: result 89 | - set_fact: 90 | rt_id: "{{ result.route_table.id }}" 91 | 92 | - name: create ingress rules yaml body 93 | template: src=./templates/ingress_security_rules.yaml.j2 dest=/tmp/instance_ingress_security_rules.yaml 94 | 95 | - name: create egress yaml body 96 | template: src=./templates/egress_security_rules.yaml.j2 dest=/tmp/instance_egress_security_rules.yaml 97 | 98 | - name: load the variables defined in the ingress rules yaml body 99 | include_vars: 100 | file: /tmp/instance_ingress_security_rules.yaml 101 | name: loaded_ingress 102 | 103 | - name: print loaded_ingress 104 | debug: 105 | msg: "loaded ingress is {{loaded_ingress}}" 106 | 107 | - name: load the variables defined in the egress rules yaml body 108 | include_vars: 109 | file: /tmp/instance_egress_security_rules.yaml 110 | name: loaded_egress 111 | 112 | - name: print loaded_egress 113 | debug: 114 | msg: "loaded egress is {{loaded_egress}}" 115 | 116 | - name: Create a security list for allowing access to public instance 117 | oci_network_security_list: 118 | name: "{{ securitylist_name }}" 119 | compartment_id: "{{ instance_compartment }}" 120 | vcn_id: '{{ vcn_id }}' 121 | ingress_security_rules: "{{ loaded_ingress.instance_ingress_security_rules }}" 122 | egress_security_rules: "{{ loaded_egress.instance_egress_security_rules }}" 123 | register: result 124 | - set_fact: 125 | instance_security_list_ocid: "{{ result.security_list.id }}" 126 | 127 | - name: Create a subnet to host the public instance. Link security_list and route_table. 
128 |       oci_network_subnet:
129 |         availability_domain: "{{ instance_ad }}"
130 |         cidr_block: "{{ subnet_cidr }}"
131 |         compartment_id: "{{ instance_compartment }}"
132 |         display_name: "{{ subnet_name }}"
133 |         prohibit_public_ip_on_vnic: false
134 |         route_table_id: "{{ rt_id }}"
135 |         security_list_ids: [ "{{ instance_security_list_ocid }}" ]
136 |         vcn_id: '{{ vcn_id }}'
137 |         dns_label: "{{ subnet_dns_label }}"
138 |       register: result
139 |     - set_fact:
140 |         instance_subnet_id: "{{ result.subnet.id }}"
141 | 
142 |     - name: Launch an instance
143 |       oci_compute_instance:
144 |         availability_domain: "{{ instance_ad }}"
145 |         compartment_id: "{{ instance_compartment }}"
146 |         name: "{{ instance_hostname }}"
147 |         image_id: "{{ instance_image }}"
148 |         shape: "{{ instance_shape }}"
149 |         create_vnic_details:
150 |           assign_public_ip: true
151 |           hostname_label: "{{ instance_hostname }}"
152 |           subnet_id: "{{ instance_subnet_id }}"
153 |         metadata:
154 |           ssh_authorized_keys: "{{ SSH_AUTHORIZED_KEYS }}"
155 |       register: result
156 | 
157 |     - name: Print instance details
158 |       debug:
159 |         msg: "Launched a new instance {{ result }}"
160 |     - set_fact:
161 |         instance_id: "{{ result.instance.id }}"
162 | 
163 |     - name: Get the VNIC attachment details of instance
164 |       oci_compute_vnic_attachment_facts:
165 |         compartment_id: "{{ instance_compartment }}"
166 |         instance_id: "{{ instance_id }}"
167 |       register: result
168 | 
169 |     - name: Get details of the VNIC
170 |       oci_network_vnic_facts:
171 |         id: "{{ result.vnic_attachments[0].vnic_id }}"
172 |       register: result
173 |     - set_fact:
174 |         instance_public_ip: "{{ result.vnic.public_ip }}"
175 | 
176 |     - name: Print the public ip of the newly launched instance
177 |       debug:
178 |         msg: "Public IP of launched instance {{ instance_public_ip }}, connect as user opc with the uploaded public key"
179 | 
180 |     - name: Fail task when variable rescue_test is set to true (for testing the rescue section)
181 |       fail:
182 |         msg: "Rescue debug is enabled, we will fail to test the rescue steps"
183 |       when: "( rescue_test )"
184 | 
185 |     rescue:
186 |     - name: Terminate the instance
187 |       oci_compute_instance:
188 |         id: "{{ instance_id }}"
189 |         state: absent
190 | 
191 |     - name: Delete the subnet
192 |       oci_network_subnet:
193 |         id: "{{ instance_subnet_id }}"
194 |         state: absent
195 | 
196 |     - name: Delete the security list
197 |       oci_network_security_list:
198 |         id: "{{ instance_security_list_ocid }}"
199 |         state: absent
200 | 
201 |     - name: Delete the route table
202 |       oci_network_route_table:
203 |         id: "{{ rt_id }}"
204 |         state: absent
205 | 
206 |     - name: Delete the Internet Gateway
207 |       oci_network_internet_gateway:
208 |         id: "{{ ig_id }}"
209 |         state: absent
210 | 
211 |     - name: Delete the VCN
212 |       oci_network_vcn:
213 |         vcn_id: "{{ vcn_id }}"
214 |         state: absent
215 | 
-------------------------------------------------------------------------------- /playbooks/OCNE/README.md: --------------------------------------------------------------------------------
1 | # OCNE - Oracle Cloud Native Environment Ansible Playbooks
2 | 
3 | The Oracle Cloud Native Environment playbooks support servers on Oracle Linux 8 with the latest version of Oracle Cloud Native Environment (OCNE). The playbooks only support the quick install procedure with a configuration file and work with OCNE 1.7 and OCNE 1.8.
4 | 
5 | These are the main playbooks to run in Ansible to install, upscale, or downscale an OCNE cluster:
6 | 
7 | * ocne-quick-install.yml - Installs the initial OCNE environment with a quick install using a configuration file
8 | * ocne-upscale-cluster.yml - Adds additional nodes with a quick install using a configuration file
9 | * ocne-downscale-cluster.yml - Removes nodes using a configuration file
10 | 
11 | With the following playbooks you can add modules to an existing OCNE environment:
12 | 
13 | * deploy-mod-metallb.yml - Deploys the MetalLB load balancer
14 | * deploy-mod-ociccm.yml - Deploys the OCI-CCM module when running in Oracle OCI, used for OCI load balancers and storage
15 | * deploy-mod-istio.yml - Deploys the Istio service mesh
16 | * deploy-mod-olm.yml - Deploys the OCNE Operator Lifecycle Manager
17 | 
18 | 
19 | # Configuration
20 | 
21 | Make sure the following configuration steps are done before running the playbooks.
22 | 
23 | ## OCNE Configuration file
24 | 
25 | The playbooks use the [Quick Install using Configuration File](https://docs.oracle.com/en/operating-systems/olcne/1.8/quickinstall/task-provision-config.html) installation scenario.
26 | An OCNE configuration file includes all information about the environments and modules you want to create.
27 | This file, in combination with the quick install procedure of OCNE, saves repeated steps in the
28 | installation process.
29 | 
30 | The OCNE playbooks require the OCNE configuration file to be available at a specified download URL (`env_file_url` variable); the configuration file is stored on the OCNE operator node under a specified file name (`env_file` variable).
31 | 
32 | Information on how to create a configuration file is explained in the [OCNE Platform CLI documentation](https://docs.oracle.com/en/operating-systems/olcne/1.8/olcnectl/config.html#write).
33 | 
34 | ## Inventory file
35 | The Inventory file defines the hostnames and roles in the OCNE cluster. Example inventory files are provided in the `/inventories` directory. The `hosts-example.ini` file provides configuration information to deploy the initial OCNE cluster.
36 | 
37 | Add the hostnames of your OCNE cluster to the following groups in the inventory file:
38 | | Variable | Required | Description |
39 | | -------- | -------- | ----------- |
40 | | ocne_op | Yes | the OCNE Operator Node
41 | | ocne_kube_control | Yes | the Kubernetes Control Plane Nodes
42 | | ocne_kube_worker | Yes | the Kubernetes Worker Nodes
43 | 
44 | 
45 | ## Variables
46 | The variables for the OCNE cluster are defined in the `/group_vars/all.yml` file. Example variable files are provided for an initial cluster. Below is a list of the used variables. All the variables must exist in the file; some are required, others can be left empty when not in use.
47 | 
48 | | Variable | Required | Description |
49 | | -------- | -------- | ----------- |
50 | | ocne_version | Optional | Either _ocne17_ or _ocne18_; if not defined, _ocne18_ is the default
51 | | env_file_url | Yes | URL for the OCNE configuration file
52 | | env_file | Yes | Name of the OCNE configuration file
53 | | use_proxy | Yes | Set use_proxy to _true_ if the environment is behind a proxy, else set to _false_
54 | | my_https_proxy | | Proxy details, leave empty if not using a proxy
55 | | my_http_proxy | | Proxy details, leave empty if not using a proxy
56 | | my_no_proxy | | Proxy details, leave empty if not using a proxy
57 | | container_registry | Yes | Container registry path to get the OCNE component container images
58 | | ocne_environment | Yes | Set name for the OCNE environment
59 | | ocne_k8s | Yes | Set name of the OCNE Kubernetes module
60 | | ocne_helm | Yes | Set name of the OCNE Helm module, installed by default but only used when other modules are configured
61 | | ocne_istio | | Set name of the OCNE Istio module. Leave empty if not creating the Istio module
62 | | ocne_olm | | Set name of the OCNE OLM module. Leave empty if not creating the OLM module
63 | | ocne_oci | | Set name of the OCNE OCI-CCM module. Leave empty if not creating the OCI-CCM module
64 | | ocne_metallb | | Set name of the OCNE MetalLB module. Leave empty if not creating the MetalLB module
65 | 
66 | ## MetalLB Load balancer module (optional)
67 | You must provide a MetalLB configuration file in the playbook's `files` subdirectory; the file is copied to the operator node and used with the MetalLB module configuration. An example configuration file is provided, adjust it to your own IP address range settings.
68 | 
69 |     $ cd /files
70 |     $ ls -l
71 |     -rw-rw-r-- 1 opc opc 98 Aug 12 12:23 metallb-config.yaml
72 |     $ cat metallb-config.yaml
73 |     address-pools:
74 |     - name: default
75 |       protocol: layer2
76 |       addresses:
77 |       - 192.168.178.90-192.168.178.95
78 | 
79 | ## Oracle Cloud OCI-CCM module (optional)
80 | 
81 | The playbook to deploy the [OCI Cloud Controller Manager (CCM)](https://github.com/oracle/oci-cloud-controller-manager) module includes your OCI cloud authentication and configuration settings. After adding your authentication and OCI configuration information to the playbook, it is recommended to encrypt the file with an Ansible Vault password. The password will be requested when you run the playbook in the CLI or in Oracle Linux Automation Manager. To encrypt the playbook:
82 | 
83 |     $ cp deploy-mod-ociccm-example.yml deploy-mod-ociccm.yml
84 |     $
85 |     $ ansible-vault encrypt deploy-mod-ociccm.yml
86 | 
87 | The following variables are required in the `deploy-mod-ociccm.yml` playbook; consult the [OCNE documentation](https://docs.oracle.com/en/operating-systems/olcne/) (Storage or Application Load Balancers) for additional information.
88 | 
89 | | Variable | Required | Description |
90 | | -------- | -------- | ----------- |
91 | | oci_region | Yes | Example: uk-london-1
92 | | oci_tenancy | Yes | Example: ocid1.tenancy.oc1..aaaaaaae..........cok7mlsa
93 | | oci_compartment | Yes | Example: ocid1.compartment.oc1..aaaaaaaa..........bmn3j6qh
94 | | oci_user | Yes | Example: ocid1.user.oc1..aaaaaaaa..........wp432ssg
95 | | oci_fingerprint | Yes | Example: 4e:69:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:cc
96 | | oci_private_key | Yes | Example: /tmp/oci_api_key.pem
97 | | oci_vcn | Yes | Example: ocid1.vcn.oc1.uk-london-1.amaaaaaa..........j5jw3iag
98 | | oci_lb_subnet1 | Yes | Example: ocid1.subnet.oc1.uk-london-1.aaaaaaaa..........w3a75jhf
99 | 
100 | ### OCI API Key
101 | For the OCI-CCM module you need to store the API key file in the playbook's `files` subdirectory:
102 | 
103 |     $ cd /files
104 |     $ ls -l
105 |     total 16
106 |     -rw------- 1 opc opc 1730 Aug 12 13:07 oci_api_key.pem
107 | 
108 | # How to use
109 | 
110 | The playbooks are tested with Kubernetes nodes on on-premises infrastructure as well as with OCI instances in Oracle Cloud. They work from the command line by running the `ansible-playbook` command or when they are imported into Oracle Linux Automation Manager (OLAM). Example command line:
111 | 
112 |     $ ansible-playbook -i inventories/hosts.ini ./ocne-quick-install.yml
113 |     $ ansible-playbook -i inventories/hosts.ini ./deploy-mod-metallb.yml
114 |     $ ansible-playbook -i inventories/hosts.ini ./deploy-mod-ociccm.yml
115 | 
116 | If you stored the playbooks as a project in Oracle Linux Automation Manager (OLAM), the best approach is to create an inventory in the OLAM UI, add the hosts and groups as described above, and add the required variables to the Variables section of the inventory.
117 | 
-------------------------------------------------------------------------------- /playbooks/OCNE/collections/requirements.yml: --------------------------------------------------------------------------------
1 | ---
2 | collections:
3 |   - name: kubernetes.core
4 |   - name: ansible.posix
5 | 
-------------------------------------------------------------------------------- /playbooks/OCNE/deploy-mod-istio.yml: --------------------------------------------------------------------------------
1 | #
2 | # Oracle Linux Automation Manager
3 | #
4 | # Copyright (c) 2022 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at
6 | # https://oss.oracle.com/licenses/upl.
7 | #
8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
9 | #
10 | # Description: Ansible playbook to create
11 | # and install the OCNE Istio module.
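# After deployment you can verify the module from the operator node with the
# same CLI the tasks below use, for example:
#
#   olcnectl module instances --environment-name <ocne_environment>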
12 | # 13 | # yamllint disable 14 | 15 | --- 16 | 17 | - name: Create and Deploy an Istio Module 18 | hosts: ocne_op 19 | gather_facts: false 20 | 21 | tasks: 22 | 23 | - name: Create a Istio Module (includes Prometheus and Grafana) 24 | command: olcnectl module create \ 25 | --environment-name {{ ocne_environment }} \ 26 | --module istio \ 27 | --name {{ ocne_istio }} \ 28 | --istio-helm-module {{ ocne_helm }} 29 | 30 | - name: Validate the Istio Module 31 | command: olcnectl module validate \ 32 | --environment-name {{ ocne_environment }} \ 33 | --name {{ ocne_istio }} 34 | 35 | - name: Deploy the Istio Module 36 | command: olcnectl module install \ 37 | --environment-name {{ ocne_environment }} \ 38 | --name {{ ocne_istio }} 39 | register: deploy_istio 40 | until: deploy_istio is not failed 41 | retries: 3 42 | -------------------------------------------------------------------------------- /playbooks/OCNE/deploy-mod-metallb.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 9 | # 10 | # Description: Ansible playbook to create 11 | # and install the OCNE MetalLB modules. 12 | # 13 | # yamllint disable 14 | 15 | --- 16 | 17 | - name: Setting the MetalLB Network Ports 18 | hosts: ocne_kube_worker ocne_new_kube_worker 19 | become: yes 20 | gather_facts: false 21 | 22 | tasks: 23 | 24 | - name: Open Port 7946 TCP 25 | firewalld: 26 | port: 7946/tcp 27 | permanent: yes 28 | state: enabled 29 | 30 | - name: Open Port 7946 UDP 31 | firewalld: 32 | port: 7946/udp 33 | permanent: yes 34 | state: enabled 35 | 36 | - name: Restart firewalld 37 | service: 38 | name: firewalld 39 | state: restarted 40 | enabled: yes 41 | 42 | - name: Create and Deploy an MetalLB Module 43 | hosts: ocne_op 44 | gather_facts: false 45 | 46 | tasks: 47 | 48 | - name: Copy the MetalLB configuration file 49 | copy: 50 | src: "{{ playbook_dir }}/files/metallb-config.yaml" 51 | dest: "/tmp/metallb-config.yaml" 52 | mode: 0644 53 | 54 | - name: Create a MetalLB Module 55 | command: olcnectl module create \ 56 | --environment-name {{ ocne_environment }} \ 57 | --module metallb \ 58 | --name {{ ocne_metallb }} \ 59 | --metallb-helm-module {{ ocne_helm }} \ 60 | --metallb-config /tmp/metallb-config.yaml 61 | 62 | - name: Validate the MetalLB Module 63 | command: olcnectl module validate \ 64 | --environment-name {{ ocne_environment }} \ 65 | --name {{ ocne_metallb }} 66 | 67 | - name: Deploy the MetalLB Module 68 | command: olcnectl module install \ 69 | --environment-name {{ ocne_environment }} \ 70 | --name {{ ocne_metallb }} 71 | register: deploy_metallb 72 | until: deploy_metallb is not failed 73 | retries: 3 74 | -------------------------------------------------------------------------------- /playbooks/OCNE/deploy-mod-ociccm-example.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 9 | # 10 | # Description: Ansible playbook to create 11 | # and install the OCNE OCI-CCM module. 
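# When running an ansible-vault encrypted copy of this playbook (see the
# steps below), supply the vault password at run time, for example:
#
#   $ ansible-playbook -i inventories/hosts.ini --ask-vault-pass deploy-mod-ociccm.yml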
12 | #
13 | # If you want to encrypt this file, please execute the following command:
14 | #
15 | # $ cp deploy-mod-ociccm-example.yml deploy-mod-ociccm.yml
16 | # $
17 | # $ ansible-vault encrypt deploy-mod-ociccm.yml
18 | #
19 | # yamllint disable
20 | 
21 | ---
22 | 
23 | - name: Setting the OCI-CCM Health Check Network Ports
24 |   hosts: ocne_kube_worker ocne_new_kube_worker
25 |   become: yes
26 |   gather_facts: false
27 | 
28 |   tasks:
29 | 
30 |     - name: Open Port 10256 TCP
31 |       firewalld:
32 |         port: 10256/tcp
33 |         permanent: yes
34 |         state: enabled
35 | 
36 |     - name: Restart firewalld
37 |       service:
38 |         name: firewalld
39 |         state: restarted
40 |         enabled: yes
41 | 
42 | 
43 | - name: Create and Deploy an OCI-CCM Module
44 |   hosts: ocne_op
45 |   gather_facts: false
46 | 
47 |   vars:
48 |     oci_region: ""
49 |     oci_tenancy: ""
50 |     oci_compartment: ""
51 |     oci_user: ""
52 |     oci_fingerprint: ""
53 |     oci_private_key: /tmp/oci_api_key.pem
54 |     oci_vcn: ""
55 |     oci_lb_subnet1: ""
56 | 
57 |   tasks:
58 | 
59 |     - name: Copy the Oracle Cloud Infrastructure API signing key
60 |       copy:
61 |         src: "{{ playbook_dir }}/files/oci_api_key.pem"
62 |         dest: "{{ oci_private_key }}"
63 |         mode: 0600
64 | 
65 |     - name: Create an Oracle OCI-CCM Module
66 |       command: olcnectl module create \
67 |                --environment-name {{ ocne_environment }} \
68 |                --module oci-ccm \
69 |                --name {{ ocne_oci }} \
70 |                --oci-ccm-helm-module {{ ocne_helm }} \
71 |                --oci-region {{ oci_region }} \
72 |                --oci-tenancy {{ oci_tenancy }} \
73 |                --oci-user {{ oci_user }} \
74 |                --oci-fingerprint {{ oci_fingerprint }} \
75 |                --oci-vcn {{ oci_vcn }} \
76 |                --oci-lb-subnet1 {{ oci_lb_subnet1 }} \
77 |                --oci-compartment {{ oci_compartment }} \
78 |                --oci-private-key {{ oci_private_key }}
79 | 
80 |     - name: Validate the Oracle OCI-CCM Module
81 |       command: olcnectl module validate \
82 |                --environment-name {{ ocne_environment }} \
83 |                --name {{ ocne_oci }}
84 | 
85 |     - name: Deploy the Oracle OCI-CCM Module
86 |       command: olcnectl module install \
87 |                --environment-name {{ ocne_environment }} \
88 |                --name {{ ocne_oci }}
89 |       register: deploy_oci_ccm
90 |       until: deploy_oci_ccm is not failed
91 |       retries: 3
92 | 
-------------------------------------------------------------------------------- /playbooks/OCNE/deploy-mod-olm.yml: --------------------------------------------------------------------------------
1 | #
2 | # Oracle Linux Automation Manager
3 | #
4 | # Copyright (c) 2022 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at
6 | # https://oss.oracle.com/licenses/upl.
7 | #
8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
9 | #
10 | # Description: Ansible playbook to create
11 | # and install OCNE Operator Lifecycle Manager modules.
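# Note: the tasks below only run when both ocne_olm and ocne_helm are set to
# non-empty names in group_vars/all.yml; leave them empty to skip this module.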
12 | # 13 | # yamllint disable 14 | 15 | --- 16 | 17 | - name: Create and Deploy a Helm and OLM Module 18 | hosts: ocne_op 19 | gather_facts: false 20 | 21 | tasks: 22 | 23 | - name: Create an OLM Module 24 | command: olcnectl module create \ 25 | --environment-name {{ ocne_environment }} \ 26 | --module operator-lifecycle-manager \ 27 | --name {{ ocne_olm }} \ 28 | --olm-helm-module {{ ocne_helm }} 29 | when: 30 | - ocne_olm != "" 31 | - ocne_helm != "" 32 | 33 | - name: Deploy the OLM Module 34 | command: olcnectl module install \ 35 | --environment-name {{ ocne_environment}} \ 36 | --name {{ ocne_olm }} 37 | when: 38 | - ocne_olm != "" 39 | -------------------------------------------------------------------------------- /playbooks/OCNE/files/metallb-config.yaml: -------------------------------------------------------------------------------- 1 | address-pools: 2 | - name: default 3 | protocol: layer2 4 | addresses: 5 | - 192.168.178.90-192.168.178.95 6 | -------------------------------------------------------------------------------- /playbooks/OCNE/files/ocne-environment-example.yaml: -------------------------------------------------------------------------------- 1 | environments: 2 | - environment-name: ocne_env 3 | globals: 4 | api-server: ocne-operator.example.com:8091 5 | modules: 6 | - module: kubernetes 7 | name: ocne_cluster 8 | generate-scripts: true 9 | args: 10 | container-registry: container-registry.oracle.com/olcne 11 | master-nodes: 12 | - ocne-control01.example.com:8090 13 | worker-nodes: 14 | - ocne-worker01.example.com:8090 15 | - ocne-worker02.example.com:8090 16 | selinux: enforcing 17 | restrict-service-externalip: false 18 | - module: helm 19 | name: myhelm 20 | args: 21 | helm-kubernetes-module: ocne_cluster 22 | -------------------------------------------------------------------------------- /playbooks/OCNE/files/ocne-environment-ha-example.yaml: -------------------------------------------------------------------------------- 1 | environments: 2 | - environment-name: ocne_env 3 | globals: 4 | api-server: ocne-operator.example.com:8091 5 | modules: 6 | - module: kubernetes 7 | name: ocne_cluster 8 | generate-scripts: true 9 | args: 10 | container-registry: container-registry-ams.oracle.com/olcne 11 | virtual-ip: 192.168.178.81 12 | master-nodes: 13 | - ocne-control01.example.com:8090 14 | - ocne-control02.example.com:8090 15 | - ocne-control03.example.com:8090 16 | worker-nodes: 17 | - ocne-worker01.example.com:8090 18 | - ocne-worker02.example.com:8090 19 | - ocne-worker03.example.com:8090 20 | - ocne-worker04.example.com:8090 21 | selinux: enforcing 22 | restrict-service-externalip: false 23 | - module: helm 24 | name: myhelm 25 | args: 26 | helm-kubernetes-module: ocne_cluster 27 | -------------------------------------------------------------------------------- /playbooks/OCNE/group_vars/all-example.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2023 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 
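# Copy this file to group_vars/all.yml and adjust the values below before
# running the playbooks.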
9 | #
10 | # Description:
11 | # Variables used in the Playbooks
12 | #
13 | 
14 | # Version to install
15 | ocne_version: ocne18
16 | 
17 | # Configure OCNE configuration file download URL and name
18 | env_file_url: http://example.com/ocne-configfile.yml
19 | env_file: ocne-configfile.yml
20 | 
21 | # Set use_proxy to 'true' if the environment is behind a proxy, else set to
22 | # 'false'.
23 | # !!!Required!!!
24 | use_proxy: false
25 | 
26 | # Proxy details. Leave empty if not using a proxy.
27 | my_https_proxy:
28 | my_http_proxy:
29 | my_no_proxy:
30 | 
31 | # Container registry path to get the OCNE component container images
32 | # !!!Required!!!
33 | container_registry: container-registry-ams.oracle.com/olcne
34 | 
35 | # The virtual IP for an olcne-nginx load balancer, replace with your
36 | # virtual IP address.
37 | # !!!Required if multi-master (HA) with nginx!!!
38 | virtual_ip: 192.168.178.81
39 | 
40 | # Set name for the OCNE environment.
41 | # !!!Required!!!
42 | ocne_environment: ocne_env
43 | 
44 | # Set name of the OCNE Kubernetes module.
45 | # !!!Required!!!
46 | ocne_k8s: ocne_cluster
47 | 
48 | # Set name of the OCNE Helm module. Required if deploying the Istio or OLM modules.
49 | # Leave empty if not creating either of them.
50 | ocne_helm: myhelm
51 | 
52 | # Set name of the OCNE Istio module. Leave empty if not creating the Istio module.
53 | ocne_istio: myistio
54 | 
55 | # Set name of the OCNE OLM module. Leave empty if not creating the OLM module.
56 | ocne_olm: myolm
57 | 
58 | # Set name of the OCNE OCI-CCM module. Leave empty if not creating the OCI-CCM module.
59 | ocne_oci: myoci
60 | 
61 | # Set name of the OCNE MetalLB module. Leave empty if not creating the MetalLB module.
62 | ocne_metallb: mymetallb
63 | 
64 | 
-------------------------------------------------------------------------------- /playbooks/OCNE/inventories/hosts-example.ini: --------------------------------------------------------------------------------
1 | #
2 | # Oracle Linux Automation Manager
3 | #
4 | # Copyright (c) 2022 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at
6 | # https://oss.oracle.com/licenses/upl.
7 | #
8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
9 | #
10 | # Description:
11 | # Inventory file for the initial deployment of an OCNE environment
12 | #
13 | # Define groups and corresponding hostnames for the OCNE cluster.
14 | # ocne_op - The OCNE Operator Node
15 | # ocne_kube_control - The Kubernetes Control Plane Nodes
16 | # ocne_kube_worker - The Kubernetes Worker Nodes
17 | #
18 | [ocne_op]
19 | ocne-operator
20 | 
21 | [ocne_kube_control]
22 | ocne-control01
23 | 
24 | [ocne_kube_worker]
25 | ocne-worker01
26 | ocne-worker02
27 | 
28 | 
-------------------------------------------------------------------------------- /playbooks/OCNE/ocne-downscale-cluster.yml: --------------------------------------------------------------------------------
1 | #
2 | # Oracle Linux Automation Manager
3 | #
4 | # Copyright (c) 2023 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at
6 | # https://oss.oracle.com/licenses/upl.
7 | #
8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
9 | #
10 | # Description: Ansible playbook to downscale an OCNE cluster using a configuration file.
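# The downscaled configuration file fetched below is expected to be a copy of
# the original configuration file with the nodes to be removed taken out of
# the worker-nodes/master-nodes lists (compare the examples under files/).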
11 | # 12 | # yamllint disable 13 | 14 | --- 15 | 16 | - name: Downscale OCNE cluster 17 | hosts: ocne_op 18 | gather_facts: true 19 | vars: 20 | - proxy: "{{ use_proxy | default('false') }}" 21 | 22 | tasks: 23 | 24 | - name: Download downscaled OCNE environment file to operator node 25 | ansible.builtin.get_url: 26 | url: "{{ env_file_url }}" 27 | dest: "~/downscale-{{ env_file }}" 28 | 29 | - name: Downscale with environment file 30 | command: olcnectl provision --ssh-identity-file ~/.ssh/id_ocne_rsa --yes --timeout 20 \ 31 | --ssh-login-name "{{ ansible_user }}" \ 32 | --config-file "~/downscale-{{ env_file }}" 33 | when: 34 | - proxy == false 35 | 36 | - name: Downscale with environment file and proxy enabled 37 | command: olcnectl provision --ssh-identity-file ~/.ssh/id_ocne_rsa --yes --timeout 20 \ 38 | --ssh-login-name "{{ ansible_user }}" \ 39 | --http-proxy "{{ my_http_proxy }}" \ 40 | --https-proxy "{{ my_https_proxy }}" \ 41 | --no-proxy "{{ my_no_proxy }}" \ 42 | --config-file "~/downscale-{{ env_file }}" 43 | when: 44 | - proxy == true 45 | -------------------------------------------------------------------------------- /playbooks/OCNE/ocne-quick-install.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2023 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 9 | # 10 | # Description: Ansible playbook to setup the OCNE Operator node. 11 | # 12 | # yamllint disable 13 | 14 | --- 15 | 16 | - name: Add passwordless ssh for hosts 17 | hosts: ocne_op 18 | become: yes 19 | become_method: sudo 20 | 21 | tasks: 22 | 23 | - name: generate a unique OpenSSH keypair for passwordless ssh 24 | community.crypto.openssh_keypair: 25 | path: '/home/{{ ansible_user }}/.ssh/id_ocne_rsa' 26 | owner: "{{ ansible_user }}" 27 | group: "{{ ansible_user }}" 28 | mode: '0600' 29 | 30 | - name: fetch the unique OpenSSH public key 31 | run_once: yes 32 | fetch: 33 | src: '/home/{{ ansible_user }}/.ssh/id_ocne_rsa.pub' 34 | dest: buffer/ 35 | flat: yes 36 | 37 | - name: Enable Strict Host Key Checking 38 | copy: 39 | dest: "/home/{{ ansible_user }}/.ssh/config" 40 | owner: "{{ ansible_user }}" 41 | group: "{{ ansible_user }}" 42 | mode: 0644 43 | content: | 44 | Host * 45 | StrictHostKeyChecking no 46 | UserKnownHostsFile=/dev/null 47 | 48 | - name: SSH and DNF pre-requisites 49 | hosts: ocne_op ocne_kube_control ocne_kube_worker 50 | become: yes 51 | become_method: sudo 52 | vars: 53 | - proxy: "{{ use_proxy | default('false') }}" 54 | 55 | tasks: 56 | 57 | - name: Set authorized key for passwordless ssh 58 | authorized_key: 59 | user: "{{ ansible_user }}" 60 | state: present 61 | key: "{{ lookup('file', 'buffer/id_ocne_rsa.pub') }}" 62 | 63 | - name: Add proxy config to dnf.conf 64 | lineinfile: 65 | path: /etc/dnf/dnf.conf 66 | line: "proxy={{ my_https_proxy }}" 67 | state: present 68 | when: 69 | - proxy == true 70 | 71 | - name: Workaround for ol8_developer channels not available bug 72 | dnf: 73 | name: oraclelinux-developer-release-el8 74 | state: latest 75 | ignore_errors: yes 76 | 77 | 78 | - name: Configures repositories and install ocnectl on Operator Node 79 | hosts: ocne_op 80 | become: yes 81 | become_method: sudo 82 | vars: 83 | - ocne_v: "{{ ocne_version | default('ocne18') }}" 84 | 85 | tasks: 86 | 87 | - name: Install 
oracle-olcne-release-el8 RPM 88 | dnf: 89 | name: oracle-olcne-release-el8 90 | state: latest 91 | ignore_errors: yes 92 | 93 | - name: Enable the OL8 repos for OCNE 1.8 94 | shell: | 95 | dnf config-manager --enable ol8_olcne18 ol8_addons ol8_baseos_latest ol8_appstream ol8_kvm_appstream ol8_UEKR7 96 | when: 97 | - ocne_v == "ocne18" 98 | 99 | - name: Disable OL8 older olcne repos for OCNE 1.8 100 | shell: | 101 | dnf config-manager --disable ol8_olcne17 ol8_olcne16 ol8_olcne15 ol8_olcne14 ol8_olcne13 ol8_olcne12 ol8_developer 102 | ignore_errors: yes 103 | when: 104 | - ocne_v == "ocne18" 105 | 106 | - name: Enable the OL8 repos for OCNE 1.7 107 | shell: | 108 | dnf config-manager --enable ol8_olcne17 ol8_addons ol8_baseos_latest ol8_appstream ol8_kvm_appstream ol8_UEKR7 109 | when: 110 | - ocne_v == "ocne17" 111 | 112 | - name: Disable OL8 older olcne repos for OCNE 1.7 113 | shell: | 114 | dnf config-manager --disable ol8_olcne16 ol8_olcne15 ol8_olcne14 ol8_olcne13 ol8_olcne12 ol8_developer 115 | ignore_errors: yes 116 | when: 117 | - ocne_v == "ocne17" 118 | 119 | - name: Install olcnectl RPM 120 | dnf: 121 | name: olcnectl 122 | state: latest 123 | ignore_errors: yes 124 | 125 | 126 | - name: Provision OCNE cluster with Quick install and configuration file 127 | hosts: ocne_op 128 | gather_facts: true 129 | vars: 130 | - operator_str: "{{ansible_fqdn}}:8091" 131 | - proxy: "{{ use_proxy | default('false') }}" 132 | 133 | tasks: 134 | 135 | - name: Download OCNE configuration file to operator node 136 | ansible.builtin.get_url: 137 | url: "{{ env_file_url }}" 138 | dest: "~/{{ env_file }}" 139 | 140 | - name: Quick install with configuration file 141 | command: olcnectl provision --ssh-identity-file ~/.ssh/id_ocne_rsa --yes --timeout 20 \ 142 | --ssh-login-name "{{ ansible_user }}" \ 143 | --config-file "~/{{ env_file }}" 144 | when: 145 | - proxy == false 146 | 147 | - name: Quick install with configuration file and proxy enabled 148 | command: olcnectl provision --ssh-identity-file ~/.ssh/id_ocne_rsa --yes --timeout 20 \ 149 | --ssh-login-name "{{ ansible_user }}" \ 150 | --http-proxy "{{ my_http_proxy }}" \ 151 | --https-proxy "{{ my_https_proxy }}" \ 152 | --no-proxy "{{ my_no_proxy }}" \ 153 | --config-file "~/{{ env_file }}" 154 | when: 155 | - proxy == true 156 | 157 | - name: Update global flag configuration for environment 158 | command: olcnectl module instances \ 159 | --api-server {{ operator_str }} \ 160 | --environment-name {{ ocne_environment }} \ 161 | --update-config 162 | 163 | - name: Set up kubectl on the Master Node 164 | hosts: ocne_kube_control 165 | gather_facts: false 166 | 167 | tasks: 168 | - name: Set up kubectl 169 | shell: | 170 | rm $HOME/.kube/config 171 | mkdir -p $HOME/.kube 172 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 173 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 174 | echo 'export KUBECONFIG=$HOME/.kube/config' >> $HOME/.bashrc 175 | -------------------------------------------------------------------------------- /playbooks/OCNE/ocne-upscale-cluster.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2023 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 9 | # 10 | # Description: Ansible playbook to setup the OCNE Operator node. 
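# Note: add the new control plane or worker nodes to the ocne_kube_control /
# ocne_kube_worker inventory groups and reference them in the upscaled
# configuration file fetched below before running this playbook.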
-------------------------------------------------------------------------------- /playbooks/OCNE/ocne-upscale-cluster.yml: --------------------------------------------------------------------------------
1 | #
2 | # Oracle Linux Automation Manager
3 | #
4 | # Copyright (c) 2023 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at
6 | # https://oss.oracle.com/licenses/upl.
7 | #
8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
9 | #
10 | # Description: Ansible playbook to upscale an OCNE cluster from the operator node.
11 | #
12 | # yamllint disable
13 |
14 | ---
15 |
16 | - name: Fetch public key
17 |   hosts: ocne_op
18 |   tasks:
19 |   - name: fetch the unique OpenSSH public key
20 |     run_once: yes
21 |     fetch:
22 |       src: '/home/{{ ansible_user }}/.ssh/id_ocne_rsa.pub'
23 |       dest: buffer/
24 |       flat: yes
25 |
26 | - name: SSH and DNF pre-requisites
27 |   hosts: ocne_op ocne_kube_control ocne_kube_worker
28 |   become: yes
29 |   become_method: sudo
30 |   vars:
31 |     - proxy: "{{ use_proxy | default('false') }}"
32 |
33 |   tasks:
34 |
35 |   - name: Set authorized key for passwordless ssh
36 |     authorized_key:
37 |       user: "{{ ansible_user }}"
38 |       state: present
39 |       key: "{{ lookup('file', 'buffer/id_ocne_rsa.pub') }}"
40 |
41 |   - name: Add proxy config to dnf.conf
42 |     lineinfile:
43 |       path: /etc/dnf/dnf.conf
44 |       line: "proxy={{ my_https_proxy }}"
45 |       state: present
46 |     when:
47 |       - proxy | bool
48 |
49 |   - name: Workaround for ol8_developer channels not available bug
50 |     dnf:
51 |       name: oraclelinux-developer-release-el8
52 |       state: latest
53 |     ignore_errors: yes
54 |
55 | - name: Upscale OCNE cluster
56 |   hosts: ocne_op
57 |   gather_facts: true
58 |   vars:
59 |     - operator_str: "{{ansible_fqdn}}:8091"
60 |     - proxy: "{{ use_proxy | default('false') }}"
61 |
62 |   tasks:
63 |
64 |   - name: Download upscaled OCNE environment file to operator node
65 |     ansible.builtin.get_url:
66 |       url: "{{ env_file_url }}"
67 |       dest: "~/upscale-{{ env_file }}"
68 |
69 |   - name: Upscale with environment file
70 |     command: olcnectl provision --ssh-identity-file ~/.ssh/id_ocne_rsa --yes --timeout 20 \
71 |              --ssh-login-name "{{ ansible_user }}" \
72 |              --config-file "~/upscale-{{ env_file }}"
73 |     when:
74 |       - not (proxy | bool)
75 |
76 |   - name: Upscale with environment file and proxy enabled
77 |     command: olcnectl provision --ssh-identity-file ~/.ssh/id_ocne_rsa --yes --timeout 20 \
78 |              --ssh-login-name "{{ ansible_user }}" \
79 |              --http-proxy "{{ my_http_proxy }}" \
80 |              --https-proxy "{{ my_https_proxy }}" \
81 |              --no-proxy "{{ my_no_proxy }}" \
82 |              --config-file "~/upscale-{{ env_file }}"
83 |     when:
84 |       - proxy | bool
85 |
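# A minimal sketch of how the scaling plays are typically invoked
# (hypothetical inventory path and variable values):
#
#   ansible-playbook -i inventories/hosts-example.ini ocne-upscale-cluster.yml \
#     -e env_file=ocne-environment.yaml \
#     -e env_file_url=http://example.com/ocne/upscale/ocne-environment.yaml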
-------------------------------------------------------------------------------- /playbooks/OLAM/.gitignore: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle-samples/ansible-playbooks/34a9865b110da450d9e055195795d61d264af763/playbooks/OLAM/.gitignore
-------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/.gitignore: --------------------------------------------------------------------------------
1 | # ansible code
2 | *hosts
3 | *hosts.ini
4 | *buffer*
5 |
-------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/README.md: --------------------------------------------------------------------------------
1 | # Oracle Linux Automation Manager 2.2 Cluster
2 |
3 | The playbook provides a clustered installation of [Oracle Linux Automation Manager](https://docs.oracle.com/en/operating-systems/oracle-linux-automation-manager/) using the details from an inventory file.
4 |
5 | It configures the following seven nodes:
6 |
7 | - A remote database
8 | - Two control plane nodes
9 | - Two local execution nodes
10 | - A hop node and a remote execution node
11 |
12 | ## Quickstart
13 |
14 | ### Assumptions
15 |
16 | 1. You have all the hosts running
17 | 1. You have set up the required OpenSSH keys
18 | 1. You have the necessary permissions and access
19 |
20 | ### Pre-requisites
21 |
22 | 1. Git is installed
23 | 1. SSH client is installed and configured
24 | 1. The `ansible` or `ansible-core` package is installed
25 |
26 | ### Instructions
27 | ---
28 |
29 | #### Provisioning using this git repo
30 |
31 | 1. Clone the repo:
32 |
33 |    ```
34 |    git clone https://github.com/oracle-samples/ansible-playbooks.git ol-playbooks
35 |    cd ol-playbooks/playbooks/OLAM/cluster-plus-hop-node
36 |    cp group_vars/all.yml.example group_vars/all.yml
37 |    cp inventory/hosts.ini.example inventory/hosts.ini
38 |    ```
39 |
40 | 1. Edit the group variables, change the default passwords and replace the sample ssh key files:
41 |
42 |    ```
43 |    # Create Linux non-opc user account for installing Oracle Linux Automation Manager
44 |
45 |    "username": oracle
46 |
47 |    # Enter the non-hashed password for the non-opc user account.
48 |
49 |    "user_default_password": CHANGE_ME
50 |
51 |    # Enter the password for PostgreSQL awx user
52 |
53 |    "awx_pguser_password": CHANGE_ME
54 |
55 |    # Enter the password for the Oracle Linux Automation Manager admin user
56 |
57 |    "olam_admin_password": CHANGE_ME
58 |
59 |    # Enter the name of a local ssh public key file located in the ~/.ssh directory. This key appends
60 |    # to the non-opc user account's authorized_keys file. Replace <username> and <keyfile> with
61 |    # your own user and key file name.
62 |
63 |    "ssh_keyfile": /home/<username>/.ssh/<keyfile>.pub
64 |    ```
65 |
66 |    This file also contains a variable for setting a proxy if required to reach the internet from the Oracle Linux Automation Manager nodes.
67 |
68 | 1. Edit the inventory and customize hostnames:
69 |
70 |    ```
71 |    [control_nodes]
72 |    olam-control01
73 |    olam-control02
74 |
75 |    [control_nodes:vars]
76 |    node_type=control
77 |    peers=local_execution_group
78 |
79 |    [execution_nodes]
80 |    olam-execution01
81 |    olam-execution02
82 |    olam-remote-execution01
83 |    olam-hop01
84 |
85 |    [local_execution_group]
86 |    olam-execution01
87 |    olam-execution02
88 |
89 |    [local_execution_group:vars]
90 |    node_type=execution
91 |
92 |    [hop]
93 |    olam-hop01
94 |
95 |    [hop:vars]
96 |    peers=control_nodes
97 |
98 |    [remote_execution_group]
99 |    olam-remote-execution01
100 |
101 |    [remote_execution_group:vars]
102 |    peers=hop
103 |
104 |    [db_nodes]
105 |    olam-db
106 |
107 |    [all:vars]
108 |    ansible_user=opc
109 |    ansible_ssh_private_key_file=~/.ssh/oci-olam
110 |    ansible_python_interpreter=/usr/bin/python3
111 |    ```
112 |
113 |    The `all:vars` group variables define the user, key file, and python version used when connecting to the different nodes using SSH.
114 |
115 |    A second example, `hosts.ini.example-4nodes`, is provided in the inventory directory for a four-node cluster without a hop node and remote execution node.
116 |
117 | 1. Test SSH connectivity to all the hosts listed in the inventory:
118 |
119 |    ```
120 |    ansible-playbook -i inventory/hosts.ini pingtest.yml
121 |    ```
122 |
123 | 1. Install collection dependencies:
124 |
125 |    ```
126 |    ansible-galaxy install -r requirements.yml
127 |    ```
128 |
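   Optionally, if you stored real passwords in `group_vars/all.yml`, encrypt the file before running the playbook (standard `ansible-vault` usage, shown for illustration; add `--ask-vault-pass` to the `ansible-playbook` command in the next step):

   ```
   ansible-vault encrypt group_vars/all.yml
   ```
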
129 | 1. Run the playbook:
130 |
131 |    ```
132 |
133 |    ansible-playbook -i inventory/hosts.ini install.yml
134 |    ```
135 |
136 | ## Resources
137 |
138 | [Oracle Linux Automation Manager Training](https://www.oracle.com/goto/linuxautomationlearning)
139 | [Oracle Linux Training Station](https://www.oracle.com/goto/oltrain)
140 |
141 |
142 |
143 |
144 |
145 |
146 |
-------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/ansible.cfg: --------------------------------------------------------------------------------
1 | [defaults]
2 | nocows = 1
3 | host_key_checking = false
4 | interpreter_python = auto_silent
5 |
-------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/group_vars/all.yml.example: --------------------------------------------------------------------------------
1 | ---
2 | # File: group_vars/all.yml
3 | # Description: group_vars for "all" hosts in the inventory file
4 |
5 | # Create Linux non-opc user account for installing Oracle Linux Automation Manager
6 |
7 | "username": oracle
8 |
9 | # Enter the non-hashed password for the non-opc user account.
10 |
11 | "user_default_password": oracle
12 |
13 | # Enter the password for PostgreSQL awx user
14 |
15 | "awx_pguser_password": CHANGE_ME
16 |
17 | # Enter the password for the Oracle Linux Automation Manager admin user
18 |
19 | "olam_admin_password": CHANGE_ME
20 |
21 | # Enter the name of a local ssh public key file located in the ~/.ssh directory. This key appends
22 | # to the non-opc user account's authorized_keys file. Replace <username> and <keyfile> with
23 | # your own user and key file name.
24 |
25 | "ssh_keyfile": /home/<username>/.ssh/<keyfile>.pub
26 |
27 | # Set proxy if needed
28 | # Uncomment both the pip_proxy_env and proxy_env sections, and set the proxy host and port accordingly.
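# For reference, a filled-in example after uncommenting (hypothetical proxy
# host and port):
#
#   pip_proxy_env:
#     http_proxy: 'http://proxy.example.com:80'
#     https_proxy: 'http://proxy.example.com:80'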
29 | 30 | pip_proxy_env: 31 | # http_proxy: 'http://www.example.com:80 32 | # https_proxy: 'https://www.example.com:80' 33 | 34 | proxy_env: 35 | # http_proxy: 'www.example.com:80' 36 | # https_proxy: 'www.example.com:80' 37 | # ftp_proxy: 'www.example.com:80' 38 | # no_proxy: 'localhost,127.0.0.1,example.com' 39 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: yes 4 | 5 | tasks: 6 | 7 | - name: add user account with access to sudo 8 | user: 9 | name: "{{ username }}" 10 | password: "{{ user_default_password | password_hash('sha512') }}" 11 | comment: Ansible created user 12 | groups: wheel 13 | append: yes 14 | update_password: on_create 15 | 16 | - name: set authorized key for user using local public key file 17 | ansible.posix.authorized_key: 18 | user: "{{ username }}" 19 | state: present 20 | key: "{{ lookup('file', '{{ ssh_keyfile }}') }}" 21 | 22 | 23 | - name: set user with passwordless sudo access 24 | lineinfile: 25 | path: '/etc/sudoers.d/{{ username }}' 26 | regexp: '{{ username }} ALL=' 27 | line: '{{ username}} ALL=(ALL:ALL) NOPASSWD: ALL' 28 | state: present 29 | create: yes 30 | 31 | # Add passwordless ssh between hosts 32 | 33 | - name: generate a unique OpenSSH keypair for passwordless ssh 34 | community.crypto.openssh_keypair: 35 | path: '/home/{{ username }}/.ssh/id_olam_rsa' 36 | owner: "{{ username }}" 37 | group: "{{ username }}" 38 | mode: '0700' 39 | when: ( inventory_hostname == groups["control_nodes"][0] ) 40 | 41 | - name: fetch the unique OpenSSH private key 42 | run_once: yes 43 | fetch: 44 | src: '/home/{{ username }}/.ssh/id_olam_rsa' 45 | dest: buffer/ 46 | flat: yes 47 | when: ( inventory_hostname == groups["control_nodes"][0] ) 48 | 49 | - name: fetch the unique OpenSSH public key 50 | run_once: yes 51 | fetch: 52 | src: '/home/{{ username }}/.ssh/id_olam_rsa.pub' 53 | dest: buffer/ 54 | flat: yes 55 | when: ( inventory_hostname == groups["control_nodes"][0] ) 56 | 57 | - name: copy unique private key 58 | ansible.builtin.copy: 59 | src: buffer/id_olam_rsa 60 | dest: '/home/{{ username }}/.ssh/id_olam_rsa' 61 | owner: "{{ username }}" 62 | group: "{{ username }}" 63 | mode: '0600' 64 | when: ( inventory_hostname != groups["control_nodes"][0] ) 65 | 66 | - name: copy unique public key 67 | ansible.builtin.copy: 68 | src: buffer/id_olam_rsa.pub 69 | dest: '/home/{{ username }}/.ssh/id_olam_rsa.pub' 70 | owner: "{{ username }}" 71 | group: "{{ username }}" 72 | mode: '0600' 73 | when: ( inventory_hostname != groups["control_nodes"][0] ) 74 | 75 | - name: set authorized key for passwordless ssh using local public key file 76 | authorized_key: 77 | user: "{{ username }}" 78 | state: present 79 | key: "{{ lookup('file', 'buffer/id_olam_rsa.pub') }}" 80 | 81 | - name: set authorized key for passwordless ssh using local public key file 82 | authorized_key: 83 | user: opc 84 | state: present 85 | key: "{{ lookup('file', 'buffer/id_olam_rsa.pub') }}" 86 | 87 | # Install required packages on all hosts 88 | 89 | - hosts: control_nodes,execution_nodes,db_nodes 90 | collections: 91 | - ansible.posix 92 | - community.general 93 | - community.postgresql 94 | - community.crypto 95 | become: yes 96 | 97 | tasks: 98 | 99 | - name: install Oracle EPEL repository 100 | dnf: 101 | name: oracle-epel-release-el8 102 | state: present 103 | when: 104 | - 
ansible_facts['distribution_major_version'] == '8' 105 | 106 | - name: install version lock plugin 107 | dnf: 108 | name: python3-dnf-plugin-versionlock 109 | enablerepo: ol8_developer_EPEL 110 | state: present 111 | 112 | - name: version lock python3-click 113 | community.general.yum_versionlock: 114 | state: present 115 | name: python3-click 116 | 117 | - name: install additional packages for ansible 118 | dnf: 119 | name: 120 | - python3-psycopg2 121 | - python3-pyOpenSSL 122 | - python3-pip 123 | enablerepo: ol8_developer_EPEL 124 | state: latest 125 | 126 | - name: install pexpect 127 | pip: 128 | name: pexpect 129 | environment: "{{ pip_proxy_env }}" 130 | 131 | environment: "{{ proxy_env }}" 132 | 133 | # Configure remote database 134 | 135 | - hosts: db_nodes 136 | collections: 137 | - ansible.posix 138 | - community.general 139 | - community.postgresql 140 | - community.crypto 141 | become: yes 142 | 143 | vars: 144 | db_node_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" 145 | 146 | tasks: 147 | 148 | - name: enable the PostgreSQL 13 module stream 149 | copy: 150 | dest: /etc/dnf/modules.d/postgresql.module 151 | content: | 152 | [postgresql] 153 | name=postgresql 154 | stream=13 155 | profiles= 156 | state=enabled 157 | when: 158 | - ansible_distribution_major_version | int >= 8 159 | 160 | - name: install the database 161 | dnf: 162 | name: postgresql-server 163 | state: present 164 | environment: "{{ proxy_env }}" 165 | 166 | - name: ensure postgresql data directory exists 167 | file: 168 | path: "/var/lib/pgsql/data" 169 | owner: "postgres" 170 | group: "postgres" 171 | state: directory 172 | mode: 0700 173 | 174 | - name: check if postgresql is initialized 175 | stat: 176 | path: "/var/lib/pgsql/data/PG_VERSION" 177 | register: pgdata_dir_version 178 | 179 | - name: initialize postgresql database 180 | command: "postgresql-setup --initdb" 181 | when: not pgdata_dir_version.stat.exists 182 | become_user: postgres 183 | 184 | - name: change database password storage encryption 185 | ansible.builtin.replace: 186 | path: /var/lib/pgsql/data/postgresql.conf 187 | regexp: '^#password_encryption.*' 188 | replace: 'password_encryption = scram-sha-256' 189 | 190 | - name: Enable and start postgresql.service 191 | systemd: 192 | name: postgresql 193 | state: started 194 | enabled: yes 195 | 196 | - name: create awx postgresql user 197 | community.postgresql.postgresql_user: 198 | name: awx 199 | password: "{{ awx_pguser_password }}" 200 | role_attr_flags: NOSUPERUSER 201 | become_user: postgres 202 | 203 | - name: create awx postgresql db 204 | community.postgresql.postgresql_db: 205 | name: awx 206 | owner: awx 207 | become_user: postgres 208 | 209 | - name: update host-based authentication 210 | ansible.builtin.lineinfile: 211 | path: /var/lib/pgsql/data/pg_hba.conf 212 | line: "host all all 0.0.0.0/0 scram-sha-256" 213 | 214 | - name: set the database listening address 215 | ansible.builtin.lineinfile: 216 | path: /var/lib/pgsql/data/postgresql.conf 217 | insertbefore: '^#port = 5432' 218 | line: "listen_addresses = '{{ db_node_ip }}'" 219 | 220 | - name: set firewall port rules for db 221 | ansible.posix.firewalld: 222 | zone: public 223 | port: "{{ item }}" 224 | permanent: yes 225 | state: enabled 226 | immediate: yes 227 | loop: 228 | - 5432/tcp 229 | 230 | - name: restart the database 231 | systemd: 232 | name: postgresql 233 | state: restarted 234 | 235 | # Configure OLAM 2.2 hosts 236 | 237 | - hosts: control_nodes,execution_nodes 238 | 
collections: 239 | - ansible.posix 240 | - community.general 241 | - community.postgresql 242 | - community.crypto 243 | become: yes 244 | 245 | tasks: 246 | 247 | - name: install Oracle Linux Automation Manager repository 248 | dnf: 249 | name: oraclelinux-automation-manager-release-el8-2.2 250 | state: present 251 | environment: "{{ proxy_env }}" 252 | 253 | - name: disable Oracle Linux Automation Manager 1.0 repository 254 | ini_file: 255 | path: "/etc/yum.repos.d/oraclelinux-automation-manager-ol8.repo" 256 | section: ol8_automation 257 | option: enabled 258 | value: "0" 259 | mode: 0644 260 | 261 | - name: disable Oracle Linux Automation Manager 2.0 repository 262 | ini_file: 263 | path: "/etc/yum.repos.d/oraclelinux-automation-manager-ol8.repo" 264 | section: ol8_automation2 265 | option: enabled 266 | value: "0" 267 | mode: 0644 268 | 269 | - name: enable Oracle Linux Automation Manager 2.2 repository 270 | ini_file: 271 | path: "/etc/yum.repos.d/oraclelinux-automation-manager-ol8.repo" 272 | section: ol8_automation2.2 273 | option: enabled 274 | value: "1" 275 | mode: 0644 276 | 277 | - name: install Oracle Linux Automation Manager 278 | dnf: 279 | name: ol-automation-manager 280 | state: present 281 | environment: "{{ proxy_env }}" 282 | 283 | - name: edit unixsocket in /etc/redis.conf 284 | lineinfile: 285 | path: /etc/redis.conf 286 | regexp: '^unixsocket ' 287 | insertafter: '^# unixsocketperm ' 288 | line: unixsocket /var/run/redis/redis.sock 289 | 290 | - name: edit unixsocketperm in /etc/redis.conf 291 | lineinfile: 292 | path: /etc/redis.conf 293 | regexp: '^unixsocketperm ' 294 | insertafter: '^unixsocket ' 295 | line: unixsocketperm 775 296 | 297 | - name: configure CLUSTER_HOST_ID setting 298 | copy: 299 | dest: /etc/tower/conf.d/olam.py 300 | content: | 301 | CLUSTER_HOST_ID = "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" 302 | DEFAULT_EXECUTION_QUEUE_NAME = 'execution' 303 | DEFAULT_CONTROL_PLANE_QUEUE_NAME = 'controlplane' 304 | owner: awx 305 | group: awx 306 | mode: '0640' 307 | 308 | - name: provision olam ansible container environment 309 | containers.podman.podman_image: 310 | name: container-registry.oracle.com/oracle_linux_automation_manager/olam-ee 311 | become_user: awx 312 | environment: "{{ proxy_env }}" 313 | 314 | - name: Generate an OpenSSL private key with a different size (2048 bits) 315 | community.crypto.openssl_privatekey: 316 | path: /etc/tower/tower.key 317 | size: 2048 318 | 319 | - name: Generate an OpenSSL Certificate Signing Request 320 | community.crypto.openssl_csr: 321 | path: /etc/tower/tower.csr 322 | privatekey_path: /etc/tower/tower.key 323 | 324 | - name: Generate a Self Signed OpenSSL certificate 325 | community.crypto.x509_certificate: 326 | path: /etc/tower/tower.crt 327 | privatekey_path: /etc/tower/tower.key 328 | csr_path: /etc/tower/tower.csr 329 | provider: selfsigned 330 | 331 | - name: remove default server section in nginx configuration 332 | template: 333 | src: ../templates/nginx.conf.tpl 334 | dest: /etc/nginx/nginx.conf 335 | owner: root 336 | group: root 337 | mode: 0644 338 | 339 | - name: set firewall service rules 340 | ansible.posix.firewalld: 341 | zone: public 342 | service: "{{ item }}" 343 | permanent: yes 344 | state: enabled 345 | immediate: yes 346 | loop: 347 | - http 348 | - https 349 | 350 | - name: set firewall port rules 351 | ansible.posix.firewalld: 352 | zone: public 353 | port: "{{ item }}" 354 | permanent: yes 355 | state: enabled 356 | immediate: yes 357 | loop: 358 | - 
27199/tcp 359 | 360 | - hosts: control_nodes,execution_nodes 361 | collections: 362 | - ansible.posix 363 | - community.general 364 | - community.postgresql 365 | - community.crypto 366 | become: yes 367 | 368 | tasks: 369 | 370 | - name: get db ipaddress 371 | set_fact: 372 | db_node_ip: "{{ hostvars[groups['db_nodes'][0]]['ansible_default_ipv4']['address'] }}" 373 | 374 | - name: configure DATABASE settings 375 | copy: 376 | dest: /etc/tower/conf.d/db.py 377 | content: | 378 | DATABASES = { 379 | 'default': { 380 | 'ATOMIC_REQUESTS': True, 381 | 'ENGINE': 'awx.main.db.profiled_pg', 382 | 'NAME': 'awx', 383 | 'USER': 'awx', 384 | 'PASSWORD': '{{ awx_pguser_password }}', 385 | 'HOST': '{{ db_node_ip }}', 386 | 'PORT': '5432', 387 | } 388 | } 389 | owner: awx 390 | group: awx 391 | mode: '0640' 392 | 393 | - name: check if awx setup 394 | stat: 395 | path: /var/lib/awx/.awx-setup 396 | register: awx_setup 397 | when: ( inventory_hostname == groups["control_nodes"][0] ) 398 | 399 | - name: run awx-manage migrate 400 | shell: | 401 | awx-manage migrate 402 | touch /var/lib/awx/.awx-setup 403 | args: 404 | executable: /bin/bash 405 | become_user: awx 406 | register: awx_migrate 407 | when: 408 | - ( inventory_hostname == groups["control_nodes"][0] ) 409 | - not awx_setup.stat.exists 410 | changed_when: awx_migrate is not search('already exists') 411 | 412 | - name: create awx superuser 413 | block: 414 | - name: set awx password 415 | expect: 416 | command: "awx-manage createsuperuser --username admin --email admin@example.com" 417 | responses: 418 | 'Password:': "{{ olam_admin_password }}" 419 | 'Password \(again\):': "{{ olam_admin_password }}" 420 | register: awx_password 421 | become_user: awx 422 | ignore_errors: yes 423 | changed_when: awx_password.stdout is not search('is already taken') 424 | when: 425 | - ( inventory_hostname == groups["control_nodes"][0] ) 426 | - not awx_setup.stat.exists 427 | 428 | - debug: 429 | var: awx_password 430 | when: 431 | - ( inventory_hostname == groups["control_nodes"][0] ) 432 | - not awx_setup.stat.exists 433 | 434 | - hosts: control_nodes:execution_nodes:!control_nodes[0] 435 | collections: 436 | - ansible.posix 437 | become: yes 438 | 439 | tasks: 440 | 441 | - name: copy SECRET from control-node01 to all other hosts 442 | ansible.posix.synchronize: 443 | src: /etc/tower/SECRET_KEY 444 | dest: /etc/tower/SECRET_KEY 445 | owner: true 446 | group: true 447 | private_key: '/home/{{ username }}/.ssh/id_olam_rsa' 448 | delegate_to: "{{ groups['control_nodes'][0] }}" 449 | 450 | - hosts: control_nodes,execution_nodes 451 | become: yes 452 | 453 | tasks: 454 | 455 | - name: create initial configuration in receptor 456 | template: 457 | src: ../templates/receptor.conf.tpl 458 | dest: /etc/receptor/receptor.conf 459 | owner: root 460 | group: root 461 | mode: '0644' 462 | 463 | - name: change work type to ansible-runner for execution node 464 | ansible.builtin.replace: 465 | path: /etc/receptor/receptor.conf 466 | regexp: 'worktype: local' 467 | replace: 'worktype: ansible-runner' 468 | when: 469 | - inventory_hostname in groups.execution_nodes 470 | - inventory_hostname not in groups.hop 471 | 472 | - name: add tcp-peers if present 473 | ansible.builtin.lineinfile: 474 | path: /etc/receptor/receptor.conf 475 | insertbefore: '^- control-service:' 476 | line: | 477 | - tcp-peer: 478 | address: {{ hostvars[item]['ansible_default_ipv4']['address'] }}:27199 479 | redial: true 480 | with_inventory_hostnames: 481 | - "{{ peers }}" 482 | when: peers is defined 483 
| 484 | # - name: start receptor service 485 | # systemd: 486 | # name: "{{ item }}" 487 | # state: started 488 | # enabled: yes 489 | # with_items: 490 | # - receptor_awx 491 | 492 | - name: display receptor.conf contents 493 | command: cat receptor.conf chdir=/etc/receptor 494 | register: command_output 495 | 496 | - name: print to console 497 | debug: 498 | msg: "{{command_output.stdout_lines}}" 499 | 500 | - hosts: control_nodes[0] 501 | become: yes 502 | 503 | tasks: 504 | 505 | - name: check if awx provisioned 506 | stat: 507 | path: /var/lib/awx/.awx-provisioned 508 | register: awx_provision 509 | 510 | - name: generate awx-manage script 511 | copy: 512 | dest: /var/lib/awx/awx_provision.sh 513 | content: | 514 | #!/bin/bash 515 | 516 | {% for item in groups['control_nodes'] %} 517 | awx-manage provision_instance --hostname={{ hostvars[item]['ansible_default_ipv4']['address'] }} --node_type=control 518 | awx-manage register_queue --queuename=controlplane --hostnames={{ hostvars[item]['ansible_default_ipv4']['address'] }} 519 | {% endfor %} 520 | {% for item in groups['execution_nodes'] %} 521 | awx-manage provision_instance --hostname={{ hostvars[item]['ansible_default_ipv4']['address'] }} --node_type={% if item in groups['hop'] %}hop{% else %}execution{% endif %}{{''}} 522 | {% endfor %} 523 | {% for item in groups['execution_nodes'] %} 524 | {% if item not in groups['hop'] %} 525 | awx-manage register_queue --queuename=execution --hostnames={{ hostvars[item]['ansible_default_ipv4']['address'] }} 526 | {% endif %} 527 | {% endfor %} 528 | awx-manage register_default_execution_environments 529 | awx-manage create_preload_data 530 | touch /var/lib/awx/.awx-provisioned 531 | owner: awx 532 | group: awx 533 | mode: '0700' 534 | 535 | - name: cat script 536 | shell: | 537 | cat /var/lib/awx/awx_provision.sh 538 | args: 539 | executable: /bin/bash 540 | become_user: awx 541 | register: provision_script 542 | 543 | - name: print script contents 544 | debug: 545 | msg: "{{ provision_script }}" 546 | 547 | - name: execute awx-manage provision script 548 | shell: | 549 | /var/lib/awx/awx_provision.sh 550 | args: 551 | executable: /bin/bash 552 | become_user: awx 553 | register: awx_provision_script 554 | changed_when: awx_provision_script is not search('already exists') 555 | 556 | - name: print awx_provision_script 557 | debug: 558 | msg: "{{ awx_provision_script }}" 559 | 560 | 561 | - hosts: control_nodes,execution_nodes 562 | become: yes 563 | 564 | tasks: 565 | 566 | - name: run awx-manage peers 567 | shell: | 568 | awx-manage register_peers {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} --peers {{ hostvars[item]['ansible_default_ipv4']['address'] }} 569 | args: 570 | executable: /bin/bash 571 | become_user: awx 572 | register: awx_peers 573 | with_inventory_hostnames: 574 | - "{{ peers }}" 575 | when: ( peers is defined ) 576 | delegate_to: "{{ groups['control_nodes'][0] }}" 577 | 578 | - name: print to awx_peers 579 | debug: 580 | msg: "{{awx_peers}}" 581 | 582 | - name: Enable and start ol-automation service 583 | systemd: 584 | name: "{{ item }}" 585 | state: started 586 | enabled: yes 587 | with_items: 588 | - ol-automation-manager 589 | 590 | 591 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/inventory/hosts.ini.example: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle 
and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 9 | # 10 | # Description: 11 | # Inventory file for the deployment of an Oracle Linux Automation 12 | # Manager 2.2 clustered environment. 13 | # 14 | # Define groups and corresponding hostnames for the Oracle Linux 15 | # Automation Manager 2.2 cluster. 16 | # 17 | # control_nodes - list of control plane nodes 18 | # execution_nodes - list of execution and hop nodes 19 | # local_execution_group - list of local execution nodes, which is a subset of execution_nodes 20 | # hop - list of hop nodes, which is a subset of execution_nodes 21 | # remote_execution_group - list of remote execution nodes, which is a subset of execution_nodes 22 | # db_nodes - list of database nodes 23 | # 24 | # Create the peering within the cluster using the group_vars and the "peers" variable. The example 25 | # shows the local executions nodes as peers of the control plane nodes, indicating communication 26 | # flows from the control plane nodes to the local execution nodes. 27 | # 28 | # The "all" group_vars include the user and OpenSSH keypair used to connect to each node and the 29 | # specific python version used once connected. 30 | # 31 | [control_nodes] 32 | olam-control01 33 | olam-control02 34 | 35 | [control_nodes:vars] 36 | node_type=control 37 | peers=local_execution_group 38 | 39 | [execution_nodes] 40 | olam-execution01 41 | olam-execution02 42 | olam-remote-execution01 43 | olam-hop01 44 | 45 | [local_execution_group] 46 | olam-execution01 47 | olam-execution02 48 | 49 | [local_execution_group:vars] 50 | node_type=execution 51 | 52 | [hop] 53 | olam-hop01 54 | 55 | [hop:vars] 56 | peers=control_nodes 57 | 58 | [remote_execution_group] 59 | olam-remote-execution01 60 | 61 | [remote_execution_group:vars] 62 | peers=hop 63 | 64 | [db_nodes] 65 | olam-db 66 | 67 | [all:vars] 68 | ansible_user=opc 69 | ansible_ssh_private_key_file=~/.ssh/oci-olam 70 | ansible_python_interpreter=/usr/bin/python3 71 | 72 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/inventory/hosts.ini.example-4nodes: -------------------------------------------------------------------------------- 1 | # 2 | # Oracle Linux Automation Manager 3 | # 4 | # Copyright (c) 2022 Oracle and/or its affiliates. 5 | # Licensed under the Universal Permissive License v 1.0 as shown at 6 | # https://oss.oracle.com/licenses/upl. 7 | # 8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. 9 | # 10 | # Description: 11 | # Inventory file for the deployment of an Oracle Linux Automation 12 | # Manager 2.2 clustered environment. 13 | # 14 | # Define groups and corresponding hostnames for the Oracle Linux 15 | # Automation Manager 2.2 cluster. 16 | # 17 | # control_nodes - list of control plane nodes 18 | # execution_nodes - list of execution and hop nodes 19 | # local_execution_group - list of local execution nodes, which is a subset of execution_nodes 20 | # hop - list of hop nodes, which is a subset of execution_nodes 21 | # remote_execution_group - list of remote execution nodes, which is a subset of execution_nodes 22 | # db_nodes - list of database nodes 23 | # 24 | # Create the peering within the cluster using the group_vars and the "peers" variable. 
The example 25 | # shows the local executions nodes as peers of the control plane nodes, indicating communication 26 | # flows from the control plane nodes to the local execution nodes. 27 | # 28 | # The "all" group_vars include the user and OpenSSH keypair used to connect to each node and the 29 | # specific python version used once connected. 30 | # 31 | [control_nodes] 32 | olam-control01 33 | olam-control02 34 | 35 | [control_nodes:vars] 36 | node_type=control 37 | peers=local_execution_group 38 | 39 | [execution_nodes] 40 | olam-execution01 41 | olam-execution02 42 | 43 | [local_execution_group] 44 | olam-execution01 45 | olam-execution02 46 | 47 | [local_execution_group:vars] 48 | node_type=execution 49 | 50 | [hop] 51 | 52 | [hop:vars] 53 | 54 | [remote_execution_group] 55 | 56 | [remote_execution_group:vars] 57 | 58 | [db_nodes] 59 | olam-db 60 | 61 | [all:vars] 62 | ansible_user=opc 63 | ansible_ssh_private_key_file=~/.ssh/oci-olam 64 | ansible_python_interpreter=/usr/bin/python3 65 | 66 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/inventory/local.ini: -------------------------------------------------------------------------------- 1 | [local] 2 | localhost ansible_connection=local 3 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/pingtest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | tasks: 4 | - name: test connection 5 | ping: 6 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | roles: 3 | 4 | collections: 5 | - ansible.posix 6 | - community.general 7 | - community.postgresql 8 | - community.crypto 9 | - containers.podman 10 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/templates/nginx.conf.tpl: -------------------------------------------------------------------------------- 1 | # For more information on configuration, see: 2 | # * Official English Documentation: http://nginx.org/en/docs/ 3 | # * Official Russian Documentation: http://nginx.org/ru/docs/ 4 | 5 | user nginx; 6 | worker_processes auto; 7 | error_log /var/log/nginx/error.log; 8 | pid /run/nginx.pid; 9 | 10 | # Load dynamic modules. See /usr/share/doc/nginx/README.dynamic. 11 | include /usr/share/nginx/modules/*.conf; 12 | 13 | events { 14 | worker_connections 1024; 15 | } 16 | 17 | http { 18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 19 | '$status $body_bytes_sent "$http_referer" ' 20 | '"$http_user_agent" "$http_x_forwarded_for"'; 21 | 22 | access_log /var/log/nginx/access.log main; 23 | 24 | sendfile on; 25 | tcp_nopush on; 26 | tcp_nodelay on; 27 | keepalive_timeout 65; 28 | types_hash_max_size 2048; 29 | 30 | include /etc/nginx/mime.types; 31 | default_type application/octet-stream; 32 | 33 | # Load modular configuration files from the /etc/nginx/conf.d directory. 34 | # See http://nginx.org/en/docs/ngx_core_module.html#include 35 | # for more information. 36 | include /etc/nginx/conf.d/*.conf; 37 | 38 | # server { 39 | # listen 80 default_server; 40 | # listen [::]:80 default_server; 41 | # server_name _; 42 | # root /usr/share/nginx/html; 43 | # 44 | # # Load configuration files for the default server block. 
45 | # include /etc/nginx/default.d/*.conf; 46 | # 47 | # location / { 48 | # } 49 | # 50 | # error_page 404 /404.html; 51 | # location = /40x.html { 52 | # } 53 | # 54 | # error_page 500 502 503 504 /50x.html; 55 | # location = /50x.html { 56 | # } 57 | # } 58 | # 59 | # Settings for a TLS enabled server. 60 | # 61 | # server { 62 | # listen 443 ssl http2 default_server; 63 | # listen [::]:443 ssl http2 default_server; 64 | # server_name _; 65 | # root /usr/share/nginx/html; 66 | # 67 | # ssl_certificate "/etc/pki/nginx/server.crt"; 68 | # ssl_certificate_key "/etc/pki/nginx/private/server.key"; 69 | # ssl_session_cache shared:SSL:1m; 70 | # ssl_session_timeout 10m; 71 | # ssl_ciphers PROFILE=SYSTEM; 72 | # ssl_prefer_server_ciphers on; 73 | # 74 | # # Load configuration files for the default server block. 75 | # include /etc/nginx/default.d/*.conf; 76 | # 77 | # location / { 78 | # } 79 | # 80 | # error_page 404 /404.html; 81 | # location = /40x.html { 82 | # } 83 | # 84 | # error_page 500 502 503 504 /50x.html; 85 | # location = /50x.html { 86 | # } 87 | # } 88 | 89 | } 90 | 91 | -------------------------------------------------------------------------------- /playbooks/OLAM/cluster-plus-hop-node/templates/receptor.conf.tpl: -------------------------------------------------------------------------------- 1 | --- 2 | - node: 3 | id: {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} 4 | 5 | - log-level: info 6 | 7 | - tcp-listener: 8 | port: 27199 9 | 10 | - control-service: 11 | service: control 12 | filename: /var/run/receptor/receptor.sock 13 | 14 | - work-command: 15 | worktype: local 16 | command: /var/lib/ol-automation-manager/venv/awx/bin/ansible-runner 17 | params: worker 18 | allowruntimeparams: true 19 | verifysignature: false 20 | -------------------------------------------------------------------------------- /playbooks/OLAM/single-node/.gitignore: -------------------------------------------------------------------------------- 1 | # ansible code 2 | *hosts 3 | *hosts.ini 4 | *buffer* 5 | -------------------------------------------------------------------------------- /playbooks/OLAM/single-node/README.md: -------------------------------------------------------------------------------- 1 | # Oracle Linux Automation Manager 2.2 Single Host Deployment 2 | 3 | The playbook provides a single host installation of [Oracle Linux Automation Manager](https://docs.oracle.com/en/operating-systems/oracle-linux-automation-manager/) using the details from an inventory file. 4 | 5 | It configures a single node with the following roles: 6 | 7 | - database 8 | - control plane 9 | - execution 10 | 11 | ## Quickstart 12 | 13 | ### Assumptions 14 | 15 | 1. You have one Oracle Linux 8 host running 16 | 1. You have setup the required OpenSSH keys 17 | 1. You have the necessary permissions and access for the target host user with sudo access 18 | 19 | ### Pre-requisites 20 | 21 | 1. Git is installed 22 | 1. SSH client is installed and configured 23 | 1. The `ansible` or `ansible-core` package is installed 24 | 25 | ### Instructions 26 | --- 27 | 28 | #### Provisioning using this git repo 29 | 30 | 1. Clone the repo: 31 | 32 | ``` 33 | git clone https://github.com/oracle-samples/ansible-playbooks.git ol-playbooks 34 | cd ol-playbooks/playbooks/OLAM/single-node 35 | cp group_vars/all.yml.example group_vars/all.yml 36 | cp inventory/hosts.ini.example inventory/hosts.ini 37 | ``` 38 | 39 | 1. 
Edit the group variables:
40 |
41 |    ```
42 |    # Enter the password for the postgres awx user
43 |
44 |    "awx_pguser_password": password
45 |
46 |    # Enter the password for the OLAM admin user
47 |
48 |    "olam_admin_password": admin
49 |
50 |    # NOTE: use these passwords for demo purposes only, use other ansible features to
51 |    # protect your passwords such as using ansible-vault to encrypt passwords.
52 |    ```
53 |
54 |    This file also contains a variable for setting a proxy if required to reach the internet from the Oracle Linux Automation Manager nodes.
55 |
56 | 1. Edit the inventory:
57 |
58 |    ```
59 |    [control_node]
60 |    my_olam_node
61 |
62 |    [all:vars]
63 |    ansible_user=opc
64 |    ansible_ssh_private_key_file=~/.ssh/id_rsa
65 |    ansible_python_interpreter=/usr/bin/python3
66 |    ```
67 |
68 |    The `all:vars` group variables define the user, key file, and python version used when connecting to the host using SSH.
69 |
70 | 1. Test SSH connectivity to all the hosts listed in the inventory:
71 |
72 |    ```
73 |    ansible-playbook -i inventory/hosts.ini pingtest.yml
74 |    ```
75 |
76 | 1. Install collection dependencies:
77 |
78 |    ```
79 |    ansible-galaxy install -r requirements.yml
80 |    ```
81 |
82 | 1. Run the playbook:
83 |
84 |    ```
85 |
86 |    ansible-playbook -i inventory/hosts.ini install.yml
87 |    ```
88 |
89 | ## Resources
90 |
91 | [Oracle Linux Automation Manager Training](https://www.oracle.com/goto/linuxautomationlearning)
92 | [Oracle Linux Training Station](https://www.oracle.com/goto/oltrain)
93 |
94 |
95 |
96 |
97 |
98 |
99 |
-------------------------------------------------------------------------------- /playbooks/OLAM/single-node/ansible.cfg: --------------------------------------------------------------------------------
1 | [defaults]
2 | nocows = 1
3 | host_key_checking = false
4 | interpreter_python = auto_silent
5 |
-------------------------------------------------------------------------------- /playbooks/OLAM/single-node/group_vars/all.yml.example: --------------------------------------------------------------------------------
1 | ---
2 | # File: group_vars/all.yml
3 | # Description: group_vars for "all" hosts in the inventory file
4 |
5 |
6 | # Enter the password for the postgres awx user
7 |
8 | "awx_pguser_password": password
9 |
10 | # Enter the password for the OLAM admin user
11 |
12 | "olam_admin_password": admin
13 |
14 | # NOTE: use these passwords for demo purposes only, use other ansible features to
15 | # protect your passwords such as using ansible-vault to encrypt passwords.
16 |
17 | # Set proxy if needed
18 | # Uncomment both the pip_proxy_env and proxy_env sections, and set the proxy host and port accordingly.
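# For reference, a filled-in example after uncommenting (hypothetical proxy
# host and port):
#
#   proxy_env:
#     http_proxy: 'proxy.example.com:80'
#     https_proxy: 'proxy.example.com:80'
#     no_proxy: 'localhost,127.0.0.1,example.com'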
19 | 20 | pip_proxy_env: 21 | # http_proxy: 'http://www.example.com:80 22 | # https_proxy: 'https://www.example.com:80' 23 | 24 | proxy_env: 25 | # http_proxy: 'www.example.com:80' 26 | # https_proxy: 'www.example.com:80' 27 | # ftp_proxy: 'www.example.com:80' 28 | # no_proxy: 'localhost,127.0.0.1,example.com' 29 | -------------------------------------------------------------------------------- /playbooks/OLAM/single-node/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install required packages on hosts 3 | 4 | - name: Install required packages on hosts 5 | hosts: all 6 | collections: 7 | - ansible.posix 8 | - community.general 9 | - community.postgresql 10 | - community.crypto 11 | become: yes 12 | 13 | tasks: 14 | 15 | - name: install Oracle EPEL repository 16 | dnf: 17 | name: oracle-epel-release-el8 18 | state: present 19 | when: 20 | - ansible_facts['distribution_major_version'] == '8' 21 | 22 | - name: install version lock plugin 23 | dnf: 24 | name: python3-dnf-plugin-versionlock 25 | enablerepo: ol8_developer_EPEL 26 | state: present 27 | 28 | - name: version lock python3-click 29 | community.general.yum_versionlock: 30 | state: present 31 | name: python3-click 32 | 33 | - name: install additional packages for ansible 34 | dnf: 35 | name: 36 | - python3-psycopg2 37 | - python3-pyOpenSSL 38 | - python3-pip 39 | enablerepo: ol8_developer_EPEL 40 | state: latest 41 | 42 | - name: install pexpect 43 | pip: 44 | name: pexpect 45 | environment: "{{ pip_proxy_env }}" 46 | 47 | environment: "{{ proxy_env }}" 48 | 49 | # Install and configure OLAM database 50 | 51 | - name: Install and configure OLAM database 52 | hosts: control_node 53 | collections: 54 | - ansible.posix 55 | - community.general 56 | - community.postgresql 57 | - community.crypto 58 | become: yes 59 | 60 | vars: 61 | node_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" 62 | 63 | tasks: 64 | 65 | - name: enable the PostgreSQL 13 module stream 66 | copy: 67 | dest: /etc/dnf/modules.d/postgresql.module 68 | content: | 69 | [postgresql] 70 | name=postgresql 71 | stream=13 72 | profiles= 73 | state=enabled 74 | when: 75 | - ansible_distribution_major_version | int >= 8 76 | 77 | - name: install the database 78 | dnf: 79 | name: postgresql-server 80 | state: present 81 | environment: "{{ proxy_env }}" 82 | 83 | - name: ensure postgresql data directory exists 84 | file: 85 | path: "/var/lib/pgsql/data" 86 | owner: "postgres" 87 | group: "postgres" 88 | state: directory 89 | mode: 0700 90 | 91 | - name: check if postgresql is initialized 92 | stat: 93 | path: "/var/lib/pgsql/data/PG_VERSION" 94 | register: pgdata_dir_version 95 | 96 | - name: initialize postgresql database 97 | command: "postgresql-setup --initdb" 98 | when: not pgdata_dir_version.stat.exists 99 | become_user: postgres 100 | 101 | - name: change database password storage encryption 102 | ansible.builtin.replace: 103 | path: /var/lib/pgsql/data/postgresql.conf 104 | regexp: '^#password_encryption.*' 105 | replace: 'password_encryption = scram-sha-256' 106 | 107 | - name: Enable and start postgresql.service 108 | systemd: 109 | name: postgresql 110 | state: started 111 | enabled: yes 112 | 113 | - name: create awx postgresql user 114 | community.postgresql.postgresql_user: 115 | name: awx 116 | password: "{{ awx_pguser_password }}" 117 | role_attr_flags: NOSUPERUSER 118 | become_user: postgres 119 | 120 | - name: create awx postgresql db 121 | community.postgresql.postgresql_db: 
122 | name: awx 123 | owner: awx 124 | become_user: postgres 125 | 126 | - name: update host-based authentication 127 | ansible.builtin.lineinfile: 128 | path: /var/lib/pgsql/data/pg_hba.conf 129 | line: "host all all 0.0.0.0/0 scram-sha-256" 130 | 131 | - name: set the database listening address 132 | ansible.builtin.lineinfile: 133 | path: /var/lib/pgsql/data/postgresql.conf 134 | insertbefore: '^#port = 5432' 135 | line: "listen_addresses = '{{ node_ip }}'" 136 | 137 | - name: set firewall port rules for db 138 | ansible.posix.firewalld: 139 | zone: public 140 | port: "{{ item }}" 141 | permanent: yes 142 | state: enabled 143 | immediate: yes 144 | loop: 145 | - 5432/tcp 146 | 147 | - name: restart the database 148 | systemd: 149 | name: postgresql 150 | state: restarted 151 | 152 | # Configure OLAM 2.2 host 153 | 154 | - name: Configure OLAM 2.2 host 155 | hosts: control_node 156 | collections: 157 | - ansible.posix 158 | - community.general 159 | - community.postgresql 160 | - community.crypto 161 | become: yes 162 | 163 | vars: 164 | node_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" 165 | 166 | tasks: 167 | - name: install Oracle Linux Automation Manager repository 168 | dnf: 169 | name: oraclelinux-automation-manager-release-el8-2.2 170 | state: present 171 | environment: "{{ proxy_env }}" 172 | 173 | - name: disable Oracle Linux Automation Manager 1.0 repository 174 | ini_file: 175 | path: "/etc/yum.repos.d/oraclelinux-automation-manager-ol8.repo" 176 | section: ol8_automation 177 | option: enabled 178 | value: "0" 179 | mode: 0644 180 | 181 | - name: disable Oracle Linux Automation Manager 2.0 repository 182 | ini_file: 183 | path: "/etc/yum.repos.d/oraclelinux-automation-manager-ol8.repo" 184 | section: ol8_automation2 185 | option: enabled 186 | value: "0" 187 | mode: 0644 188 | 189 | - name: enable Oracle Linux Automation Manager 2.2 repository 190 | ini_file: 191 | path: "/etc/yum.repos.d/oraclelinux-automation-manager-ol8.repo" 192 | section: ol8_automation2.2 193 | option: enabled 194 | value: "1" 195 | mode: 0644 196 | 197 | - name: install Oracle Linux Automation Manager 198 | dnf: 199 | name: ol-automation-manager 200 | state: present 201 | environment: "{{ proxy_env }}" 202 | 203 | - name: edit unixsocket in /etc/redis.conf 204 | lineinfile: 205 | path: /etc/redis.conf 206 | regexp: '^unixsocket ' 207 | insertafter: '^# unixsocketperm ' 208 | line: unixsocket /var/run/redis/redis.sock 209 | 210 | - name: edit unixsocketperm in /etc/redis.conf 211 | lineinfile: 212 | path: /etc/redis.conf 213 | regexp: '^unixsocketperm ' 214 | insertafter: '^unixsocket ' 215 | line: unixsocketperm 775 216 | 217 | - name: configure CLUSTER_HOST_ID setting 218 | copy: 219 | dest: /etc/tower/conf.d/olam.py 220 | content: | 221 | CLUSTER_HOST_ID = "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" 222 | owner: awx 223 | group: awx 224 | mode: '0640' 225 | 226 | - name: provision olam ansible container environment 227 | containers.podman.podman_image: 228 | name: container-registry.oracle.com/oracle_linux_automation_manager/olam-ee 229 | become_user: awx 230 | environment: "{{ proxy_env }}" 231 | 232 | - name: Generate an OpenSSL private key with a different size (2048 bits) 233 | community.crypto.openssl_privatekey: 234 | path: /etc/tower/tower.key 235 | size: 2048 236 | 237 | - name: Generate an OpenSSL Certificate Signing Request 238 | community.crypto.openssl_csr: 239 | path: /etc/tower/tower.csr 240 | privatekey_path: 
/etc/tower/tower.key 241 | 242 | - name: Generate a Self Signed OpenSSL certificate 243 | community.crypto.x509_certificate: 244 | path: /etc/tower/tower.crt 245 | privatekey_path: /etc/tower/tower.key 246 | csr_path: /etc/tower/tower.csr 247 | provider: selfsigned 248 | 249 | - name: remove default server section in nginx configuration 250 | template: 251 | src: ../templates/nginx.conf.tpl 252 | dest: /etc/nginx/nginx.conf 253 | owner: root 254 | group: root 255 | mode: 0644 256 | 257 | - name: set firewall service rules 258 | ansible.posix.firewalld: 259 | zone: public 260 | service: "{{ item }}" 261 | permanent: yes 262 | state: enabled 263 | immediate: yes 264 | loop: 265 | - http 266 | - https 267 | 268 | - name: configure DATABASE settings 269 | copy: 270 | dest: /etc/tower/conf.d/db.py 271 | content: | 272 | DATABASES = { 273 | 'default': { 274 | 'ATOMIC_REQUESTS': True, 275 | 'ENGINE': 'awx.main.db.profiled_pg', 276 | 'NAME': 'awx', 277 | 'USER': 'awx', 278 | 'PASSWORD': '{{ awx_pguser_password }}', 279 | 'HOST': '{{ node_ip }}', 280 | 'PORT': '5432', 281 | } 282 | } 283 | owner: awx 284 | group: awx 285 | mode: '0640' 286 | 287 | - name: check if awx setup 288 | stat: 289 | path: /var/lib/awx/.awx-setup 290 | register: awx_setup 291 | 292 | - name: run awx-manage migrate 293 | shell: | 294 | awx-manage migrate 295 | touch /var/lib/awx/.awx-setup 296 | args: 297 | executable: /bin/bash 298 | become_user: awx 299 | register: awx_migrate 300 | when: 301 | - not awx_setup.stat.exists 302 | changed_when: awx_migrate is not search('already exists') 303 | 304 | - name: create awx superuser 305 | block: 306 | - name: set awx password 307 | expect: 308 | command: "awx-manage createsuperuser --username admin --email admin@example.com" 309 | responses: 310 | 'Password:': "{{ olam_admin_password }}" 311 | 'Password \(again\):': "{{ olam_admin_password }}" 312 | register: awx_password 313 | become_user: awx 314 | ignore_errors: yes 315 | changed_when: awx_password.stdout is not search('is already taken') 316 | when: 317 | - not awx_setup.stat.exists 318 | 319 | - debug: 320 | var: awx_password 321 | when: 322 | - not awx_setup.stat.exists 323 | 324 | # Configure hybrid node type 325 | 326 | - name: Configure hybrid node type 327 | hosts: control_node 328 | become: yes 329 | 330 | tasks: 331 | 332 | - name: check if awx provisioned 333 | stat: 334 | path: /var/lib/awx/.awx-provisioned 335 | register: awx_provision 336 | 337 | - name: generate awx-manage script 338 | copy: 339 | dest: /var/lib/awx/awx_provision.sh 340 | content: | 341 | #!/bin/bash 342 | 343 | awx-manage provision_instance --hostname={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} --node_type=hybrid 344 | awx-manage register_default_execution_environments 345 | awx-manage register_queue --queuename=default --hostnames={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} 346 | awx-manage register_queue --queuename=controlplane --hostnames={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} 347 | awx-manage create_preload_data 348 | touch /var/lib/awx/.awx-provisioned 349 | owner: awx 350 | group: awx 351 | mode: '0700' 352 | 353 | - name: cat script 354 | shell: | 355 | cat /var/lib/awx/awx_provision.sh 356 | args: 357 | executable: /bin/bash 358 | become_user: awx 359 | register: provision_script 360 | 361 | - name: print script contents 362 | debug: 363 | msg: "{{ provision_script }}" 364 | 365 | - name: execute awx-manage provision script 366 | shell: | 367 | 
/var/lib/awx/awx_provision.sh
368 |     args:
369 |       executable: /bin/bash
370 |     become_user: awx
371 |     register: awx_provision_script
372 |     changed_when: awx_provision_script is not search('already exists')
373 |
374 |   - name: print awx_provision_script
375 |     debug:
376 |       msg: "{{ awx_provision_script }}"
377 |
378 |   - name: create initial configuration in receptor
379 |     template:
380 |       src: ../templates/receptor.conf.tpl
381 |       dest: /etc/receptor/receptor.conf
382 |       owner: root
383 |       group: root
384 |       mode: '0644'
385 |
386 |   - name: Enable and start ol-automation service
387 |     systemd:
388 |       name: "{{ item }}"
389 |       state: started
390 |       enabled: yes
391 |     with_items:
392 |       - ol-automation-manager
393 |
394 |
395 |
-------------------------------------------------------------------------------- /playbooks/OLAM/single-node/inventory/hosts.ini.example: --------------------------------------------------------------------------------
1 | #
2 | # Oracle Linux Automation Manager
3 | #
4 | # Copyright (c) 2022 Oracle and/or its affiliates.
5 | # Licensed under the Universal Permissive License v 1.0 as shown at
6 | # https://oss.oracle.com/licenses/upl.
7 | #
8 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
9 | #
10 | # Description:
11 | # Inventory file for the deployment of an Oracle Linux Automation
12 | # Manager 2.2 hybrid node environment (single node).
13 | #
14 | # control_node - node to run control, execution and db
15 | #
16 | # The "all" group_vars include the user and OpenSSH keypair used to connect to each node and the
17 | # specific python version used once connected.
18 | #
19 | [control_node]
20 | my_olam_node
21 |
22 | [all:vars]
23 | ansible_user=opc
24 | ansible_ssh_private_key_file=~/.ssh/id_rsa
25 | ansible_python_interpreter=/usr/bin/python3
26 |
27 |
-------------------------------------------------------------------------------- /playbooks/OLAM/single-node/inventory/local.ini: --------------------------------------------------------------------------------
1 | [local]
2 | localhost ansible_connection=local
3 |
-------------------------------------------------------------------------------- /playbooks/OLAM/single-node/pingtest.yml: --------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 |   tasks:
4 |     - name: test connection
5 |       ping:
6 |
-------------------------------------------------------------------------------- /playbooks/OLAM/single-node/requirements.yml: --------------------------------------------------------------------------------
1 | ---
2 | roles:
3 |
4 | collections:
5 |   - ansible.posix
6 |   - community.general
7 |   - community.postgresql
8 |   - community.crypto
9 |   - containers.podman
10 |
-------------------------------------------------------------------------------- /playbooks/OLAM/single-node/templates/nginx.conf.tpl: --------------------------------------------------------------------------------
1 | # For more information on configuration, see:
2 | #   * Official English Documentation: http://nginx.org/en/docs/
3 | #   * Official Russian Documentation: http://nginx.org/ru/docs/
4 |
5 | user nginx;
6 | worker_processes auto;
7 | error_log /var/log/nginx/error.log;
8 | pid /run/nginx.pid;
9 |
10 | # Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
11 | include /usr/share/nginx/modules/*.conf; 12 | 13 | events { 14 | worker_connections 1024; 15 | } 16 | 17 | http { 18 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 19 | '$status $body_bytes_sent "$http_referer" ' 20 | '"$http_user_agent" "$http_x_forwarded_for"'; 21 | 22 | access_log /var/log/nginx/access.log main; 23 | 24 | sendfile on; 25 | tcp_nopush on; 26 | tcp_nodelay on; 27 | keepalive_timeout 65; 28 | types_hash_max_size 2048; 29 | 30 | include /etc/nginx/mime.types; 31 | default_type application/octet-stream; 32 | 33 | # Load modular configuration files from the /etc/nginx/conf.d directory. 34 | # See http://nginx.org/en/docs/ngx_core_module.html#include 35 | # for more information. 36 | include /etc/nginx/conf.d/*.conf; 37 | 38 | # server { 39 | # listen 80 default_server; 40 | # listen [::]:80 default_server; 41 | # server_name _; 42 | # root /usr/share/nginx/html; 43 | # 44 | # # Load configuration files for the default server block. 45 | # include /etc/nginx/default.d/*.conf; 46 | # 47 | # location / { 48 | # } 49 | # 50 | # error_page 404 /404.html; 51 | # location = /40x.html { 52 | # } 53 | # 54 | # error_page 500 502 503 504 /50x.html; 55 | # location = /50x.html { 56 | # } 57 | # } 58 | # 59 | # Settings for a TLS enabled server. 60 | # 61 | # server { 62 | # listen 443 ssl http2 default_server; 63 | # listen [::]:443 ssl http2 default_server; 64 | # server_name _; 65 | # root /usr/share/nginx/html; 66 | # 67 | # ssl_certificate "/etc/pki/nginx/server.crt"; 68 | # ssl_certificate_key "/etc/pki/nginx/private/server.key"; 69 | # ssl_session_cache shared:SSL:1m; 70 | # ssl_session_timeout 10m; 71 | # ssl_ciphers PROFILE=SYSTEM; 72 | # ssl_prefer_server_ciphers on; 73 | # 74 | # # Load configuration files for the default server block. 75 | # include /etc/nginx/default.d/*.conf; 76 | # 77 | # location / { 78 | # } 79 | # 80 | # error_page 404 /404.html; 81 | # location = /40x.html { 82 | # } 83 | # 84 | # error_page 500 502 503 504 /50x.html; 85 | # location = /50x.html { 86 | # } 87 | # } 88 | 89 | } 90 | 91 | -------------------------------------------------------------------------------- /playbooks/OLAM/single-node/templates/receptor.conf.tpl: -------------------------------------------------------------------------------- 1 | --- 2 | - node: 3 | id: {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} 4 | 5 | - log-level: info 6 | 7 | - tcp-listener: 8 | port: 27199 9 | 10 | - control-service: 11 | service: control 12 | filename: /var/run/receptor/receptor.sock 13 | 14 | - work-command: 15 | worktype: local 16 | command: /var/lib/ol-automation-manager/venv/awx/bin/ansible-runner 17 | params: worker 18 | allowruntimeparams: true 19 | # verifysignature: false 20 | -------------------------------------------------------------------------------- /playbooks/OLVM/README.md: -------------------------------------------------------------------------------- 1 | # OLVM - Ansible Playbooks for Oracle Linux Virtualization 2 | 3 | 4 | A collection of Ansible playbooks to use with Oracle Linux Virtualization Manager. Playbooks are tested with Ansible CLI commands on Oracle Linux and with Oracle Linux Automation Manager. 5 | 6 | The playbooks uses modules from the [`ovirt.ovirt` Ansible collection](https://docs.ansible.com/ansible/latest/collections/ovirt/ovirt/index.html) which should be downloaded before using the playbooks. 
Read the collection documentation page for additional explanation or for extending the functionality of the playbooks.
7 |
8 | ## How to use the playbooks
9 |
10 | ### Ansible CLI
11 |
12 | The first step is to configure the playbook variables, most of which are set in the ``default_vars.yml`` file. Variables may also be passed on the command line when they are not configured in the default variables file. The variables configure your infrastructure settings for the OLVM server, the VM configuration and cloud-init. See the table below for an explanation of the variables.
13 |
14 | For example, the playbooks can be used like this (adjust variables to your infrastructure):
15 |
16 |     $ git clone https://github.com/oracle-samples/ansible-playbooks.git ol-playbooks
17 |     $ cd ol-playbooks/playbooks/OLVM
18 |     $ ansible-galaxy collection install -f ovirt.ovirt
19 |     $ ansible-galaxy collection install -f community.general
20 |     $ vi default_vars.yml
21 |     $ export "OVIRT_URL=https://olvm-engine.demo.local/ovirt-engine/api"
22 |     $ export "OVIRT_USERNAME=admin@internal"
23 |     $ export "OVIRT_PASSWORD=CHANGE_ME"
24 |
25 |     # create a single VM
26 |     $ ansible-playbook -i olvm-engine.demo.local, -u opc --key-file ~/.ssh/id_rsa \
27 |       -e "vm_name=vm01" -e "vm_ip_address=192.168.1.101" \
28 |       olvm_create_one_vm.yml
29 |
30 |     # create multiple VMs with inventory file, see example hosts.ini file
31 |     $ ansible-playbook -i inventory/hosts.ini -u opc --key-file ~/.ssh/id_rsa \
32 |       olvm_create_multiple_vms.yml
33 |
34 |     # delete a VM
35 |     $ ansible-playbook -i olvm-engine.demo.local, -u opc --key-file ~/.ssh/id_rsa \
36 |       -e "vm_name=vm01" olvm_delete_vm.yml
37 |
38 |     # live migrate a VM
39 |     $ ansible-playbook -i olvm-engine.demo.local, -u opc --key-file ~/.ssh/id_rsa \
40 |       -e "vm_name=vm01" -e "dst_kvmhost=KVM2" olvm_migrate_vm.yml
41 |
42 | Note 1: using the OLVM server FQDN (in this example olvm-engine.demo.local) appended with a comma is a quick way to avoid using an inventory file.
43 |
44 | Note 2: as it includes a clear-text password, for better security you may want to encrypt the ``default_vars.yml`` file with the `ansible-vault` command. When running the playbook, Ansible asks for the secret to decrypt the YAML file.
45 |
46 |     $ ansible-vault encrypt default_vars.yml
47 |     $ ansible-playbook -i olvm-engine.demo.local, -u opc --key-file ~/.ssh/id_rsa \
48 |       -e "vm_name=oltest" -e "vm_ip_address=192.168.1.100" \
49 |       --ask-vault-pass olvm_create_one_vm.yml
50 |
51 | ### Oracle Linux Automation Manager
52 |
53 | #### Project:
54 | In Oracle Linux Automation Manager you can directly import the playbook repository from GitHub as a project. The top-level directory of the repository contains the requirements file to download the `ovirt.ovirt` ansible collection.
55 |
56 | #### Inventory:
57 | Create an inventory and add one host with the details of your OLVM server; this is the target host where you run the playbook. Make sure you have a Machine credential set up for this host so that Ansible can SSH to it (run the ping module for this host). For the VMs you want to create, add an inventory group ``[instances]`` and add the VM names, including hostvars for ``vm_name`` and ``vm_ip_address``.
58 |
59 | #### Credentials:
60 | Besides the standard SSH credential to access the target host, an additional credential is required to use the ovirt modules in the playbooks. It's based on the credential type ``Red Hat Virtualization`` and you need to fill in the OLVM FQDN, username, password and CA File. For example:
61 |
62 |     Host (Authentication URL): https://olvm-engine.demo.local/ovirt-engine/api
63 |     Username: admin@internal
64 |     Password: CHANGE_ME
65 |
66 | #### Templates:
67 | Create a new job template and provide the following information:
68 |
69 |     Inventory: Select the inventory containing the OLVM host
70 |     Project: Select project from the GitHub repository
71 |     Playbook: Select playbook from Project, for example olvm_create_one_vm.yml
72 |     Credentials: Select Machine (SSH) credential and the Virtualization credentials
73 |     Variables: Enter the variables as used in the example default_vars.yml file
74 |
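The Variables field accepts YAML (or JSON) key/value pairs. A minimal sketch with hypothetical values, mirroring default_vars.yml (any variable from the table below can be supplied the same way):

    olvm_cluster: Default
    olvm_template: OL9U4_x86_64-olvm-b234
    vm_name: vm01
    vm_ip_address: 192.168.1.101
    vm_ram: 2048MiB
    vm_cpu: 2
    vm_root_passwd: CHANGE_ME
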
For example:

    Host (Authentication URL): https://olvm-engine.demo.local/ovirt-engine/api
    Username: admin@internal
    Password: CHANGE_ME

#### Templates:
Create a new job template and provide the following information:

    Inventory: Select the inventory containing the OLVM host
    Project: Select the project from the GitHub repository
    Playbook: Select a playbook from the project, for example olvm_create_one_vm.yml
    Credentials: Select the Machine (SSH) credential and the Virtualization credential
    Variables: Enter the variables as used in the example default_vars.yml file

### Secure API connection

By default the API connection to the OLVM server is insecure. If you want to use a secure API connection, define the variable ``olvm_insecure`` as ``false`` and make sure the CA file is available (the default location is ``/etc/pki/ovirt-engine/ca.pem``). You may use ``olvm_cafile`` to specify an alternative location.

    olvm_insecure: false
    olvm_cafile: /home/opc/ca.pem

The CA file can be downloaded from the main OLVM web portal or copied directly from the OLVM server, for example:

    $ scp root@olvm-engine.demo.local:/etc/pki/ovirt-engine/ca.pem /home/opc/ca.pem

## Variables used in the playbooks

| Variable | Example value | Description |
| -------- | ------------- | ----------- |
| OVIRT_URL | https://olvm-fqdn/ovirt-engine/api | The API URL of the OLVM server
| OVIRT_USERNAME | admin@internal | The name of the user, same as used for GUI login
| OVIRT_PASSWORD | CHANGE_ME | The password of the user, same as used for GUI login
| olvm_cluster | Default | Name of the cluster where the VM should be created
| olvm_template | OL9U4_x86_64-olvm-b234 | Name of the template used to create the VM
| vm_name | oltest | Name of the VM, also used as the hostname
| vm_ip_address | 192.168.1.100 | Static IP address of the VM; if DHCP is required, change the cloud-init section in the playbook
| vm_ram | 2048MiB | Amount of memory of the VM
| vm_cpu | 4 | Number of virtual CPU sockets of the VM
| vm_root_passwd | your_secret_root_pw | Root password of the VM, used by cloud-init
| vm_dns | 192.168.1.3 | DNS server to be used by the VM
| vm_dns_domain | demo.local | DNS domain to be used by the VM
| vm_gateway | 192.168.1.1 | Default gateway to be used by the VM
| vm_netmask | 255.255.255.0 | Netmask to be used by the VM
| vm_timezone | Europe/Amsterdam | Timezone for the VM
| vm_user | opc | Standard user for the Oracle provided template; otherwise use your own or the root user
| vm_user_sshpubkey | "ssh-rsa AAAA...YOUR KEY HERE...hj8= " | SSH public key for the standard user
| src_vm | oltest | VM used as the source VM for a cloning operation
| src_vm_snapshot | base_snapshot | Name of the snapshot of the source VM, for a cloning operation
| dst_vm | oltest_cloned | Name of the destination VM for a cloning operation
| dst_kvmhost | KVM2 | Name (not hostname) of the KVM host in the OLVM cluster, destination for live migration
| vm_id | 76c76c8b-a9ad-414e-8274-181a1ba8948b | VM ID of the VM, used when renaming a VM
| vm_newname | oltest | New name for the VM with vm_id, used when renaming a VM
| olvm_insecure | false | ``true`` by default; define as ``false`` in case you need a secure API connection
| olvm_cafile | /home/opc/ca.pem | Location of the CA file in case you want an alternative location


## Deploying Oracle Linux OLVM VM templates

Two playbooks are provided to deploy new virtual machines in Oracle Linux Virtualization Manager based on a pre-configured template. This may be your own template or one of the templates downloaded from Oracle's website, which can be [imported directly in Oracle Linux Virtualization Manager](https://docs.oracle.com/en/virtualization/oracle-linux-virtualization-manager/admin/admin-admin-tasks.html#templates-create):

* [Free Oracle Linux templates](https://yum.oracle.com/oracle-linux-templates.html)
* [Single Instance and Oracle Real Application Clusters (RAC) templates](https://www.oracle.com/database/technologies/rac/vm-db-templates.html)

The Oracle provided templates use cloud-init to automate the initial setup of virtual machines, and cloud-init variables are included in the playbooks.

--------------------------------------------------------------------------------
/playbooks/OLVM/default_vars.yml:
--------------------------------------------------------------------------------
# This file contains the password for the root user of the VM. If you
# add the passwords in clear-text, you may encrypt this file using the
# ansible-vault command to protect the passwords (the alternative is to
# use -e "vm_root_passwd=XXX" on the CLI):
#
# $ ansible-vault encrypt default_vars.yml
#
# For the Oracle Linux Automation Manager GUI:
# configure the password in a Vault and use the Vault in the template

# Variables used for VM configuration:

olvm_cluster: Default
olvm_template: OL9U4_x86_64-olvm-b234
vm_ram: 1024MiB
vm_cpu: 2

# Variables used for cloud-init:

vm_root_passwd: CHANGE_ME
vm_dns: 192.168.1.3
vm_dns_domain: demo.local
vm_gateway: 192.168.1.1
vm_netmask: 255.255.255.0
vm_timezone: Europe/Amsterdam
vm_user: opc
vm_user_sshpubkey: "ssh-rsa AAAA......hj8= "
--------------------------------------------------------------------------------
/playbooks/OLVM/inventory/hosts-example.ini:
--------------------------------------------------------------------------------

[olvm]
olvm-engine.demo.local

[instances]
vm01 vm_name=vm01 vm_ip_address=192.168.1.101
vm02 vm_name=vm02 vm_ip_address=192.168.1.102
vm03 vm_name=vm03 vm_ip_address=192.168.1.103
--------------------------------------------------------------------------------
/playbooks/OLVM/olvm_clone_vm.yml:
--------------------------------------------------------------------------------
---

# Configure default_vars.yml to set up default infrastructure variables
#
# Define the following variables as extra-vars:
#   --extra-vars "src_vm=XXX"
#   --extra-vars "src_vm_snapshot=YYY"
#   --extra-vars "dst_vm=ZZZ"

- hosts: all
  become: yes
  become_method: sudo
  gather_facts: no

  vars_files:
    - default_vars.yml

  tasks:

    - name: Login to OLVM manager
      ovirt_auth:
        url: "{{ lookup('env', 'OVIRT_URL') }}"
        username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
        password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
        ca_file: "{{ olvm_cafile | default('/etc/pki/ovirt-engine/ca.pem') }}"
        insecure: "{{ olvm_insecure | default(true) }}"
      tags:
        - always

    - name: Clone Virtual Machine from snapshot
      ovirt.ovirt.ovirt_vm:
        auth: "{{ ovirt_auth }}"
        snapshot_vm: "{{ src_vm }}"
        snapshot_name: "{{ src_vm_snapshot }}"
        name: "{{ dst_vm }}"
        state: present

    - name: Cleanup OLVM auth token
      ovirt.ovirt.ovirt_auth:
        ovirt_auth: "{{ ovirt_auth }}"
        state: absent
--------------------------------------------------------------------------------
/playbooks/OLVM/olvm_create_multiple_vms.yml:
--------------------------------------------------------------------------------
---
- hosts: olvm
  become: yes
  become_method: sudo
  gather_facts: yes

  vars_files:
    - default_vars.yml

  tasks:

    - name: Login to OLVM manager
      ovirt_auth:
        url: "{{ lookup('env', 'OVIRT_URL') }}"
        username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
        password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
        ca_file: "{{ olvm_cafile | default('/etc/pki/ovirt-engine/ca.pem') }}"
        insecure: "{{ olvm_insecure | default(true) }}"
      tags:
        - always

    - name: Create Virtual Machine
      ovirt.ovirt.ovirt_vm:
        auth: "{{ ovirt_auth }}"
        name: "{{ item }}"
        template: "{{ olvm_template }}"
        cluster: "{{ olvm_cluster | default('Default') }}"
        memory: "{{ hostvars[item]['vm_ram'] | default('1024MiB') }}"
        cpu_sockets: "{{ hostvars[item]['vm_cpu'] | default('1') }}"
        high_availability: true
        state: running
        wait: yes
        cloud_init:
          host_name: "{{ hostvars[item]['vm_name'] + '.' + vm_dns_domain }}"
          root_password: "{{ vm_root_passwd }}"
          user_name: "{{ vm_user }}"
          authorized_ssh_keys: "{{ vm_user_sshpubkey }}"
          dns_servers: "{{ vm_dns }}"
          dns_search: "{{ vm_dns_domain }}"
          nic_name: "{{ vm_nicname | default('eth0') }}"
          nic_boot_protocol: static
          nic_ip_address: "{{ hostvars[item]['vm_ip_address'] }}"
          nic_gateway: "{{ vm_gateway }}"
          nic_netmask: "{{ vm_netmask }}"
          timezone: "{{ vm_timezone }}"
          custom_script: |
            runcmd:
              - hostnamectl set-hostname {{ hostvars[item]['vm_name'] + '.' + vm_dns_domain }}
              - yum -y remove cloud-init
      loop: "{{ groups['instances'] }}"

    - name: Cleanup OLVM auth token
      ovirt.ovirt.ovirt_auth:
        ovirt_auth: "{{ ovirt_auth }}"
        state: absent
--------------------------------------------------------------------------------
/playbooks/OLVM/olvm_create_one_vm.yml:
--------------------------------------------------------------------------------
---

# Configure default_vars.yml to set up default infrastructure variables
#
# Define the following variables as extra-vars:
#   --extra-vars "vm_name=oltest"
#   --extra-vars "vm_ip_address=192.168.1.100"

- hosts: olvm
  become: yes
  become_method: sudo
  gather_facts: yes

  vars_files:
    - default_vars.yml

  tasks:

    - name: Login to OLVM manager
      ovirt_auth:
        url: "{{ lookup('env', 'OVIRT_URL') }}"
        username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
        password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
        ca_file: "{{ olvm_cafile | default('/etc/pki/ovirt-engine/ca.pem') }}"
        insecure: "{{ olvm_insecure | default(true) }}"
      tags:
        - always

    - name: Create and run VM from template
      ovirt_vm:
        auth: "{{ ovirt_auth }}"
        name: "{{ vm_name }}"
        template: "{{ olvm_template }}"
        cluster: "{{ olvm_cluster }}"
        memory: "{{ vm_ram }}"
        cpu_sockets: "{{ vm_cpu }}"
        high_availability: true
        state: running
        wait: yes
        cloud_init:
          host_name: "{{ vm_name + '.' + vm_dns_domain }}"
          root_password: "{{ vm_root_passwd }}"
          user_name: "{{ vm_user }}"
          authorized_ssh_keys: "{{ vm_user_sshpubkey }}"
          dns_servers: "{{ vm_dns }}"
          dns_search: "{{ vm_dns_domain }}"
          nic_name: "{{ vm_nicname | default('eth0') }}"
          nic_boot_protocol: static
          nic_ip_address: "{{ vm_ip_address }}"
          nic_gateway: "{{ vm_gateway }}"
          nic_netmask: "{{ vm_netmask }}"
          timezone: "{{ vm_timezone }}"
          custom_script: |
            runcmd:
              - hostnamectl set-hostname {{ vm_name + '.' + vm_dns_domain }}
              - yum -y remove cloud-init

    - name: Cleanup OLVM auth token
      ovirt_auth:
        ovirt_auth: "{{ ovirt_auth }}"
        state: absent
--------------------------------------------------------------------------------
/playbooks/OLVM/olvm_delete_vm.yml:
--------------------------------------------------------------------------------
---

# Configure default_vars.yml to set up default infrastructure variables
#
# Define the following variables as extra-vars:
#   --extra-vars "vm_name=oltest"

- hosts: olvm
  become: yes

  vars_files:
    - default_vars.yml

  tasks:

    - name: Login to OLVM manager
      ovirt_auth:
        url: "{{ lookup('env', 'OVIRT_URL') }}"
        username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
        password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
        ca_file: "{{ olvm_cafile | default('/etc/pki/ovirt-engine/ca.pem') }}"
        insecure: "{{ olvm_insecure | default(true) }}"
      tags:
        - always

    - name: Delete the VM {{ vm_name }}
      ovirt_vm:
        auth: "{{ ovirt_auth }}"
        state: absent
        name: "{{ vm_name }}"

    - name: Cleanup OLVM auth token
      ovirt_auth:
        ovirt_auth: "{{ ovirt_auth }}"
        state: absent
--------------------------------------------------------------------------------
/playbooks/OLVM/olvm_list_vminfo.yml:
--------------------------------------------------------------------------------
---

# Configure default_vars.yml to set up default infrastructure variables
#
# Define the following variables as extra-vars, for example:
#   --extra-vars "vm_name=oltest"

- hosts: olvm
  become: yes

  vars_files:
    - default_vars.yml

  tasks:
    - name: List OLVM VMs matching {{ vm_name }} in cluster {{ olvm_cluster }}
      ovirt_vm_info:
        auth:
          url: "{{ lookup('env', 'OVIRT_URL') }}"
          username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
          password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
          ca_file: "{{ olvm_cafile | default(omit) }}"
          insecure: "{{ olvm_insecure | default(true) }}"
        pattern: name="{{ vm_name }}" and cluster="{{ olvm_cluster }}"
      register: result

    - name: Print out {{ vm_name }} VM information
      debug:
        msg: "{{ result.ovirt_vms }}"
--------------------------------------------------------------------------------
/playbooks/OLVM/olvm_migrate_vm.yml:
--------------------------------------------------------------------------------
---

# Configure default_vars.yml to set up default infrastructure variables
#
# Define the following variables as extra-vars, for example:
#   --extra-vars "vm_name=XXX"
#   --extra-vars "dst_kvmhost=YYY"

- hosts: olvm
  become: yes
  become_method: sudo
  gather_facts: no

  vars_files:
    - default_vars.yml

  tasks:

    - name: Login to OLVM manager
      ovirt_auth:
        url: "{{ lookup('env', 'OVIRT_URL') }}"
        username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
        password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
        ca_file: "{{ olvm_cafile | default('/etc/pki/ovirt-engine/ca.pem') }}"
        insecure: "{{ olvm_insecure | default(true) }}"
      tags:
        - always

    - name: Migrate VM to assigned host
      ovirt_vm:
        auth: "{{ ovirt_auth }}"
        state: running
        name: "{{ vm_name }}"
        host: "{{ dst_kvmhost }}"
        cluster: "{{ olvm_cluster }}"
        wait: true

    - name: Cleanup OLVM auth token
      ovirt_auth:
        ovirt_auth: "{{ ovirt_auth }}"
        state: absent
--------------------------------------------------------------------------------
/playbooks/OLVM/olvm_rename_vm.yml:
--------------------------------------------------------------------------------
---

# Configure default_vars.yml to set up default infrastructure variables
#
# Define the following variables as extra-vars, for example:
#   --extra-vars "vm_id=XXX"
#   --extra-vars "vm_newname=YYY"

- hosts: all
  become: yes
  become_method: sudo
  gather_facts: no

  vars_files:
    - default_vars.yml

  tasks:

    - name: Login to OLVM manager
      ovirt_auth:
        url: "{{ lookup('env', 'OVIRT_URL') }}"
        username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
        password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
        ca_file: "{{ olvm_cafile | default('/etc/pki/ovirt-engine/ca.pem') }}"
        insecure: "{{ olvm_insecure | default(true) }}"
      tags:
        - always

    - name: Rename VM
      ovirt_vm:
        auth: "{{ ovirt_auth }}"
        id: "{{ vm_id }}"
        name: "{{ vm_newname }}"
        wait: true

    - name: Cleanup OLVM auth token
      ovirt_auth:
        ovirt_auth: "{{ ovirt_auth }}"
        state: absent
--------------------------------------------------------------------------------
/playbooks/OLVM/ovirt_list_resources.yaml:
--------------------------------------------------------------------------------
---
#
# Oracle Linux Automation Manager
#
# Copyright (c) 2025 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl.
#
# Description: Playbook to list VMs by Oracle Linux Virtualization Manager (OLVM) cluster
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
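#
# Example invocation (a sketch; assumes the OVIRT_* environment variables
# shown in the OLVM README are exported before running):
#
#   $ ansible-playbook -i olvm-engine.demo.local, -u opc ovirt_list_resources.yaml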

- name: List VMs in OLVM using environment variables
  hosts: all
  gather_facts: false
  tasks:

    - name: Login to OLVM manager
      ovirt_auth:
        url: "{{ lookup('env', 'OVIRT_URL') }}"
        username: "{{ lookup('env', 'OVIRT_USERNAME') }}"
        password: "{{ lookup('env', 'OVIRT_PASSWORD') }}"
        ca_file: "{{ olvm_cafile | default(omit) }}"
        insecure: "{{ olvm_insecure | default(true) }}"
      tags:
        - always

    - name: List OLVM VMs
      ovirt_vm_info:
        auth: "{{ ovirt_auth }}"
      register: result

    - name: Print out VM information
      debug:
        msg: "{{ result.ovirt_vms }}"

    - name: Get list of storage domains
      ovirt_storage_domain_info:
        auth: "{{ ovirt_auth }}"
      register: storage_domains_info

    - name: Print storage domain names
      debug:
        msg: >-
          Storage Domains:
          {{ storage_domains_info.ovirt_storage_domains | map(attribute='name') | join(', ') }}

    - name: Get list of networks
      ovirt.ovirt.ovirt_network_info:
        auth: "{{ ovirt_auth }}"
      register: networks_info

    - name: Print network names
      debug:
        msg: >-
          Networks:
          {{ networks_info.ovirt_networks | map(attribute='name') | join(', ') }}

    - name: Get list of templates
      ovirt.ovirt.ovirt_template_info:
        auth: "{{ ovirt_auth }}"
      register: templates_info

    - name: Print template names
      debug:
        msg: >-
          Templates:
          {{ templates_info.ovirt_templates | map(attribute='name') | join(', ') }}
--------------------------------------------------------------------------------
/playbooks/OL_Admin/Readme.txt:
--------------------------------------------------------------------------------
This directory is for playbooks for OL administration.
--------------------------------------------------------------------------------
/playbooks/OL_Admin/adduser.yml:
--------------------------------------------------------------------------------
#
# Oracle Linux Automation Manager
#
# Copyright (c) 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl.
#
# Description: Playbook to add a user with password to the targeted host.
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
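#
# Example invocation (a sketch; the values below are illustrative and mirror
# the defaults documented in the variable list that follows):
#
#   $ ansible-playbook -i inventory adduser.yml \
#       -e "username=oracle" -e "user_default_password=oracle" \
#       -e "enable_sudo=true" -e "enable_passwordless_sudo=false"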
#
# This playbook uses the following additional variables which should be configured at run time:
#
#   username: name of the user to create [oracle]
#   user_default_password: password to assign to the new user [oracle]
#   enable_sudo: [true|false]
#   enable_passwordless_sudo: [true|false]
#   ssh_keyfile: location of a public key file on the Oracle Linux Automation Manager server [~/.ssh/key.pub]
#   ssh_keyurl: url containing a public key file [http://github.com/username.keys]
#

---
- hosts: all
  become: yes

  vars:
    username:
    user_default_password:
    enable_sudo:
    enable_passwordless_sudo:
    ssh_keyfile:
    ssh_keyurl:

  tasks:

    - name: add user account
      user:
        name: "{{ username }}"
        password: "{{ user_default_password | password_hash('sha512') }}"
        comment: Ansible created user
        update_password: on_create

    - name: add user to wheel group for sudo access
      user:
        name: "{{ username }}"
        groups: wheel
        append: yes
      when: enable_sudo|default(false)|bool == true

    - name: set authorized key for user using local public key file
      authorized_key:
        user: "{{ username }}"
        state: present
        key: "{{ lookup('file', ssh_keyfile) }}"
      when: ssh_keyfile is defined and ssh_keyfile

    - name: set authorized key for user using public key URL
      authorized_key:
        user: "{{ username }}"
        state: present
        key: "{{ ssh_keyurl }}"
      when: ssh_keyurl is defined and (ssh_keyurl|length > 0)

    - name: set user with passwordless sudo access
      lineinfile:
        path: /etc/sudoers.d/oracle
        regexp: '{{ username }} ALL='
        line: '{{ username }} ALL=(ALL:ALL) NOPASSWD: ALL'
        state: present
        create: yes
      when: enable_passwordless_sudo|default(false)|bool == true
--------------------------------------------------------------------------------
/playbooks/OL_Admin/auditd.yml:
--------------------------------------------------------------------------------
# Copyright (c) 2025 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl.
#
# Description: Auditd is a userspace system daemon running in the background, generating logs
# about activities performed on Oracle Linux. This playbook checks that auditd is installed
# and running, and demonstrates general administration commands.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
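#
# Example invocation (a sketch; ensures auditd is installed and running on the
# inventory hosts, then reports the current audit state):
#
#   $ ansible-playbook -i inventory auditd.yml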

---
- name: Auditd Admin Playbook
  hosts: all
  become: true
  tasks:

    - name: Ensure auditd is installed
      package:
        name: audit
        state: present

    - name: Ensure auditd is started
      service:
        name: auditd
        state: started

    - name: Check current audit rules
      command: auditctl -l
      register: audit_rules_output
      changed_when: false

    - name: Display audit rules
      debug:
        msg: "{{ audit_rules_output.stdout_lines }}"

    - name: Check the status of the audit system
      command: auditctl -s
      register: auditctl_status_output
      changed_when: false

    - name: Display audit status
      debug:
        msg: "{{ auditctl_status_output.stdout_lines }}"

    - name: Search for failed user logins
      command: ausearch -m USER_LOGIN -sv no
      register: ausearch_status_output
      changed_when: false
      # ausearch exits non-zero when no matching events are found
      failed_when: ausearch_status_output.rc > 1

    - name: Display failed logins if any
      debug:
        msg: "{{ ausearch_status_output.stdout_lines }}"

    - name: Generate summary report
      command: aureport --summary
      register: aureport_output
      changed_when: false

    - name: Display summary report
      debug:
        msg: "{{ aureport_output.stdout_lines }}"
--------------------------------------------------------------------------------
/playbooks/OL_Admin/collections/requirements.yml:
--------------------------------------------------------------------------------
---
collections:
  - name: community.general
  - name: ansible.posix
--------------------------------------------------------------------------------
/playbooks/OL_Admin/hello-world.yml:
--------------------------------------------------------------------------------
# Copyright (c) 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl.
#
# Description: Playbook that prints a hello_world message.
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.

---
- name: hello world sample
  hosts: all
  tasks:
    - name: print welcome message
      debug:
        msg: "Hello! Welcome to Oracle Linux Automation Manager."
--------------------------------------------------------------------------------
/playbooks/OL_Admin/iptables-httpd.yaml:
--------------------------------------------------------------------------------
#
# Oracle Linux Automation Manager
#
# Copyright (c) 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl.
#
# Description: Playbook to install & start HTTPD and configure an iptables-based firewall
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#

---
- hosts: all
  tasks:
    - name: Install httpd packages
      yum:
        name: httpd
        state: present
    - name: ensure httpd is running
      service:
        name: httpd
        state: started
        enabled: true
    - name: Insert a rule for port 80
      ansible.builtin.iptables:
        chain: INPUT
        protocol: tcp
        destination_port: 80
        jump: ACCEPT
        action: insert
--------------------------------------------------------------------------------
/playbooks/OL_Admin/update_ol.yml:
--------------------------------------------------------------------------------
# Copyright (c) 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl.
#
# Description: Playbook to update an Oracle Linux targeted host.
#
# Options:
#   use_proxy: Allows the playbook to use a proxy if required.
#   proxy: Set the proxy by replacing proxy_host with the FQDN of the proxy server, and proxy_port with the proxy server port.
#   no_proxy: Set the list of hosts to ignore the proxy setting (comma delimited) by replacing no_proxy_list.
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.

---
- hosts: all
  become: yes
  vars:
    use_proxy: no
    proxy: '{{ proxy_host|default(omit) }}:{{ proxy_port|default(omit) }}'
    proxy_env:
      http_proxy: '{{ proxy }}'
      https_proxy: '{{ proxy }}'
      ftp_proxy: '{{ proxy }}'
      no_proxy: '{{ no_proxy_list | default(omit) }}'

  tasks:
    - name: update all packages on Oracle Linux 8 or later systems
      ansible.builtin.dnf:
        name: "*"
        state: latest
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '>='))

    - name: update all packages on Oracle Linux 7 systems
      ansible.builtin.yum:
        name: "*"
        state: latest
      when: ansible_distribution == 'OracleLinux' and (ansible_distribution_version is version('8', '<'))

    - name: check if reboot required
      command: /usr/bin/needs-restarting -r
      register: reboot_required
      ignore_errors: yes
      changed_when: false
      failed_when: reboot_required.rc == 2
      when: ansible_distribution == 'OracleLinux'

    - debug:
        var: reboot_required

    - name: reboot (if needed) to apply latest kernel and updates
      reboot:
      when: ansible_distribution == 'OracleLinux' and reboot_required.rc == 1

  environment: "{{ proxy_env | default(omit) }}"
--------------------------------------------------------------------------------
/playbooks/OL_Admin/vnc_install_configure.yaml:
--------------------------------------------------------------------------------
#
# Oracle Linux Automation Manager
#
# Copyright (c) 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl.
#
# Description: Playbook to install, configure & start the VNC Server for OL7 and OL8
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
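#
# Example invocation (a sketch; the extra-vars mirror the lab values
# documented in the header that follows):
#
#   $ ansible-playbook -i inventory vnc_install_configure.yaml \
#       -e "vnc_port=1" -e "username=olam" -e "usergroup=olam" \
#       -e "vnc_default_password=Welcome1" -e "vnc_geometry=1280x1024" \
#       -e "template_path=/var/lib/awx/projects/templates"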
#
# This playbook uses the following additional variables which should be configured at run time
# for the template; these are example values used in the lab:
#
#   vnc_port: 1
#   template_path: /var/lib/awx/projects/templates
#   username: olam
#   usergroup: olam
#   vnc_default_password: Welcome1
#   vnc_geometry: 1280x1024
#
# Also, a template file is needed which exists in the templates directory, linked here: templates/vncserver-service.j2
# Within the lab OLAM server this file was placed in /var/lib/awx/projects/templates with awx:awx permissions for both file and directory

---
- hosts: all
  become: yes

  tasks:
    - name: Install the 'Server with GUI' package group on Oracle Linux 8 or higher
      dnf:
        name: "@Server with GUI"
        state: present
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '>='))

    - name: Install the 'Server with GUI' package group on Oracle Linux 7
      yum:
        name: "@^Server with GUI"
        state: present
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '<'))

    - name: Install the vnc package for Oracle Linux 8 or higher
      dnf:
        name:
          - tigervnc-server
          - tigervnc-server-module
        state: latest
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '>='))

    - name: Install the vnc package for Oracle Linux 7
      yum:
        name:
          - tigervnc-server
        state: latest
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '<'))

    - name: Configure firewalld for the VNC ports
      firewalld:
        service: vnc-server
        permanent: yes
        state: enabled

    - name: Reload the firewall
      systemd:
        name: firewalld
        state: reloaded

    - name: Copy systemd template OL8
      copy:
        src: /usr/lib/systemd/system/vncserver@.service
        dest: /etc/systemd/system/vncserver@:{{ vnc_port }}.service
        remote_src: yes
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '>='))

    - name: Copy systemd template OL7 including vnc port:{{ vnc_port }} and {{ vnc_geometry }}
      template:
        src: "{{ template_path }}/vncserver-service.j2"
        dest: /etc/systemd/system/vncserver@:{{ vnc_port }}.service
        owner: root
        group: root
        mode: '0644'
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '<'))

    - name: Assign {{ username }} to vnc port OL8
      lineinfile:
        path: /etc/tigervnc/vncserver.users
        line: ":{{ vnc_port }}={{ username }}"
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '>='))

    - name: Set vnc geometry and sessions OL8
      blockinfile:
        path: /etc/tigervnc/vncserver-config-defaults
        block: |
          session=gnome
          geometry={{ vnc_geometry }}
      when: ansible_distribution == 'OracleLinux' and (ansible_facts['distribution_version'] is version('8', '>='))

    - name: Create .vnc directory for user
      file:
        path: /home/{{ username }}/.vnc
        state: directory
        mode: 0700
        owner: "{{ username }}"
        group: "{{ username }}"

    - name: Generate vnc password for the remote user
      shell: |
        set -o pipefail
        echo {{ vnc_default_password }} | vncpasswd -f > /home/{{ username }}/.vnc/passwd
      args:
        chdir: "/home/{{ username }}/.vnc"
        creates: "/home/{{ username }}/.vnc/passwd"
        executable: /bin/bash

    - name: Change the permission to 600 for .vnc/passwd file
      file:
        path: "/home/{{ username }}/.vnc/passwd"
        owner: "{{ username }}"
        group: "{{ usergroup }}"
        mode: '0600'

    - name: Start and enable the vnc service
      systemd:
        state: started
        daemon_reload: yes
        name: vncserver@:{{ vnc_port }}.service
        enabled: true
--------------------------------------------------------------------------------
/playbooks/STIG/README.md:
--------------------------------------------------------------------------------
## Introduction
A Security Technical Implementation Guide (STIG), released by the Defense Information Systems Agency (DISA), is a document that provides guidance on configuring a system to meet cybersecurity requirements for deployment within Department of Defense (DoD) IT network systems.
The published STIG guidelines are included in the scap-security-guide package available in the ol8_appstream channel, which can be used with the OpenSCAP tool to evaluate the compliance of an Oracle Linux installation.
Individual rules and their remediation details are well documented in [ssg-ol8-guide-standard](https://static.open-scap.org/ssg-guides/ssg-ol8-guide-standard.html).
The scap-security-guide RPM package provides the remediation playbooks for different profiles, including STIG, located under /usr/share/scap-security-guide/ansible/.

### Details about the playbooks
- openscap.yml - Installs the OpenSCAP rpms, runs the scan against the STIG profile and saves the report at /var/www/html/ssg-results.html.
- ol8-playbook-stig.yml - Applies the remediation for the STIG profile to the specified set of hosts.

Example invocations are shown after the considerations below.

## Considerations
As part of the remediation activity, many modifications could be made to the target host, so kindly review each task before proceeding.
Here are a few examples:

- DISA-STIG-OL08-00-010550 - Disables SSH root login. An SSH session to the servers might give an "Access Denied" error.
- DISA-STIG-OL08-00-010020 - Enables FIPS. Ensure the host is rebooted for FIPS to be enabled; otherwise, even though the remediation is applied, the rule will still be marked as failed.
- DISA-STIG-OL08-00-010670 - Disables kdump services.
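
## Example invocations

A minimal sketch, assuming an inventory file named `hosts.ini` and that the target hosts allow privilege escalation:

    # scan only: evaluate the STIG profile and write the HTML report
    $ ansible-playbook -i hosts.ini openscap.yml

    # remediate: apply the STIG remediation tasks (review them first!)
    $ ansible-playbook -i hosts.ini ol8-playbook-stig.yml

Running the remediation playbook with `--check` first is a sensible way to preview the changes before applying them.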


--------------------------------------------------------------------------------
/playbooks/STIG/collections/requirements.yml:
--------------------------------------------------------------------------------
---
collections:
  - name: oracle.oci
  - name: community.general
  - name: ansible.posix
--------------------------------------------------------------------------------
/playbooks/STIG/openscap.yml:
--------------------------------------------------------------------------------
- hosts: all

  tasks:

    - name: install the openscap scanner and content packages
      package:
        name: "{{ item }}"
        state: latest
      with_items:
        - openscap-scanner
        - scap-security-guide
        - openscap-utils

    - name: run openscap
      command: >
        oscap xccdf eval
        --profile stig
        --results /tmp/ssg.xml
        --report /var/www/html/ssg-results.html
        --cpe /usr/share/xml/scap/ssg/content/ssg-ol8-cpe-dictionary.xml
        /usr/share/xml/scap/ssg/content/ssg-ol8-xccdf.xml
      # oscap exits non-zero when rules fail, which is expected for a scan
      ignore_errors: true
--------------------------------------------------------------------------------
/templates/vncserver-service.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Remote desktop service (VNC)
After=syslog.target network.target

[Service]
Type=forking
WorkingDirectory=/home/{{ username }}
User={{ username }}
Group={{ username }}

# Clean any existing files in /tmp/.X11-unix environment
ExecStartPre=/bin/sh -c '/usr/bin/vncserver -kill %i > /dev/null 2>&1 || :'
ExecStart=/usr/bin/vncserver %i -geometry {{ vnc_geometry }}
PIDFile=/home/{{ username }}/.vnc/%H%i.pid
ExecStop=/usr/bin/vncserver -kill %i

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------