├── .gitignore ├── LICENSE ├── README.md ├── ansible.cfg ├── blueprints ├── bare_cluster.bp.j2 └── bare_cluster.ct.j2 ├── filter_plugins ├── hashpass.py ├── ifelse.py ├── list2csv.py └── uniqekeyvalues.py ├── group_vars ├── all └── ambariserver ├── inventories ├── example_inventory └── vagrant │ ├── Vagrantfile │ ├── boxes.json │ ├── inventory.cfg │ ├── inventory.py │ ├── pb_vagrant_provision.yml │ ├── roles │ └── hosts_file │ │ └── tasks │ │ └── main.yml │ └── vagrant.json ├── library ├── blueprints └── kerberise ├── pb_ambari_blueprint_cluster.yml ├── pb_ambari_kerberise_cluster.yml ├── pb_ambari_setup.yml ├── pb_directory_services.yml ├── pb_provision_cluster.yml ├── pb_provision_env.yml ├── pb_ssl_certificates.yml ├── repo_files ├── ambari-2.1.2.centos.6.repo ├── ambari-2.1.2.centos.7.repo ├── ambari-2.2.0.centos.6.repo ├── ambari-2.2.0.centos.7.repo ├── ambari-2.2.1.centos.6.repo ├── ambari-2.2.2.2-2.centos.6.repo ├── ambari-2.2.2.centos.6.repo ├── ambari-2.4.0.1.centos.6.repo └── ambari-2.4.0.1.centos.7.repo ├── roles ├── ambariagent │ └── tasks │ │ └── main.yml ├── ambariblueprint │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── ambariserver │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── ambari-server.service │ └── tasks │ │ ├── ambari_ddl.yml │ │ ├── ambari_setup_start.yml │ │ ├── main.yml │ │ └── mysql_grants.yml ├── create_kerberos_users │ └── tasks │ │ └── main.yml ├── etc_krb5 │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── krb5.conf.j2 ├── hadoop_os_configuration │ ├── files │ │ ├── 90-nproc.conf │ │ ├── limits.conf │ │ └── security_limits.conf │ └── tasks │ │ ├── config_files.yml │ │ ├── main.yml │ │ └── page_files.yml ├── haveged │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── install_haveged.yml │ │ └── main.yml ├── hosts_file │ └── tasks │ │ └── main.yml ├── kdcmaster │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── kdb5_create.exp.j2 │ │ └── kdc.conf.j2 ├── kerberisecluster │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── desired_config.j2 ├── mysql_server │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── openldap_server │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── dn.ldif.j2 │ │ ├── groups.ldif.j2 │ │ ├── ou.ldif.j2 │ │ └── users.ldif.j2 ├── oracle-java │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ └── set_vars.yml ├── ssl_certs │ └── tasks │ │ ├── gen_certs.yml │ │ └── main.yml └── sssd │ └── tasks │ └── main.yml └── vars ├── cert_vars.yml ├── kdc_config ├── ldap_config └── users.yml /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant/ 2 | *.pyc 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2015 Alex Bush 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ansible-hadoop-asap 2 | > “ASAP. Whatever that means. It must mean, 'Act swiftly awesome pachyderm!'” 3 | 4 | Ansible Playbooks to install Hortonworks Data Platform (HDP) using Ambari Blueprints. Currently the Playbooks install an MIT KDC and the resulting cluster is fully kerberised. 5 | 6 | This has been tested against CentOS6 and CentOS7 in Vagrant. 7 | 8 | ##### Updates 9 | * Preinstalled HDP boxes now available for VirtualBox (should considerably speed up cluster creation time). 10 | * Kerberos now deployed optionally during the blueprint build. Specify it as a service in: [group_vars/all](group_vars/all) (Only available with Ambari 2.2.1+ due to [AMBARI-14409](https://issues.apache.org/jira/browse/AMBARI-14409); use the old deployment method if a lower Ambari version is needed) 11 | * Ranger now deployed as an optional service 12 | * Ranger SSL now optional. Check ssl_services in [group_vars/all](group_vars/all) 13 | * Namenode HA now optional by specifying namenode in ha_services in [group_vars/all](group_vars/all) 14 | 15 | ## Getting started 16 | The inventory file is expected to be in a format similar to the example: [example](inventories/example_inventory). 17 | 18 | All hosts must be in a _clustername_ group, in the appropriate _services_ group and a _clustername_service_ group. 19 | 20 | Alternatively, you can use one of the instance creation methods below and use an included dynamic inventory (e.g. [inventory.py](inventories/vagrant/inventory.py)). 21 | 22 | #### Creating instances 23 | 24 | ###### Vagrant 25 | To use these scripts with Vagrant, change directory into [inventories/vagrant](inventories/vagrant), modify the [vagrant.json](inventories/vagrant/vagrant.json) file to your liking, export both the OS and HDP versions and run `vagrant up`. Make sure the hostnames are resolvable from the Ansible host (hint: place entries in /etc/hosts). 26 | 27 | ``` 28 | cd inventories/vagrant 29 | export OS_VERSION=centos6 30 | export HDP_VERSION=HDP-2.3.4.0 31 | vagrant up 32 | ``` 33 | 34 | You can check and edit the available boxes in [boxes.json](inventories/vagrant/boxes.json). 35 | 36 | #### Configuration 37 | Most configuration is done through the [group_vars](group_vars) files. 38 | 39 | For now, users can be configured in the [vars/users.yml](vars/users.yml) file and KDC credentials can be configured in the [vars/kdc_config](vars/kdc_config) file. 40 | 41 | Services can be configured in the [group_vars/all](group_vars/all) file. Currently only Kerberos, Ranger, Spark, HBase, Oozie, Falcon, Storm and Kafka are optional services. All others are mandatory.
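For example, to build without Ranger, Storm and Kafka, simply omit them from blueprint_services. A minimal sketch of the relevant line in [group_vars/all](group_vars/all), using the service names the playbooks already ship with:

```
blueprint_services: "KERBEROS,HDFS,HIVE,MAPREDUCE2,PIG,SQOOP,TEZ,YARN,ZOOKEEPER"
```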
42 | 43 | Ranger admin and admin<->plugins can now be optionally SSL'd by setting ssl_services in [group_vars/all](group_vars/all) 44 | 45 | #### Running the Playbook 46 | ``` 47 | ansible-playbook -i inventories/vagrant/inventory.py pb_provision_cluster.yml -e 'cluster_name=vagrantcluster' 48 | ``` 49 | 50 | #### Notes 51 | Currently, users are managed in an OpenLDAP server and their credentials are stored in an MIT KDC. Unix authentication is handled by SSSD against the KDC (krb5). 52 | 53 | ## TO DO 54 | - [ ] Build blueprints dynamically (j2) depending on services requested 55 | - [ ] FreeIPA support (alternative to MIT KDC) 56 | - [x] OpenLDAP when using KDC (no local users) 57 | - [x] Ranger 58 | - [ ] RangerKMS, Knox and other advanced services support 59 | - [ ] Pull implementations in [library](library/) modules into a shared Ambari Python class 60 | - [x] CentOS 7 support 61 | - [ ] AWS support 62 | - [ ] OpenStack support 63 | - [ ] Azure support 64 | - [x] NTP 65 | - [ ] Make use of config_recommendation_strategy for Ambari versions >= 2.2.0 66 | 67 | 68 | ## [License](LICENSE) 69 | 70 | Copyright (c) 2016 Alex Bush. 71 | Licensed under the [Apache License](LICENSE). 72 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | 4 | [ssh_connection] 5 | scp_if_ssh = True 6 | -------------------------------------------------------------------------------- /blueprints/bare_cluster.bp.j2: -------------------------------------------------------------------------------- 1 | { 2 | "configurations": [ 3 | { 4 | "hive-env": { 5 | "properties": { 6 | {% if 'RANGER' in blueprint_services %} 7 | "hive_security_authorization" : "Ranger", 8 | {% endif %} 9 | "hive_database": "Existing MySQL Database" 10 | } 11 | } 12 | }, 13 | {% if 'KERBEROS' in blueprint_services %} 14 | { 15 | "kerberos-env": { 16 | "properties_attributes" : { }, 17 | "properties" : { 18 | "realm" : "{{ krb_realm }}", 19 | "kdc_type" : "mit-kdc", 20 | "kdc_host" : "{{ groups[cluster_name+'_directoryservices'][0] }}", 21 | "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 22 | "ldap_url" : "", 23 | "container_dn" : "", 24 | "admin_server_host" : "{{ groups[cluster_name+'_directoryservices'][0] }}" 25 | } 26 | } 27 | }, 28 | { 29 | "krb5-conf": { 30 | "properties_attributes" : { }, 31 | "properties" : { 32 | "domains" : "", 33 | "manage_krb5_conf" : "false" 34 | } 35 | } 36 | }, 37 | {% endif %} 38 | {% if 'STORM' in blueprint_services %} 39 | { 40 | "storm-site" : { 41 | "properties" : { 42 | {% if 'STORMUI' in ssl_services %} 43 | "ui.https.port" : "8740", 44 | "ui.https.keystore.type" : "jks", 45 | "ui.https.keystore.path" : "{{ cert_dir }}/{{ groups[cluster_name+'_mn02'][0] }}.jks", 46 | "ui.https.keystore.password" : "{{ keystore_default_pass }}", 47 | "ui.https.key.password" : "{{ key_default_pass }}", 48 | "ui.https.truststore.path" : "{{ cert_dir }}/clusterTrustStore.jks", 49 | "ui.https.truststore.password" : "{{ keystore_default_pass }}", 50 | "ui.https.truststore.type" : "jks", 51 | {% endif %} 52 | {% if 'RANGER' in blueprint_services %} 53 | "nimbus.authorizer" : "org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer" 54 | {% else %} 55 | "nimbus.authorizer" : "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer" 56 | {% endif %} 57 | } 58 | } 59 | }, 60 | {% endif %} 61 | {% if 'RANGER' in blueprint_services %} 62 | { 63 |
"admin-properties" : { 64 | "properties_attributes" : { }, 65 | "properties" : { 66 | "DB_FLAVOR" : "MYSQL", 67 | "audit_db_name" : "{{ mysql_rangeraudit_database }}", 68 | "db_name" : "{{ mysql_ranger_database }}", 69 | "audit_db_user" : "{{ mysql_rangeraudit_user }}", 70 | "SQL_CONNECTOR_JAR" : "/usr/share/java/mysql-connector-java.jar", 71 | "db_user" : "{{ mysql_ranger_user }}", 72 | {% if 'RANGER' in ssl_services %} 73 | "policymgr_external_url" : "https://{{ groups[cluster_name+'_mn03'][0] }}:6182", 74 | {% else %} 75 | "policymgr_external_url" : "http://{{ groups[cluster_name+'_mn03'][0] }}:6080", 76 | {% endif %} 77 | "db_host" : "{{ groups[cluster_name+'_mn03'][0] }}:3306", 78 | "db_root_user" : "{{ mysql_root_user }}", 79 | "db_root_password" : "{{ mysql_root_password }}", 80 | "db_password" : "{{ mysql_ranger_password }}", 81 | "audit_db_password" : "{{ mysql_rangeraudit_password }}" 82 | } 83 | } 84 | }, 85 | { 86 | "ranger-admin-site" : { 87 | "properties_attributes" : { }, 88 | "properties" : { 89 | {% if 'RANGER' in ssl_services %} 90 | "ranger.service.https.attrib.ssl.enabled" : "true", 91 | "ranger.service.http.enabled" : "false", 92 | "ranger.https.attrib.keystore.file" : "{{ cert_dir }}/{{ groups[cluster_name+'_mn03'][0] }}.jks", 93 | "ranger.service.https.attrib.keystore.pass" : "{{ keystore_default_pass }}", 94 | "ranger.service.https.attrib.clientAuth" : "want", 95 | "ranger.service.https.attrib.keystore.keyalias" : "{{ groups[cluster_name+'_mn03'][0] }}", 96 | {% endif %} 97 | "ranger.jpa.jdbc.url" : "jdbc:mysql://{{ groups[cluster_name+'_mysql'][0] }}:3306/{{ mysql_ranger_database }}", 98 | "ranger.jpa.audit.jdbc.url" : "jdbc:mysql://{{ groups[cluster_name+'_mysql'][0] }}:3306/{{ mysql_rangeraudit_database }}" 99 | } 100 | } 101 | }, 102 | { 103 | "ranger-ugsync-site" : { 104 | "properties_attributes" : { }, 105 | "properties" : { 106 | {% if 'RANGER' in ssl_services %} 107 | "ranger.usersync.truststore.file" : "{{ cert_dir }}/clusterTrustStore.jks", 108 | "ranger.usersync.truststore.password" : "{{ truststore_default_pass }}" 109 | {% endif %} 110 | } 111 | } 112 | }, 113 | { 114 | "ranger-env" : { 115 | "properties_attributes" : { }, 116 | "properties" : { 117 | "ranger_user" : "ranger", 118 | "ranger-hdfs-plugin-enabled" : "Yes", 119 | "ranger-yarn-plugin-enabled" : "No", 120 | {% if 'KAFKA' in blueprint_services %} 121 | "ranger-kafka-plugin-enabled" : "Yes", 122 | {% endif %} 123 | {% if 'HBASE' in blueprint_services %} 124 | "ranger-hbase-plugin-enabled" : "Yes", 125 | {% endif %} 126 | "ranger-hive-plugin-enabled" : "Yes", 127 | {% if 'STORM' in blueprint_services %} 128 | "ranger-storm-plugin-enabled" : "Yes", 129 | {% endif %} 130 | "xasecure.audit.destination.solr" : "false", 131 | "xasecure.audit.destination.db" : "true", 132 | "ranger_group" : "ranger", 133 | "ranger_admin_username" : "amb_ranger_admin", 134 | "ranger_admin_password" : "amb_ranger_admin", 135 | "admin_username" : "admin" 136 | } 137 | } 138 | }, 139 | { 140 | "ranger-hdfs-plugin-properties" : { 141 | "properties_attributes" : { }, 142 | "properties" : { 143 | "REPOSITORY_CONFIG_USERNAME" : "hadoop", 144 | "ranger-hdfs-plugin-enabled" : "Yes", 145 | {% if 'RANGER' in ssl_services %} 146 | "common.name.for.certificate" : "rangerHdfsAgent", 147 | {% else %} 148 | "common.name.for.certificate" : "", 149 | {% endif %} 150 | "policy_user" : "ambari-qa", 151 | "hadoop.rpc.protection" : "" 152 | } 153 | } 154 | }, 155 | {% if 'RANGER' in ssl_services %} 156 | { 157 | "ranger-hdfs-policymgr-ssl" 
: { 158 | "properties_attributes" : { }, 159 | "properties" : { 160 | "xasecure.policymgr.clientssl.keystore" : "{{ cert_dir }}/rangerHdfsAgent.jks", 161 | "xasecure.policymgr.clientssl.keystore.password" : "{{ keystore_default_pass }}", 162 | "xasecure.policymgr.clientssl.truststore" : "{{ cert_dir }}/clusterTrustStore.jks", 163 | "xasecure.policymgr.clientssl.truststore.password" : "{{ truststore_default_pass }}" 164 | } 165 | } 166 | }, 167 | {% endif %} 168 | { 169 | "ranger-hive-plugin-properties" : { 170 | "properties_attributes" : { }, 171 | "properties" : { 172 | "REPOSITORY_CONFIG_USERNAME" : "hive", 173 | "ranger-hive-plugin-enabled" : "Yes", 174 | {% if 'RANGER' in ssl_services %} 175 | "common.name.for.certificate" : "rangerHiveAgent", 176 | {% else %} 177 | "common.name.for.certificate" : "", 178 | {% endif %} 179 | "policy_user" : "ambari-qa", 180 | "hadoop.rpc.protection" : "" 181 | } 182 | } 183 | }, 184 | {% if 'RANGER' in ssl_services %} 185 | { 186 | "ranger-hive-policymgr-ssl" : { 187 | "properties_attributes" : { }, 188 | "properties" : { 189 | "xasecure.policymgr.clientssl.keystore" : "{{ cert_dir }}/rangerHiveAgent.jks", 190 | "xasecure.policymgr.clientssl.keystore.password" : "{{ keystore_default_pass }}", 191 | "xasecure.policymgr.clientssl.truststore" : "{{ cert_dir }}/clusterTrustStore.jks", 192 | "xasecure.policymgr.clientssl.truststore.password" : "{{ truststore_default_pass }}" 193 | } 194 | } 195 | }, 196 | {% endif %} 197 | {% if 'HBASE' in blueprint_services %} 198 | { 199 | "ranger-hbase-plugin-properties" : { 200 | "properties_attributes" : { }, 201 | "properties" : { 202 | "REPOSITORY_CONFIG_USERNAME" : "hbase", 203 | "ranger-hbase-plugin-enabled" : "Yes", 204 | {% if 'RANGER' in ssl_services %} 205 | "common.name.for.certificate" : "rangerHbaseAgent", 206 | {% else %} 207 | "common.name.for.certificate" : "", 208 | {% endif %} 209 | "policy_user" : "ambari-qa", 210 | "hadoop.rpc.protection" : "" 211 | } 212 | } 213 | }, 214 | {% if 'RANGER' in ssl_services %} 215 | { 216 | "ranger-hbase-policymgr-ssl" : { 217 | "properties_attributes" : { }, 218 | "properties" : { 219 | "xasecure.policymgr.clientssl.keystore" : "{{ cert_dir }}/rangerHbaseAgent.jks", 220 | "xasecure.policymgr.clientssl.keystore.password" : "{{ keystore_default_pass }}", 221 | "xasecure.policymgr.clientssl.truststore" : "{{ cert_dir }}/clusterTrustStore.jks", 222 | "xasecure.policymgr.clientssl.truststore.password" : "{{ truststore_default_pass }}" 223 | } 224 | } 225 | }, 226 | {% endif %} 227 | {% endif %} 228 | {% if 'KAFKA' in blueprint_services %} 229 | { 230 | "ranger-kafka-plugin-properties" : { 231 | "properties_attributes" : { }, 232 | "properties" : { 233 | "REPOSITORY_CONFIG_USERNAME" : "kafka", 234 | "ranger-kafka-plugin-enabled" : "Yes", 235 | {% if 'RANGER' in ssl_services %} 236 | "common.name.for.certificate" : "rangerKafkaAgent", 237 | {% else %} 238 | "common.name.for.certificate" : "", 239 | {% endif %} 240 | "policy_user" : "ambari-qa", 241 | "hadoop.rpc.protection" : "" 242 | } 243 | } 244 | }, 245 | {% if 'RANGER' in ssl_services %} 246 | { 247 | "ranger-kafka-policymgr-ssl" : { 248 | "properties_attributes" : { }, 249 | "properties" : { 250 | "xasecure.policymgr.clientssl.keystore" : "{{ cert_dir }}/rangerKafkaAgent.jks", 251 | "xasecure.policymgr.clientssl.keystore.password" : "{{ keystore_default_pass }}", 252 | "xasecure.policymgr.clientssl.truststore" : "{{ cert_dir }}/clusterTrustStore.jks", 253 | "xasecure.policymgr.clientssl.truststore.password" : "{{ 
truststore_default_pass }}" 254 | } 255 | } 256 | }, 257 | {% endif %} 258 | {% endif %} 259 | {% if 'STORM' in blueprint_services %} 260 | { 261 | "ranger-storm-plugin-properties" : { 262 | "properties_attributes" : { }, 263 | "properties" : { 264 | "REPOSITORY_CONFIG_USERNAME" : "storm", 265 | "ranger-storm-plugin-enabled" : "Yes", 266 | {% if 'RANGER' in ssl_services %} 267 | "common.name.for.certificate" : "rangerStormAgent", 268 | {% else %} 269 | "common.name.for.certificate" : "", 270 | {% endif %} 271 | "policy_user" : "ambari-qa", 272 | "hadoop.rpc.protection" : "" 273 | } 274 | } 275 | }, 276 | {% if 'RANGER' in ssl_services %} 277 | { 278 | "ranger-storm-policymgr-ssl" : { 279 | "properties_attributes" : { }, 280 | "properties" : { 281 | "xasecure.policymgr.clientssl.keystore" : "{{ cert_dir }}/rangerStormAgent.jks", 282 | "xasecure.policymgr.clientssl.keystore.password" : "{{ keystore_default_pass }}", 283 | "xasecure.policymgr.clientssl.truststore" : "{{ cert_dir }}/clusterTrustStore.jks", 284 | "xasecure.policymgr.clientssl.truststore.password" : "{{ truststore_default_pass }}" 285 | } 286 | } 287 | }, 288 | {% endif %} 289 | {% endif %} 290 | {% endif %} 291 | {% if 'OOZIE' in blueprint_services %} 292 | { 293 | "oozie-site" : { 294 | "properties" : { 295 | "oozie.service.JPAService.jdbc.driver" : "com.mysql.jdbc.Driver", 296 | "oozie.service.JPAService.jdbc.url" : "jdbc:mysql://{{ groups[cluster_name+'_mysql'][0] }}/oozie", 297 | "oozie.service.JPAService.jdbc.username" : "{{ mysql_oozie_user }}", 298 | "oozie.db.schema.name" : "{{ mysql_oozie_database }}" 299 | } 300 | } 301 | }, 302 | { 303 | "oozie-env" : { 304 | "properties" : { 305 | "oozie_hostname" : "{{ groups[cluster_name+'_mn03'][0] }}", 306 | "oozie_database" : "Existing MySQL Database" 307 | } 308 | } 309 | }, 310 | {% endif %} 311 | { 312 | "ams-site": { 313 | "properties": { 314 | } 315 | } 316 | }, 317 | { 318 | "core-site": { 319 | "properties": { 320 | {% if 'NAMENODE' in ha_services %} 321 | "fs.defaultFS" : "hdfs://{{ cluster_name }}", 322 | "ha.zookeeper.quorum" : "{{ groups[cluster_name+'_mn01'][0] }}:2181,{{ groups[cluster_name+'_mn02'][0] }}:2181,{{ groups[cluster_name+'_mn03'][0] }}:2181", 323 | {% endif %} 324 | "hadoop.proxyuser.hive.groups": "*", 325 | "hadoop.proxyuser.hdfs.groups": "*", 326 | "hadoop.proxyuser.hcat.hosts": "{{ groups[cluster_name+'_mn03'][0] }}", 327 | "hadoop.proxyuser.hdfs.hosts": "*", 328 | "hadoop.proxyuser.hive.hosts": "{{ groups[cluster_name+'_mn03'][0] }}" 329 | } 330 | } 331 | }, 332 | { 333 | "hive-site": { 334 | "properties": { 335 | {% if 'RANGER' in blueprint_services %} 336 | "hive.security.authorization.enabled" : "true", 337 | "hive.server2.enable.doAs" : "false", 338 | {% endif %} 339 | "hive.enforce.bucketing": "false", 340 | "javax.jdo.option.ConnectionURL": "jdbc:mysql://{{ groups[cluster_name+'_mysql'][0] }}/hive?createDatabaseIfNotExist=true" 341 | } 342 | } 343 | }, 344 | { 345 | "hiveserver2-site": { 346 | "properties": { 347 | {% if 'RANGER' in blueprint_services %} 348 | "hive.security.authorization.enabled" : "true", 349 | "hive.conf.restricted.list" : "hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager", 350 | "hive.security.authenticator.manager" : "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator", 351 | "hive.security.authorization.manager" : "org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory" 352 | {% endif %} 353 | } 354 | } 355 | }, 356 | { 357 | 
"yarn-env": { 358 | "properties": { 359 | "min_user_id": "1000" 360 | } 361 | } 362 | }, 363 | { 364 | "tez-site": { 365 | "properties": { 366 | } 367 | } 368 | }, 369 | { 370 | "mapred-site": { 371 | "properties": { 372 | } 373 | } 374 | }, 375 | { 376 | "yarn-site": { 377 | "properties": { 378 | } 379 | } 380 | }, 381 | { 382 | "zoo.cfg": { 383 | "properties": { 384 | } 385 | } 386 | }, 387 | { 388 | "hadoop-env": { 389 | "properties": { 390 | } 391 | } 392 | }, 393 | { 394 | "ams-env": { 395 | "properties": { 396 | } 397 | } 398 | }, 399 | { 400 | "hdfs-site": { 401 | "properties": { 402 | {% if 'NAMENODE' in ha_services %} 403 | "dfs.client.failover.proxy.provider.{{ cluster_name }}" : "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider", 404 | "dfs.ha.automatic-failover.enabled" : "true", 405 | "dfs.ha.fencing.methods" : "shell(/bin/true)", 406 | "dfs.ha.namenodes.{{ cluster_name }}" : "nn1,nn2", 407 | "dfs.namenode.http-address" : "{{ groups[cluster_name+'_mn01'][0] }}:50070", 408 | "dfs.namenode.http-address.{{ cluster_name }}.nn1" : "{{ groups[cluster_name+'_mn01'][0] }}:50070", 409 | "dfs.namenode.http-address.{{ cluster_name }}.nn2" : "{{ groups[cluster_name+'_mn02'][0] }}:50070", 410 | "dfs.namenode.https-address" : "{{ groups[cluster_name+'_mn01'][0] }}:50470", 411 | "dfs.namenode.https-address.{{ cluster_name }}.nn1" : "{{ groups[cluster_name+'_mn01'][0] }}:50470", 412 | "dfs.namenode.https-address.{{ cluster_name }}.nn2" : "{{ groups[cluster_name+'_mn02'][0] }}:50470", 413 | "dfs.namenode.rpc-address.{{ cluster_name }}.nn1" : "{{ groups[cluster_name+'_mn01'][0] }}:8020", 414 | "dfs.namenode.rpc-address.{{ cluster_name }}.nn2" : "{{ groups[cluster_name+'_mn02'][0] }}:8020", 415 | "dfs.namenode.shared.edits.dir" : "qjournal://{{ groups[cluster_name+'_mn01'][0] }}:8485;{{ groups[cluster_name+'_mn02'][0] }}:8485;{{ groups[cluster_name+'_mn03'][0] }}:8485/{{ cluster_name }}", 416 | "dfs.nameservices" : "{{ cluster_name }}", 417 | {% endif %} 418 | {% if 'RANGER' in blueprint_services %} 419 | "dfs.namenode.inode.attributes.provider.class" : "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer", 420 | {% endif %} 421 | "dfs.cluster.administrators" : "hdfs" 422 | } 423 | } 424 | }, 425 | { 426 | "ams-hbase-site": { 427 | "properties": { 428 | } 429 | } 430 | } 431 | ], 432 | "Blueprints" : { 433 | {% if 'KERBEROS' in blueprint_services %} 434 | "security" : { 435 | "type" : "KERBEROS" 436 | }, 437 | {% endif %} 438 | "stack_name" : "HDP", 439 | "stack_version" : "{{ hdp_stack }}" 440 | }, 441 | "host_groups" : [ 442 | { 443 | "name" : "mn03", 444 | "configurations" : [ ], 445 | "components" : [ 446 | {% if 'KERBEROS' in blueprint_services %} 447 | { 448 | "name" : "KERBEROS_CLIENT" 449 | }, 450 | {% endif %} 451 | {% if 'RANGER' in blueprint_services %} 452 | { 453 | "name" : "RANGER_USERSYNC" 454 | }, 455 | { 456 | "name" : "RANGER_ADMIN" 457 | }, 458 | {% endif %} 459 | { 460 | "name" : "ZOOKEEPER_SERVER" 461 | }, 462 | { 463 | "name" : "ZOOKEEPER_CLIENT" 464 | }, 465 | {% if 'OOZIE' in blueprint_services %} 466 | {% if 'FALCON' in blueprint_services %} 467 | { 468 | "name" : "FALCON_SERVER" 469 | }, 470 | { 471 | "name" : "FALCON_CLIENT" 472 | }, 473 | {% endif %} 474 | { 475 | "name" : "OOZIE_SERVER" 476 | }, 477 | { 478 | "name" : "OOZIE_CLIENT" 479 | }, 480 | {% endif %} 481 | { 482 | "name" : "PIG" 483 | }, 484 | {% if 'SPARK' in blueprint_services %} 485 | { 486 | "name" : "SPARK_CLIENT" 487 | }, 488 | {% endif %} 489 | { 490 | "name" : 
"HIVE_SERVER" 491 | }, 492 | { 493 | "name" : "HCAT" 494 | }, 495 | { 496 | "name" : "METRICS_MONITOR" 497 | }, 498 | { 499 | "name" : "TEZ_CLIENT" 500 | }, 501 | { 502 | "name" : "HIVE_METASTORE" 503 | }, 504 | {% if 'NAMENODE' in ha_services %} 505 | { 506 | "name" : "JOURNALNODE" 507 | }, 508 | {% endif %} 509 | { 510 | "name" : "AMBARI_SERVER" 511 | }, 512 | { 513 | "name" : "SQOOP" 514 | }, 515 | { 516 | "name" : "HIVE_CLIENT" 517 | }, 518 | { 519 | "name" : "HDFS_CLIENT" 520 | }, 521 | { 522 | "name" : "YARN_CLIENT" 523 | }, 524 | { 525 | "name" : "METRICS_COLLECTOR" 526 | }, 527 | { 528 | "name" : "MAPREDUCE2_CLIENT" 529 | }, 530 | { 531 | "name" : "WEBHCAT_SERVER" 532 | } 533 | ], 534 | "cardinality" : "1" 535 | }, 536 | { 537 | "name" : "datanode", 538 | "configurations" : [ ], 539 | "components" : [ 540 | {% if 'KERBEROS' in blueprint_services %} 541 | { 542 | "name" : "KERBEROS_CLIENT" 543 | }, 544 | {% endif %} 545 | { 546 | "name" : "NODEMANAGER" 547 | }, 548 | { 549 | "name" : "HDFS_CLIENT" 550 | }, 551 | { 552 | "name" : "METRICS_MONITOR" 553 | }, 554 | {% if 'KAFKA' in blueprint_services %} 555 | { 556 | "name" : "KAFKA_BROKER" 557 | }, 558 | {% endif %} 559 | {% if 'STORM' in blueprint_services %} 560 | { 561 | "name" : "SUPERVISOR" 562 | }, 563 | {% endif %} 564 | {% if 'HBASE' in blueprint_services %} 565 | { 566 | "name" : "HBASE_REGIONSERVER" 567 | }, 568 | {% endif %} 569 | { 570 | "name" : "DATANODE" 571 | } 572 | ] 573 | }, 574 | { 575 | "name" : "mn01", 576 | "configurations" : [ ], 577 | "components" : [ 578 | {% if 'KERBEROS' in blueprint_services %} 579 | { 580 | "name" : "KERBEROS_CLIENT" 581 | }, 582 | {% endif %} 583 | { 584 | "name" : "ZOOKEEPER_SERVER" 585 | }, 586 | { 587 | "name" : "ZOOKEEPER_CLIENT" 588 | }, 589 | {% if 'STORM' in blueprint_services %} 590 | {% if 'STORM' in ha_services %} 591 | { 592 | "name" : "NIMBUS" 593 | }, 594 | {% endif %} 595 | {% endif %} 596 | {% if 'KAFKA' in blueprint_services %} 597 | {% if 'KAFKA' in ha_services %} 598 | { 599 | "name" : "KAFKA_BROKER" 600 | }, 601 | {% endif %} 602 | {% endif %} 603 | { 604 | "name" : "PIG" 605 | }, 606 | { 607 | "name" : "NAMENODE" 608 | }, 609 | {% if 'NAMENODE' in ha_services %} 610 | { 611 | "name" : "ZKFC" 612 | }, 613 | { 614 | "name" : "JOURNALNODE" 615 | }, 616 | {% endif %} 617 | { 618 | "name" : "HCAT" 619 | }, 620 | { 621 | "name" : "METRICS_MONITOR" 622 | }, 623 | { 624 | "name" : "TEZ_CLIENT" 625 | }, 626 | { 627 | "name" : "SQOOP" 628 | }, 629 | { 630 | "name" : "HIVE_CLIENT" 631 | }, 632 | { 633 | "name" : "HDFS_CLIENT" 634 | }, 635 | { 636 | "name" : "YARN_CLIENT" 637 | }, 638 | { 639 | "name" : "MAPREDUCE2_CLIENT" 640 | }, 641 | {% if 'HBASE' in blueprint_services %} 642 | { 643 | "name" : "HBASE_MASTER" 644 | }, 645 | { 646 | "name" : "HBASE_CLIENT" 647 | }, 648 | {% endif %} 649 | { 650 | "name" : "RESOURCEMANAGER" 651 | } 652 | ], 653 | "cardinality" : "1" 654 | }, 655 | { 656 | "name" : "jobserver", 657 | "configurations" : [ ], 658 | "components" : [ 659 | {% if 'KERBEROS' in blueprint_services %} 660 | { 661 | "name" : "KERBEROS_CLIENT" 662 | }, 663 | {% endif %} 664 | { 665 | "name" : "ZOOKEEPER_CLIENT" 666 | }, 667 | { 668 | "name" : "METRICS_MONITOR" 669 | }, 670 | { 671 | "name" : "TEZ_CLIENT" 672 | }, 673 | { 674 | "name" : "SQOOP" 675 | }, 676 | { 677 | "name" : "HIVE_CLIENT" 678 | }, 679 | { 680 | "name" : "HDFS_CLIENT" 681 | }, 682 | { 683 | "name" : "YARN_CLIENT" 684 | }, 685 | { 686 | "name" : "MAPREDUCE2_CLIENT" 687 | } 688 | ] 689 | }, 690 | { 691 | 
"name" : "mn02", 692 | "configurations" : [ ], 693 | "components" : [ 694 | {% if 'KERBEROS' in blueprint_services %} 695 | { 696 | "name" : "KERBEROS_CLIENT" 697 | }, 698 | {% endif %} 699 | { 700 | "name" : "ZOOKEEPER_SERVER" 701 | }, 702 | { 703 | "name" : "ZOOKEEPER_CLIENT" 704 | }, 705 | {% if 'STORM' in blueprint_services %} 706 | { 707 | "name" : "NIMBUS" 708 | }, 709 | { 710 | "name" : "DRPC_SERVER" 711 | }, 712 | { 713 | "name" : "STORM_UI_SERVER" 714 | }, 715 | {% endif %} 716 | { 717 | "name" : "PIG" 718 | }, 719 | { 720 | "name" : "HISTORYSERVER" 721 | }, 722 | { 723 | "name" : "HCAT" 724 | }, 725 | { 726 | "name" : "METRICS_MONITOR" 727 | }, 728 | {% if 'NAMENODE' in ha_services %} 729 | { 730 | "name" : "ZKFC" 731 | }, 732 | { 733 | "name" : "JOURNALNODE" 734 | }, 735 | { 736 | "name" : "NAMENODE" 737 | }, 738 | {% else %} 739 | { 740 | "name" : "SECONDARY_NAMENODE" 741 | }, 742 | {% endif %} 743 | { 744 | "name" : "TEZ_CLIENT" 745 | }, 746 | { 747 | "name" : "APP_TIMELINE_SERVER" 748 | }, 749 | { 750 | "name" : "SQOOP" 751 | }, 752 | { 753 | "name" : "HIVE_CLIENT" 754 | }, 755 | { 756 | "name" : "HDFS_CLIENT" 757 | }, 758 | { 759 | "name" : "YARN_CLIENT" 760 | }, 761 | {% if 'HBASE' in blueprint_services %} 762 | { 763 | "name" : "HBASE_CLIENT" 764 | }, 765 | {% endif %} 766 | {% if 'SPARK' in blueprint_services %} 767 | { 768 | "name" : "SPARK_JOBHISTORYSERVER" 769 | }, 770 | { 771 | "name" : "SPARK_CLIENT" 772 | }, 773 | {% endif %} 774 | { 775 | "name" : "MAPREDUCE2_CLIENT" 776 | } 777 | ], 778 | "cardinality" : "1" 779 | } 780 | ] 781 | } 782 | -------------------------------------------------------------------------------- /blueprints/bare_cluster.ct.j2: -------------------------------------------------------------------------------- 1 | { 2 | "default_password" : "{{ blueprint_default_password }}", 3 | "blueprint" : "{{ cluster_name }}", 4 | {% if 'KERBEROS' in blueprint_services %} 5 | "credentials" : [ 6 | { 7 | "alias" : "kdc.admin.credential", 8 | "principal" : "{{ kdc_admin_username }}/admin", 9 | "key" : "{{ kdc_admin_password }}", 10 | "type" : "TEMPORARY" 11 | } 12 | ], 13 | "security" : { 14 | "type" : "KERBEROS" 15 | }, 16 | {% endif %} 17 | "configurations" : [ 18 | {% if 'OOZIE' in blueprint_services %} 19 | { 20 | "oozie-site" : { 21 | "oozie.service.JPAService.jdbc.password" : "{{ mysql_oozie_password }}" 22 | } 23 | }, 24 | {% endif %} 25 | { 26 | "hive-site" : { 27 | "javax.jdo.option.ConnectionPassword" : "{{ mysql_hive_password }}" 28 | } 29 | } 30 | ], 31 | "host_groups" :[ 32 | { 33 | "name" : "mn01", 34 | "hosts" : [ 35 | { 36 | "fqdn" : "{{ groups[cluster_name+'_mn01'][0] }}" 37 | } 38 | ] 39 | }, 40 | { 41 | "name" : "mn02", 42 | "hosts" : [ 43 | { 44 | "fqdn" : "{{ groups[cluster_name+'_mn02'][0] }}" 45 | } 46 | ] 47 | }, 48 | { 49 | "name" : "mn03", 50 | "hosts" : [ 51 | { 52 | "fqdn" : "{{ groups[cluster_name+'_mn03'][0] }}" 53 | } 54 | ] 55 | }, 56 | { 57 | "name" : "datanode", 58 | "hosts" : [ 59 | {% for host in groups[cluster_name+'_datanode'] %} 60 | { 61 | "fqdn" : "{{ host }}" 62 | } 63 | {% if not loop.last %} 64 | , 65 | {% endif %} 66 | {% endfor %} 67 | ] 68 | } 69 | ] 70 | } 71 | -------------------------------------------------------------------------------- /filter_plugins/hashpass.py: -------------------------------------------------------------------------------- 1 | from ansible import errors 2 | import crypt 3 | 4 | def hashpass(password): 5 | return crypt.crypt(password,'\$6\$\$') 6 | 7 | class FilterModule(object): 8 | ''' 
Filter plugin for hashing a password for use in user module ''' 9 | def filters(self): 10 | return { 11 | 'hashpass': hashpass 12 | } 13 | -------------------------------------------------------------------------------- /filter_plugins/ifelse.py: -------------------------------------------------------------------------------- 1 | from ansible import errors 2 | 3 | def ifelse(conditional,restrue,resfalse): 4 | if conditional: 5 | return restrue 6 | else: 7 | return resfalse 8 | 9 | class FilterModule(object): 10 | ''' Filter plugin for if else ''' 11 | def filters(self): 12 | return { 13 | 'ifelse': ifelse 14 | } 15 | -------------------------------------------------------------------------------- /filter_plugins/list2csv.py: -------------------------------------------------------------------------------- 1 | from ansible import errors 2 | 3 | def list2csv(items,sep=','): 4 | try: 5 | csv = sep.join(items) 6 | except: 7 | raise errors.AnsibleFilterError('Cannot convert list to csv: '+str(items) +' with separator: '+sep) 8 | return csv 9 | 10 | class FilterModule(object): 11 | ''' Filter plugin for converting list to csv ''' 12 | def filters(self): 13 | return { 14 | 'list2csv': list2csv 15 | } 16 | -------------------------------------------------------------------------------- /filter_plugins/uniqekeyvalues.py: -------------------------------------------------------------------------------- 1 | from ansible import errors 2 | 3 | def uniquekeyvalues(data,key): 4 | def get_unique_key_values(data,key): 5 | values = set() 6 | if isinstance(data, dict): 7 | for dict_key,dict_value in data.iteritems(): 8 | if key == dict_key: 9 | values.add(data[key]) 10 | else: 11 | values = values | get_unique_key_values(dict_value,key) 12 | elif isinstance(data, list): 13 | for elem in data: 14 | values = values | get_unique_key_values(elem,key) 15 | return values 16 | return list(get_unique_key_values(data,key)) 17 | 18 | 19 | class FilterModule(object): 20 | ''' Filter plugin for getting unique key values from a complex data structure ''' 21 | def filters(self): 22 | return { 23 | 'uniquekeyvalues': uniquekeyvalues 24 | } 25 | -------------------------------------------------------------------------------- /group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | #dns_enabled: yes means Ansible will NOT add entries for every node to each host's hosts file; 3 | #it assumes you already have a working DNS configuration. Set it to no to have Ansible manage the hosts file. 4 | dns_enabled: no 5 | # Kerberos configuration. 6 | krb_realm: HADOOP.TEST 7 | krb_domain: vagrant 8 | #Oracle JDK versions 9 | #Check roles/oracle-java/tasks/set_vars.yml for allowed values 10 | java_version: 7 11 | java_subversion: 75 12 | #Ambari server version must match a repo version in repo_files 13 | ambari_version: 2.4.0.1 14 | mysql_connector_version: 5.1.39 15 | mysql_port: 3306 16 | #HDP version must match an HDP/OS version in group_vars/ambariserver. You can give custom stack versions by defining them in group_vars/ambariserver. 17 | hdp_version: 2.3.4.0 18 | #KERBEROS, RANGER, SPARK, HBASE, STORM, KAFKA, OOZIE and FALCON are optional.
Omit them from this list to prevent them from being installed 19 | #blueprint_services: "KERBEROS,RANGER,SPARK,HBASE,STORM,KAFKA,HDFS,OOZIE,HIVE,FALCON,MAPREDUCE2,PIG,SQOOP,TEZ,YARN,ZOOKEEPER" 20 | blueprint_services: "RANGER,KAFKA,STORM,KERBEROS,HDFS,HIVE,MAPREDUCE2,PIG,SQOOP,TEZ,YARN,ZOOKEEPER" 21 | #NAMENODE, STORM and KAFKA can be optionally HA. Omit them from this list to prevent these services from being HA 22 | #ha_services: "NAMENODE,STORM,KAFKA" 23 | ha_services: "KAFKA" 24 | #ssl_services: "RANGER,STORMUI" 25 | ssl_services: "" 26 | #Whether to use the Ambari stack recommendations API 27 | use_ambari_recommendations: yes 28 | #Needed for the Grafana password 29 | blueprint_default_password: bigdata 30 | #Directory and Kerberos services 31 | kerberos_service: mitkdc 32 | directory_service: openldap 33 | -------------------------------------------------------------------------------- /group_vars/ambariserver: -------------------------------------------------------------------------------- 1 | --- 2 | mysql_ambari_database: ambari 3 | mysql_ambari_user: ambari 4 | mysql_ambari_password: bigdata 5 | mysql_hive_database: hive 6 | mysql_hive_user: hive 7 | mysql_hive_password: hive 8 | mysql_oozie_database: oozie 9 | mysql_oozie_user: oozie 10 | mysql_oozie_password: oozie 11 | mysql_ranger_database: ranger 12 | mysql_ranger_user: ranger 13 | mysql_ranger_password: ranger 14 | mysql_rangeraudit_database: rangeraudit 15 | mysql_rangeraudit_user: rangeraudit 16 | mysql_rangeraudit_password: rangeraudit 17 | mysql_root_user: root 18 | mysql_root_password: bigdata 19 | restart_ambari_server_after_installs: true 20 | hdp_stack: "{{ hdp_version[0:3] }}" 21 | hdp_repo_config: 22 | centos6: 23 | 2.3.4.0: http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.4.0 24 | 2.3.4.7: http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.4.7 25 | 2.3.4.14-9: http://private-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.4.14-9 26 | 2.4.2.0: http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.4.2.0 27 | centos7: 28 | 2.3.4.0: http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.3.4.0 29 | 2.3.4.7: http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.3.4.7 30 | 2.4.2.0: http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.4.2.0 31 | hdp_stack_repo: "{{ hdp_repo_config[ansible_distribution|lower+ansible_distribution_major_version][hdp_version] }}" 32 | -------------------------------------------------------------------------------- /inventories/example_inventory: -------------------------------------------------------------------------------- 1 | # Build a hierarchy of groups. This way we can run the playbook across a directory of cluster inventory files 2 | # without the risk of hitting several clusters at once.
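# Naming convention (a sketch of the layout described in the README): each host
# sits in three kinds of groups, the cluster group (examplecluster), a bare
# service group (e.g. ambariserver) and a clustername_service group
# (e.g. examplecluster_ambariserver), so a play can target one cluster's
# service without touching any other inventory.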
3 | [examplecluster:children] 4 | examplecluster_datanode 5 | examplecluster_directoryservices 6 | examplecluster_ambariserver 7 | examplecluster_mn01 8 | examplecluster_mn02 9 | examplecluster_mn03 10 | examplecluster_mysql 11 | 12 | [datanode:children] 13 | examplecluster_datanode 14 | 15 | [directoryservices:children] 16 | examplecluster_directoryservices 17 | 18 | [ambariserver:children] 19 | examplecluster_ambariserver 20 | 21 | [mn01:children] 22 | examplecluster_mn01 23 | 24 | [mn02:children] 25 | examplecluster_mn02 26 | 27 | [mn03:children] 28 | examplecluster_mn03 29 | 30 | [mysql:children] 31 | examplecluster_mysql 32 | 33 | # Node definitions begin here 34 | [examplecluster_datanode] 35 | exampledn01.example.com 36 | exampledn02.example.com 37 | exampledn03.example.com 38 | 39 | [examplecluster_directoryservices] 40 | examplemn01.example.com 41 | 42 | [examplecluster_ambariserver] 43 | examplemn03.example.com 44 | 45 | [examplecluster_mn01] 46 | examplemn01.example.com 47 | 48 | [examplecluster_mn02] 49 | examplemn02.example.com 50 | 51 | [examplecluster_mn03] 52 | examplemn03.example.com 53 | 54 | [examplecluster_mysql] 55 | examplemn03.example.com 56 | -------------------------------------------------------------------------------- /inventories/vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | 3 | # vi: set ft=ruby : 4 | 5 | require 'open3' 6 | require 'json' 7 | require 'yaml' 8 | 9 | #Load dynamic inventory file 10 | stdin, stdout, stderr, wait_thr = Open3.popen3('python ./inventory.py --vagrant') 11 | output = stdout.gets(nil) 12 | stdout.close 13 | err_output = stderr.gets(nil) 14 | stderr.close 15 | exit_code = wait_thr.value 16 | if exit_code != 0 17 | puts "Error running dynamic inventory:\n"+err_output 18 | exit 1 19 | end 20 | boxes = JSON.parse(output) 21 | 22 | #Check environment variables are set 23 | vagrant_os = ENV['OS_VERSION'] 24 | if vagrant_os == nil 25 | puts "Exported environment variable OS_VERSION not set. Please see boxes.json for valid values." 26 | exit 1 27 | end 28 | 29 | vagrant_hdp = ENV['HDP_VERSION'] 30 | if vagrant_hdp == nil 31 | puts "Exported environment variable HDP_VERSION not set. Please see boxes.json for valid values." 32 | exit 1 33 | end 34 | 35 | #Load box config and choose correct one 36 | box_def_file = File.read('boxes.json') 37 | box_def = JSON.parse(box_def_file) 38 | vagrant_box_os = box_def[vagrant_os] 39 | if vagrant_box_os == nil 40 | puts "No box found for given OS_VERSION. Please see boxes.json for valid values." 41 | exit 1 42 | end 43 | vagrant_box = vagrant_box_os[vagrant_hdp] 44 | if vagrant_box == nil 45 | puts "No box found for given HDP_VERSION. Please see boxes.json for valid values." 
46 | exit 1 47 | end 48 | 49 | #Vagrant configuration 50 | Vagrant.configure(2) do |config| 51 | config.ssh.insert_key = false 52 | 53 | # Turn off shared folders 54 | config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true 55 | 56 | boxes.each do |host, opts| 57 | config.vm.define host do |config| 58 | config.vm.hostname = host 59 | 60 | config.vm.provider "vmware_fusion" do |v| 61 | v.vmx["memsize"] = opts["mem"] 62 | v.vmx["numvcpus"] = opts["cpu"] 63 | end 64 | 65 | config.vm.provider "virtualbox" do |v| 66 | v.customize ["modifyvm", :id, "--memory", opts["mem"]] 67 | v.customize ["modifyvm", :id, "--cpus", opts["cpu"]] 68 | v.customize ["modifyvm", :id, "--ioapic", "on"] 69 | v.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"] 70 | v.customize ["modifyvm", :id, "--nicpromisc1", "allow-all"] 71 | end 72 | 73 | config.vm.network :private_network, ip: opts["ip"] 74 | config.vm.box = vagrant_box 75 | 76 | end 77 | end 78 | config.vm.provision "ansible" do |ansible| 79 | ansible.playbook = "pb_vagrant_provision.yml" 80 | ansible.inventory_path = "./inventory.py" 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /inventories/vagrant/boxes.json: -------------------------------------------------------------------------------- 1 | { 2 | "centos6" : { 3 | "_comment" : "You can use the following box where specific builds haven't been created: puppetlabs/centos-6.6-64-nocm", 4 | "HDP-2.3.4.0" : "bushnoh/centos-6-6-HDP-2.3.4.0-x64", 5 | "HDP-2.3.4.7" : "bushnoh/centos-6-6-HDP-2.3.4.7-x64", 6 | "HDP-2.3.4.14-9" : "bushnoh/centos-6-6-HDP-2.3.4.14-9-x64", 7 | "HDP-2.4.2.0" : "bushnoh/centos-6-6-HDP-2.4.2.0-x64", 8 | "HDP-2.5.0.0" : "bushnoh/centos-6-6-HDP-2.5.0.0-x64" 9 | }, 10 | "centos7" : { 11 | "_comment" : "You can use the following box where specific builds haven't been created: puppetlabs/centos-7.0-64-nocm", 12 | "HDP-2.4.2.0" : "bushnoh/centos-7-0-HDP-2.4.2.0-x64", 13 | "HDP-2.5.0.0" : "bushnoh/centos-7-0-HDP-2.5.0.0-x64" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /inventories/vagrant/inventory.cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | #Location of database json file relative to dynamic inventory file 3 | database=vagrant.json 4 | #Overwrite host variables that already exist in the database 5 | ignore_existing_host_variables=False 6 | #SSH user 7 | ansible_ssh_user=vagrant 8 | #Private key file 9 | ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key 10 | #Group hosts by cluster_name variable 11 | group_by_cluster_name=True 12 | #Prepend groups by cluster name 13 | prepend_groups_by_cluster_name=True 14 | -------------------------------------------------------------------------------- /inventories/vagrant/inventory.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | from __future__ import print_function 3 | import optparse,json,sys,os,ConfigParser,pprint 4 | 5 | #Dynamic inventory wrapper for clusters 6 | 7 | def parse_opts(): 8 | parser = optparse.OptionParser() 9 | parser.add_option("--list", dest="list", default=False, action="store_true", 10 | help="List groups and hosts in groups") 11 | parser.add_option("--vagrant", dest="vagrant", default=False, action="store_true", 12 | help="Output vagrant file syntax") 13 | parser.add_option("--hosts-file", dest="hosts_file", default=False, action="store_true", 14 | help="Output hosts file listing of hosts") 15 |
parser.add_option("--host", dest="host", help="Details for a given host", metavar='HOST') 16 | (options, args) = parser.parse_args() 17 | #if options.list and options.host: 18 | if sum([1 for opt in [options.list, options.vagrant, options.host, options.hosts_file] if opt]) > 1: 19 | print('Please only specify one commandline argument',file=sys.stderr) 20 | parser.print_help() 21 | sys.exit(1) 22 | if options.list: 23 | return('list','') 24 | elif options.host: 25 | return('host',options.host) 26 | elif options.vagrant: 27 | return('vagrant','') 28 | elif options.hosts_file: 29 | return('hosts_file','') 30 | else: 31 | print('Please specify an action',file=sys.stderr) 32 | parser.print_help() 33 | sys.exit(1) 34 | 35 | def read_config(): 36 | python_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.basename(__file__)) 37 | python_file = os.path.expanduser(os.path.expandvars(python_file)) 38 | if not python_file.endswith('.py'): 39 | print('ERROR: this script must end with .py',file=sys.stderr) 40 | sys.exit(1) 41 | config_file = python_file[:-3]+'.cfg' 42 | if not os.path.exists(config_file): 43 | print('ERROR: config file could not be found: '+config_file,file=sys.stderr) 44 | sys.exit(1) 45 | config = ConfigParser.RawConfigParser() 46 | try: 47 | config.read(config_file) 48 | except: 49 | print('ERROR: config file could not be read: '+config_file,file=sys.stderr) 50 | sys.exit(3) 51 | configs=dict() 52 | configs['database_file'] = os.path.dirname(config_file)+'/'+config.get('default','database') 53 | def get_config_boolean(config_object,key,default=False): 54 | if config.has_option('default',key): 55 | value = config.getboolean('default',key) 56 | else: 57 | value = default 58 | return value 59 | configs['ignore_existing'] = get_config_boolean(config,'ignore_existing_host_variables') 60 | configs['group_by_cluster_name'] = get_config_boolean(config,'group_by_cluster_name') 61 | configs['prepend_group_with_cluster_name'] = get_config_boolean(config,'prepend_groups_by_cluster_name') 62 | host_vars=['ansible_ssh_user','ansible_ssh_private_key_file'] 63 | configs['host_vars']=dict() 64 | for var in host_vars: 65 | if config.has_option('default',var): 66 | configs['host_vars'][var] = config.get('default',var) 67 | return configs 68 | 69 | def inventory_consistency_check(inventory): 70 | pass 71 | 72 | def load_db(config): 73 | if not os.path.exists(config['database_file']): 74 | print('ERROR: cannot read database file: '+config['database_file'],file=sys.stderr) 75 | sys.exit(1) 76 | try: 77 | dbfile = open(config['database_file'],'r') 78 | except: 79 | print('ERROR: cannot read database file: '+config['database_file'],file=sys.stderr) 80 | exit(3) 81 | database = json.load(dbfile) 82 | dbfile.close() 83 | inventory_consistency_check(database) 84 | for host in database['hosts'].keys(): 85 | for host_var, host_var_val in config['host_vars'].iteritems(): 86 | if not host_var in database['hosts'][host].keys() or config['ignore_existing']: 87 | database['hosts'][host][host_var] = host_var_val 88 | return database 89 | 90 | def _add_host_to_group(groups,group_name,host): 91 | if not group_name in groups.keys(): 92 | groups[group_name] = list() 93 | groups[group_name].append(host) 94 | elif type(groups[group_name]) is list: 95 | groups[group_name].append(host) 96 | else: 97 | groups[group_name]['hosts'].append(host) 98 | return groups 99 | 100 | def _get_hosts_in_group(group): 101 | if type(group) is list: 102 | return group 103 | else: 104 | return group['hosts'] 105 | 106 | 
def list_action(inventory,configs): 107 | groups = inventory['groups'] 108 | if configs['group_by_cluster_name'] or configs['prepend_group_with_cluster_name']: 109 | hosts = inventory['hosts'] 110 | for host,hostvars in hosts.iteritems(): 111 | if 'cluster_name' in hostvars.keys(): 112 | cluster_name = hostvars['cluster_name'] 113 | if configs['group_by_cluster_name']: 114 | groups = _add_host_to_group(groups,cluster_name,host) 115 | if configs['prepend_group_with_cluster_name']: 116 | for group in groups.keys(): 117 | if not group.startswith(cluster_name) and host in _get_hosts_in_group(groups[group]): 118 | groups = _add_host_to_group(groups,cluster_name+'_'+group,host) 119 | print(json.dumps(groups,sort_keys=True,indent=4)) 120 | 121 | def host_action(inventory,host): 122 | hosts = inventory['hosts'] 123 | if not host in hosts.keys(): 124 | print('Host not found in hostlist',file=sys.stderr) 125 | sys.exit(1) 126 | print(json.dumps(hosts[host],sort_keys=True,indent=4)) 127 | 128 | def vagrant_action(inventory): 129 | hosts = inventory['hosts'] 130 | print(json.dumps(hosts,sort_keys=True,indent=4)) 131 | 132 | def hosts_file_action(inventory): 133 | hosts = inventory['hosts'] 134 | for host,hostvars in hosts.iteritems(): 135 | if 'ip' in hostvars.keys(): 136 | print(hostvars['ip']+' '+host) 137 | 138 | def main(): 139 | config = read_config() 140 | inventory = load_db(config) 141 | action,arg = parse_opts() 142 | if action == 'list': 143 | list_action(inventory,config) 144 | elif action == 'host': 145 | host_action(inventory,arg) 146 | elif action == 'vagrant': 147 | vagrant_action(inventory) 148 | elif action == 'hosts_file': 149 | hosts_file_action(inventory) 150 | 151 | if __name__ == '__main__': 152 | main() 153 | 154 | -------------------------------------------------------------------------------- /inventories/vagrant/pb_vagrant_provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: put entries in hosts file 3 | hosts: all 4 | roles: 5 | - hosts_file 6 | -------------------------------------------------------------------------------- /inventories/vagrant/roles/hosts_file/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: remove the vagrant binding to 127.0.0.1, which causes hadoop to listen on the wrong address 3 | lineinfile: > 4 | dest=/etc/hosts regexp="^127\.0\.0\.1.*" 5 | line="127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4" 6 | sudo: yes 7 | -------------------------------------------------------------------------------- /inventories/vagrant/vagrant.json: -------------------------------------------------------------------------------- 1 | { 2 | "hosts": 3 | { 4 | "mn01.vagrant": 5 | { 6 | "ip": "192.168.205.10", 7 | "mem": "2048", 8 | "cluster_name": "vagrantcluster", 9 | "cpu": "2" 10 | }, 11 | "mn02.vagrant": 12 | { 13 | "ip": "192.168.205.11", 14 | "mem": "2048", 15 | "cluster_name": "vagrantcluster", 16 | "cpu": "2" 17 | }, 18 | "mn03.vagrant": 19 | { 20 | "ip": "192.168.205.12", 21 | "mem": "5120", 22 | "cluster_name": "vagrantcluster", 23 | "cpu": "3" 24 | }, 25 | "dn01.vagrant": 26 | { 27 | "ip": "192.168.205.13", 28 | "mem": "1024", 29 | "cluster_name": "vagrantcluster", 30 | "cpu": "1" 31 | } 32 | }, 33 | "groups": 34 | { 35 | "mn01": 36 | { 37 | "hosts": [ "mn01.vagrant" ], 38 | "vars": 39 | { 40 | "mn01_var": "var" 41 | } 42 | }, 43 | "mn02": 44 | { 45 | "hosts": [ "mn02.vagrant" ] 46 | }, 47 | "mn03": [ 
"mn03.vagrant" ], 48 | "ambariserver": [ "mn03.vagrant" ], 49 | "directoryservices": [ "mn01.vagrant" ], 50 | "datanode": [ "dn01.vagrant" ], 51 | "mysql": 52 | { 53 | "hosts": [ "mn03.vagrant" ] 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /library/blueprints: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import urllib 3 | import urllib2,base64,json 4 | import os.path 5 | import time 6 | 7 | # Function to order blueprints for comparison 8 | def ordered(obj): 9 | if isinstance(obj, dict): 10 | return sorted((k, ordered(v)) for k, v in obj.items()) 11 | if isinstance(obj, list): 12 | return sorted(ordered(x) for x in obj) 13 | else: 14 | return obj 15 | 16 | # Set headers, including authentication headers 17 | def get_headers(username,password): 18 | base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '') 19 | headers = { 'X-Requested-By': 'ambari', 'Authorization': 'Basic '+base64string } 20 | return headers 21 | 22 | # Return list of cluster names 23 | def get_cluster_names(ambari_address, ambari_port, username, password): 24 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters' 25 | req = urllib2.Request(url, headers=get_headers(username,password)) 26 | response = urllib2.urlopen(req) 27 | results = json.loads(response.read()) 28 | cluster_names = list() 29 | for c_entry in results['items']: 30 | cluster_names.append(c_entry['Clusters']['cluster_name']) 31 | return cluster_names 32 | 33 | # Return list of blueprint names 34 | def get_blueprint_names(ambari_address, ambari_port, username, password): 35 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/blueprints' 36 | req = urllib2.Request(url, headers=get_headers(username,password)) 37 | response = urllib2.urlopen(req) 38 | results = json.loads(response.read()) 39 | blueprint_names = list() 40 | for bp_entry in results['items']: 41 | blueprint_names.append(bp_entry['Blueprints']['blueprint_name']) 42 | return blueprint_names 43 | 44 | # Get a specific blueprint 45 | def get_blueprint(blueprint_name, ambari_address, ambari_port, username, password): 46 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/blueprints/'+blueprint_name 47 | req = urllib2.Request(url, headers=get_headers(username,password)) 48 | response = urllib2.urlopen(req) 49 | results = json.loads(response.read()) 50 | return results 51 | 52 | # Get list of services supported by the stack 53 | def get_stack_services(ambari_address, ambari_port, username, password, stack): 54 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/stacks/HDP/versions/'+str(stack)+'/services' 55 | req = urllib2.Request(url, headers=get_headers(username,password)) 56 | response = urllib2.urlopen(req) 57 | results = json.loads(response.read()) 58 | services = list() 59 | for service in results['items']: 60 | services.append(service['StackServices']['service_name']) 61 | return services 62 | 63 | # Get blueprint recommendation for this service list and stack version 64 | def get_recommendation( ambari_address, ambari_port, username, password, stack, hosts, service_list): 65 | allowed_services = get_stack_services(ambari_address, ambari_port, username, password, stack) 66 | for service in service_list: 67 | if service not in allowed_services: 68 | module.fail_json(msg='Service not allowed with this stack version:' +str(service) 69 | +' allowed services are: '+str(allowed_services)) 70 | payload = { 'recommend': 
'configurations', 'hosts': hosts, 'services': service_list } 71 | payload_json = json.dumps(payload) 72 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/stacks/HDP/versions/'+str(stack)+'/recommendations' 73 | req = urllib2.Request(url, data=payload_json, headers=get_headers(username,password)) 74 | try: 75 | response = urllib2.urlopen(req) 76 | except urllib2.HTTPError, error: 77 | module.fail_json(msg='Failed to get recommendation: '+str(error.read())+' with config: '+payload_json) 78 | results = json.loads(response.read()) 79 | return results 80 | 81 | # Merge these recommendations with blueprint, our blueprint takes preference 82 | def get_merged_recommendation(blueprint_s, ambari_address, ambari_port, username, password, stack, host_list, service_list): 83 | blueprint = json.loads(blueprint_s) 84 | recommended = get_recommendation(ambari_address, ambari_port, username, password, stack, host_list, service_list) 85 | for r_key, r_value in recommended['resources'][0]['recommendations']['blueprint']['configurations'].iteritems(): 86 | conf_item = None 87 | for i in xrange(len(blueprint['configurations'])): 88 | if r_key in blueprint['configurations'][i].keys(): 89 | conf_item = i 90 | if conf_item is None: 91 | blueprint['configurations'].append({ r_key: { 'properties': { } } }) 92 | conf_item = len(blueprint['configurations'])-1 93 | for p_key, p_value in r_value['properties'].iteritems(): 94 | if not p_key in blueprint['configurations'][conf_item][r_key]['properties'].keys(): 95 | blueprint['configurations'][conf_item][r_key]['properties'][p_key] = p_value 96 | return json.dumps(blueprint) 97 | 98 | def get_registered_hosts(ambari_address, ambari_port, username, password): 99 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/hosts' 100 | req = urllib2.Request(url, headers=get_headers(username,password)) 101 | response = urllib2.urlopen(req) 102 | results = json.loads(response.read()) 103 | hosts = list() 104 | for host in results['items']: 105 | hosts.append(host['Hosts']['host_name']) 106 | return hosts 107 | 108 | def get_not_registered_hosts(ambari_address, ambari_port, username, password, host_list): 109 | reg_hosts = get_registered_hosts(ambari_address, ambari_port, username, password) 110 | missing_hosts = [item for item in host_list if item not in reg_hosts] 111 | return missing_hosts 112 | 113 | def wait_for_api(ambari_address, ambari_port, username, password,timeout): 114 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/hosts' 115 | req = urllib2.Request(url, headers=get_headers(username,password)) 116 | start = time.time() 117 | while True: 118 | try: 119 | response = urllib2.urlopen(req) 120 | except: 121 | pass 122 | else: 123 | return 124 | if timeout != 0 and (time.time() - start) > timeout: 125 | module.fail_json(msg='Waiting for Ambari API to be ready has timed out after: '+str(time.time() - start)+'s') 126 | time.sleep(3) 127 | 128 | def wait_for_registered_hosts(ambari_address, ambari_port, username, password, host_list, timeout): 129 | start = time.time() 130 | while True: 131 | missing_hosts = get_not_registered_hosts(ambari_address, ambari_port, username, password, host_list) 132 | if not missing_hosts: 133 | return True 134 | elif timeout != 0 and (time.time() - start) > timeout: 135 | module.fail_json(msg='Register has timed out after: '+str(time.time() - start)+'s'+ 136 | ' for hosts: '+','.join(missing_hosts)) 137 | time.sleep(3) 138 | 139 | # Get current baseurl for ambari stack 140 | def get_base_url(ambari_address, ambari_port, 
username, password, url): 141 | req = urllib2.Request(url, headers=get_headers(username,password)) 142 | response = urllib2.urlopen(req) 143 | results = json.loads(response.read()) 144 | return results['Repositories']['base_url'] 145 | 146 | # Post a new ambari stack url 147 | def post_base_url(ambari_address, ambari_port, username, password,stack_version,repo_id,os_type,base_url): 148 | os_type = os_type.replace('centos','redhat') 149 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/stacks/HDP/versions/'+str(stack_version)+'/operating_systems/'+os_type+'/repositories/HDP-'+str(stack_version) 150 | current_base_url = get_base_url(ambari_address, ambari_port, username, password, url) 151 | if current_base_url == base_url: 152 | return False 153 | payload = { "Repositories": { "base_url": base_url, "verify_base_url": True } } 154 | payload_s = json.dumps(payload) 155 | req = urllib2.Request(url, data=payload_s, headers=get_headers(username,password)) 156 | req.get_method = lambda: 'PUT' 157 | try: 158 | response = urllib2.urlopen(req) 159 | except urllib2.HTTPError, error: 160 | module.fail_json(msg='Failed to post base url to '+url+' with payload:\n'+payload_s+'\n with error: '+str(error.read())) 161 | return True 162 | 163 | # Post a blueprint, return a boolean indicating whether it changed 164 | def post_blueprint(blueprint_name, blueprint_path, ambari_address, ambari_port, username, password, recommend, stack_ver=None, host_list=None, service_list=None): 165 | if not os.path.isfile(blueprint_path): 166 | module.fail_json(msg='Cannot find blueprint file: '+blueprint_path) 167 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/blueprints/'+blueprint_name 168 | # Check if blueprint already exists, and if so get a copy and delete 169 | blueprint_names = get_blueprint_names(ambari_address, ambari_port, username, password) 170 | old_blueprint = None 171 | if blueprint_name in blueprint_names: 172 | old_blueprint = get_blueprint(blueprint_name, ambari_address, ambari_port, username, password) 173 | req = urllib2.Request(url, headers=get_headers(username,password)) 174 | req.get_method = lambda: 'DELETE' 175 | response = urllib2.urlopen(req) 176 | # Read and post new blueprint 177 | blueprint_file = open(blueprint_path,'r') 178 | blueprint = blueprint_file.read() 179 | if recommend: 180 | blueprint = get_merged_recommendation(blueprint, ambari_address, ambari_port, username, password, stack_ver, host_list, service_list) 181 | req = urllib2.Request(url, data=blueprint, headers=get_headers(username,password)) 182 | try: 183 | response = urllib2.urlopen(req) 184 | except urllib2.HTTPError, error: 185 | module.fail_json(msg='Failed to post blueprint: '+str(error.read())) 186 | # Get new blueprint and check if it has changed 187 | new_blueprint = get_blueprint(blueprint_name, ambari_address, ambari_port, username, password) 188 | if ordered(old_blueprint) != ordered(new_blueprint): 189 | return True 190 | else: 191 | return False 192 | 193 | # Get status of the build 194 | def get_build_status(url, username, password): 195 | req = urllib2.Request(url, headers=get_headers(username,password)) 196 | response = urllib2.urlopen(req) 197 | results = json.loads(response.read()) 198 | return (results['Requests']['request_status'],results['Requests']['progress_percent']) 199 | 200 | # Post cluster to ambari 201 | def post_cluster(cluster_name, cluster_path, ambari_address, ambari_port, username, password, wait_for_build, build_timeout): 202 | if not os.path.isfile(cluster_path): 203 | 
module.fail_json(msg='Cannot find cluster file: '+cluster_path) 204 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+cluster_name 205 | # Check if cluster already exists 206 | cluster_names = get_cluster_names(ambari_address, ambari_port, username, password) 207 | if cluster_names: 208 | if cluster_name in cluster_names: 209 | return False 210 | else: 211 | module.fail_json(msg='Cluster already exists with another name: '+str(cluster_names)) 212 | # Read and post cluster 213 | cluster_file = open(cluster_path,'r') 214 | cluster = cluster_file.read() 215 | req = urllib2.Request(url, data=cluster, headers=get_headers(username,password)) 216 | try: 217 | response = urllib2.urlopen(req) 218 | except urllib2.HTTPError, error: 219 | module.fail_json(msg='Failed to build cluster: '+str(error.read())) 220 | results = json.loads(response.read()) 221 | request_url = results['href'] 222 | #Ambari 2.2.1 bug, must wait for tasks to be issued or the build fails 223 | time.sleep(10) 224 | if wait_for_build: 225 | start = time.time() 226 | while True: 227 | status,percentage = get_build_status(request_url, username, password) 228 | if status.lower() == 'completed': 229 | return True 230 | elif status.lower() == 'failed': 231 | module.fail_json(msg='Build has failed after: '+str(time.time() - start)+'s'+ 232 | ' with status: '+ status + ' and percent complete: '+str(percentage)) 233 | elif build_timeout != 0 and (time.time() - start) > build_timeout: 234 | module.fail_json(msg='Build has timed out after: '+str(time.time() - start)+'s'+ 235 | ' with status: '+ status + ' and percent complete: '+str(percentage)) 236 | return request_url 237 | 238 | def main(): 239 | # Use ansible module to parse arguments 240 | global module 241 | module = AnsibleModule( 242 | argument_spec = dict( 243 | action=dict(required=True, choices=['post_base_url','wait_for_ambari_api','wait_for_registered_hosts','post_blueprint','get_blueprint_names','get_cluster_names','post_cluster','get_blueprint']), 244 | path=dict(type='str'), 245 | blueprint_name=dict(type='str'), 246 | cluster_name=dict(type='str'), 247 | stack_version=dict(type='str'), 248 | stack_services=dict(type='str'), 249 | hosts=dict(type='str'), 250 | repo_id=dict(type='str'), 251 | os_type=dict(type='str'), 252 | base_url=dict(type='str'), 253 | ignore_get_error=dict(default=False,type='bool'), 254 | stack_recommendations=dict(default=False,type='bool'), 255 | wait_for_build=dict(default=False,type='bool'), 256 | build_timeout=dict(default=1600,type='int'), 257 | ambari_address=dict(default='localhost', type='str'), 258 | ambari_port=dict(default='8080', type='str'), 259 | username=dict(default='admin', type='str'), 260 | password=dict(default='admin', type='str') 261 | ) 262 | ) 263 | if module.params['action'] == 'wait_for_ambari_api': 264 | wait_for_api(module.params['ambari_address'],module.params['ambari_port'], 265 | module.params['username'], module.params['password'], module.params['build_timeout']) 266 | module.exit_json(changed=False, comments='Ambari API up') 267 | elif module.params['action'] == 'post_base_url': 268 | for argu in ('stack_version','repo_id','os_type','base_url'): 269 | if argu not in module.params.keys(): 270 | module.fail_json(msg='You must specify '+argu+' for this action') 271 | changed = post_base_url(module.params['ambari_address'],module.params['ambari_port'], 272 | module.params['username'],module.params['password'],module.params['stack_version'], 273 | 
module.params['repo_id'],module.params['os_type'],module.params['base_url']) 274 | if changed: 275 | module.exit_json(changed=True, comments='Baseurl posted to Ambari') 276 | else: 277 | module.exit_json(changed=False, comments='Baseurl already in Ambari') 278 | elif module.params['action'] == 'wait_for_registered_hosts': 279 | if 'hosts' not in module.params.keys(): 280 | module.fail_json(msg='You must specify a host list for this action') 281 | host_list = module.params['hosts'].split(',') 282 | wait_for_registered_hosts(module.params['ambari_address'],module.params['ambari_port'], 283 | module.params['username'], module.params['password'], host_list, module.params['build_timeout']) 284 | module.exit_json(changed=False, comments='Hosts registered with Ambari') 285 | elif module.params['action'] == 'post_blueprint': 286 | if 'path' not in module.params.keys(): 287 | module.fail_json(msg='You must specify a blueprint file when posting a blueprint') 288 | if 'blueprint_name' not in module.params.keys(): 289 | module.fail_json(msg='You must specify a blueprint name when posting a blueprint') 290 | if module.params['stack_recommendations'] and not 'stack_version' in module.params.keys(): 291 | module.fail_json(msg='You must specify a stack_version when using stack recommendations in blueprints') 292 | if module.params['stack_recommendations'] and not 'hosts' in module.params.keys(): 293 | module.fail_json(msg='You must specify a hosts list when using stack recommendations in blueprints') 294 | if module.params['stack_recommendations'] and not 'stack_services' in module.params.keys(): 295 | module.fail_json(msg='You must specify a service list when using stack recommendations in blueprints') 296 | stack_version = module.params['stack_version'] if module.params['stack_recommendations'] else None 297 | host_list = module.params['hosts'].split(',') if module.params['stack_recommendations'] else None 298 | service_list = module.params['stack_services'].split(',') if module.params['stack_recommendations'] else None 299 | changed = post_blueprint(module.params['blueprint_name'],module.params['path'], 300 | module.params['ambari_address'],module.params['ambari_port'], 301 | module.params['username'],module.params['password'],module.params['stack_recommendations'], 302 | stack_version, host_list, service_list) 303 | if changed: 304 | module.exit_json(changed=True, comments='Blueprint posted to Ambari') 305 | else: 306 | module.exit_json(changed=False, comments='Blueprint already in Ambari') 307 | elif module.params['action'] == 'post_cluster': 308 | if 'path' not in module.params.keys(): 309 | module.fail_json(msg='You must specify a cluster file when posting a cluster') 310 | if 'cluster_name' not in module.params.keys(): 311 | module.fail_json(msg='You must specify a cluster name when posting a cluster') 312 | changed = post_cluster(module.params['cluster_name'],module.params['path'], 313 | module.params['ambari_address'],module.params['ambari_port'], 314 | module.params['username'],module.params['password'], 315 | module.params['wait_for_build'],module.params['build_timeout']) 316 | if changed and module.params['wait_for_build']: 317 | module.exit_json(changed=True, comments='Cluster created in Ambari') 318 | elif changed and not module.params['wait_for_build']: 319 | module.exit_json(changed=True, comments='Cluster build started, follow: '+str(changed)) 320 | else: 321 | module.exit_json(changed=False, comments='Cluster already in Ambari') 322 | elif module.params['action'] == 
'get_blueprint_names': 323 | try: 324 | names = get_blueprint_names(module.params['ambari_address'],module.params['ambari_port'], 325 | module.params['username'],module.params['password']) 326 | except urllib2.URLError, error: 327 | if module.params['ignore_get_error']: 328 | module.exit_json(changed=False, comments='Unable to get blueprint names: '+str(error)) 329 | else: 330 | raise 331 | module.exit_json(changed=False, comments='Blueprints are: '+','.join(names), ansible_facts={'blueprint_names': names }) 332 | elif module.params['action'] == 'get_cluster_names': 333 | try: 334 | names = get_cluster_names(module.params['ambari_address'],module.params['ambari_port'], 335 | module.params['username'],module.params['password']) 336 | except urllib2.URLError, error: 337 | if module.params['ignore_get_error']: 338 | module.exit_json(changed=False, comments='Unable to get cluster names: '+str(error)) 339 | else: 340 | raise 341 | module.exit_json(changed=False, comments='Clusters are: '+','.join(names), ansible_facts={'cluster_names': names }) 342 | elif module.params['action'] == 'get_blueprint': 343 | if not 'blueprint_name' in module.params.keys(): 344 | module.fail_json(msg='You must specify a blueprint name when getting a blueprint') 345 | blueprint = get_blueprint(module.params['blueprint_name'],module.params['ambari_address'],module.params['ambari_port'], 346 | module.params['username'],module.params['password']) 347 | module.exit_json(changed=False, comments='', ansible_facts={'blueprint': blueprint }) 348 | else: 349 | module.fail_json(msg='action: '+ module.params['action'] + ' is not defined.') 350 | 351 | # import module snippets 352 | from ansible.module_utils.basic import * 353 | main() 354 | -------------------------------------------------------------------------------- /library/kerberise: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import urllib 3 | import urllib2,base64,json 4 | import os.path 5 | import time 6 | # Set headers, including authentication headers 7 | def get_headers(username,password): 8 | base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '') 9 | headers = { 'X-Requested-By': 'ambari', 'Authorization': 'Basic '+base64string } 10 | return headers 11 | 12 | # Get security type 13 | def get_security_type(ambari_address, ambari_port, username, password, clustername): 14 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername 15 | req = urllib2.Request(url, headers=get_headers(username,password)) 16 | response = urllib2.urlopen(req) 17 | results = json.loads(response.read()) 18 | return results['Clusters']['security_type'] 19 | 20 | def get_request_status(url, username, password): 21 | req = urllib2.Request(url, headers=get_headers(username,password)) 22 | response = urllib2.urlopen(req) 23 | results = json.loads(response.read()) 24 | return (results['Requests']['request_status'],results['Requests']['progress_percent']) 25 | 26 | def wait_for_request(url, username, password, timeout): 27 | start = time.time() 28 | while True: 29 | status,percentage = get_request_status(url, username, password) 30 | if status.lower() == 'completed': 31 | return True 32 | elif status.lower() == 'failed': 33 | module.fail_json(msg='Request has failed after: '+str(time.time() - start)+'s'+ 34 | ' with status: '+ status + ' and percent complete: '+str(percentage)) 35 | elif timeout != 0 and (time.time() - start) > timeout: 36 | module.fail_json(msg='Request has timed out after: 
'+str(time.time() - start)+'s'+ 37 | ' with status: '+ status + ' and percent complete: '+str(percentage)) 38 | time.sleep(3) 39 | def enable_kerberos(ambari_address, ambari_port, username, password, clustername, kdcprincipal, kdcpassword, wait_for_build, build_timeout): 40 | security_type = get_security_type(ambari_address, ambari_port, username, password, clustername) 41 | if security_type == 'KERBEROS': 42 | return False 43 | elif security_type != 'NONE': 44 | module.fail_json(msg='Unknown security type: '+security_type) 45 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername 46 | payload = '{"session_attributes" : {"kerberos_admin" : {"principal" : "'+kdcprincipal+'","password" : "'+kdcpassword+'"}}, "Clusters": {"security_type" : "KERBEROS"}}' 47 | req = urllib2.Request(url, data=payload, headers=get_headers(username,password)) 48 | req.get_method = lambda: 'PUT' 49 | try: 50 | response = urllib2.urlopen(req) 51 | except urllib2.HTTPError, error: 52 | module.fail_json(msg='Failed to post config: '+str(error.read())) 53 | results = json.loads(response.read()) 54 | request_url = results['href'] 55 | if wait_for_build: 56 | wait_for_request(request_url,username, password,build_timeout) 57 | return True 58 | 59 | # Return list of service names 60 | def get_cluster_services(ambari_address, ambari_port, username, password, clustername): 61 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/services' 62 | req = urllib2.Request(url, headers=get_headers(username,password)) 63 | response = urllib2.urlopen(req) 64 | results = json.loads(response.read()) 65 | service_names = list() 66 | for c_entry in results['items']: 67 | service_names.append(c_entry['ServiceInfo']['service_name']) 68 | return service_names 69 | 70 | # Return list of service component names 71 | def get_service_components(ambari_address, ambari_port, username, password, clustername, service): 72 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/services/'+service+'/components' 73 | req = urllib2.Request(url, headers=get_headers(username,password)) 74 | response = urllib2.urlopen(req) 75 | results = json.loads(response.read()) 76 | comp_names = list() 77 | for c_entry in results['items']: 78 | comp_names.append(c_entry['ServiceComponentInfo']['component_name']) 79 | return comp_names 80 | 81 | # Return status of service 82 | def get_service_status(ambari_address, ambari_port, username, password, clustername, service): 83 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/services/'+service 84 | req = urllib2.Request(url, headers=get_headers(username,password)) 85 | response = urllib2.urlopen(req) 86 | results = json.loads(response.read()) 87 | return results['ServiceInfo']['state'] 88 | 89 | # Post kerberos service 90 | def post_kerberos_service(ambari_address, ambari_port, username, password, clustername): 91 | if 'KERBEROS' in get_cluster_services(ambari_address, ambari_port, username, password, clustername): 92 | return False 93 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/services/KERBEROS' 94 | req = urllib2.Request(url, headers=get_headers(username,password)) 95 | req.get_method = lambda: 'POST' 96 | response = urllib2.urlopen(req) 97 | return True 98 | 99 | def put_service_in_state(ambari_address, ambari_port, username, password, clustername, service, state): 100 | if service == 'ALL': 101 | url = 
'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/services' 102 | else: 103 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/services/'+service 104 | req = urllib2.Request(url, data='{"ServiceInfo": {"state" : "'+state+'"}}', headers=get_headers(username,password)) 105 | req.get_method = lambda: 'PUT' 106 | response = urllib2.urlopen(req) 107 | results = json.loads(response.read()) 108 | request_url = results['href'] 109 | return request_url 110 | 111 | # Install kerberos service 112 | def install_kerberos_service(ambari_address, ambari_port, username, password, clustername, wait, timeout): 113 | if 'KERBEROS' in get_cluster_services(ambari_address, ambari_port, username, password, clustername): 114 | if get_service_status(ambari_address, ambari_port, username, password, clustername, 'KERBEROS') != 'INIT': 115 | return False 116 | request_url = put_service_in_state(ambari_address, ambari_port, username, password, clustername,'KERBEROS','INSTALLED') 117 | if wait: 118 | wait_for_request(request_url,username, password, timeout) 119 | return True 120 | 121 | def change_all_service_state(ambari_address, ambari_port, username, password, clustername, wait, timeout, state): 122 | service_changed = False 123 | for service in get_cluster_services(ambari_address, ambari_port, username, password, clustername): 124 | service_state = get_service_status(ambari_address, ambari_port, username, password, clustername, service) 125 | if service_state != 'STARTED' and service_state != 'INSTALLED': 126 | module.fail_json(msg='Cannot put service: '+service+' into state: '+state+' as it is in an unhandled state: '+service_state) 127 | if state != service_state: 128 | service_changed = True 129 | if not service_changed: 130 | return False 131 | request_url = put_service_in_state(ambari_address, ambari_port, username, password, clustername, 'ALL', state) 132 | if wait: 133 | wait_for_request(request_url,username, password, timeout) 134 | return True 135 | 136 | # Post kerberos client component 137 | def post_kerberos_client_component(ambari_address, ambari_port, username, password, clustername): 138 | if 'KERBEROS_CLIENT' in get_service_components(ambari_address, ambari_port, username, password, clustername, 'KERBEROS'): 139 | return False 140 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/services/KERBEROS/components/KERBEROS_CLIENT' 141 | req = urllib2.Request(url, headers=get_headers(username,password)) 142 | req.get_method = lambda: 'POST' 143 | response = urllib2.urlopen(req) 144 | return True 145 | 146 | # Return list of desired config names 147 | def get_desired_config_names(ambari_address, ambari_port, username, password, clustername): 148 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername 149 | req = urllib2.Request(url, headers=get_headers(username,password)) 150 | response = urllib2.urlopen(req) 151 | results = json.loads(response.read()) 152 | conf_names = results['Clusters']['desired_configs'].keys() 153 | return conf_names 154 | 155 | def post_service_config(ambari_address, ambari_port, username, password, clustername, config_path): 156 | existing_configs = get_desired_config_names(ambari_address, ambari_port, username, password, clustername) 157 | if 'krb5-conf' in existing_configs and 'kerberos-env' in existing_configs: 158 | return False 159 | if not os.path.isfile(config_path): 160 | module.fail_json(msg='Cannot find config file: '+config_path) 161 | url = 
'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername 162 | config_file = open(config_path,'r') 163 | config = config_file.read() 164 | req = urllib2.Request(url, data=config, headers=get_headers(username,password)) 165 | req.get_method = lambda: 'PUT' 166 | try: 167 | response = urllib2.urlopen(req) 168 | except urllib2.HTTPError, error: 169 | module.fail_json(msg='Failed to post config: '+str(error.read())) 170 | return True 171 | 172 | def get_host_components(ambari_address, ambari_port, username, password, clustername, host): 173 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/hosts/'+host+'/host_components' 174 | req = urllib2.Request(url, headers=get_headers(username,password)) 175 | response = urllib2.urlopen(req) 176 | results = json.loads(response.read()) 177 | comp_names = list() 178 | for c_entry in results['items']: 179 | comp_names.append(c_entry['HostRoles']['component_name']) 180 | return comp_names 181 | 182 | 183 | def post_host_component(ambari_address, ambari_port, username, password, clustername, host): 184 | if 'KERBEROS_CLIENT' in get_host_components(ambari_address, ambari_port, username, password, clustername, host): 185 | return False 186 | url = 'http://'+ambari_address+':'+ambari_port+'/api/v1/clusters/'+clustername+'/hosts?Hosts/host_name='+host 187 | req = urllib2.Request(url, data='{"host_components" : [{"HostRoles" : {"component_name":"KERBEROS_CLIENT"}}]}', headers=get_headers(username,password)) 188 | response = urllib2.urlopen(req) 189 | return True 190 | 191 | def post_hosts_component(ambari_address, ambari_port, username, password, clustername, hosts): 192 | changed = False 193 | for host in hosts: 194 | host_changed = post_host_component(ambari_address, ambari_port, username, password, clustername, host) 195 | if host_changed: 196 | changed = True 197 | return changed 198 | 199 | def main(): 200 | # Use ansible module to parse arguments 201 | global module 202 | module = AnsibleModule( 203 | argument_spec = dict( 204 | action=dict(required=True, choices=['get_security_type','1_add_service','2_add_service_component','3_post_config','4_post_hosts_component', 205 | '5_install_kerberos_service','6_stop_all_services','7_enable_kerberos', '8_start_all_services']), 206 | cluster_name=dict(required=True, type='str'), 207 | config_path=dict(type='str'), 208 | hosts=dict(type='str'), 209 | kdc_admin_principal=dict(type='str'), 210 | kdc_admin_password=dict(type='str'), 211 | wait=dict(default=False, type='bool'), 212 | timeout=dict(default=300, type='int'), 213 | ambari_address=dict(default='localhost', type='str'), 214 | ambari_port=dict(default='8080', type='str'), 215 | username=dict(default='admin', type='str'), 216 | password=dict(default='admin', type='str') 217 | ) 218 | ) 219 | if module.params['action'] == 'get_security_type': 220 | security_type = get_security_type( module.params['ambari_address'],module.params['ambari_port'], 221 | module.params['username'],module.params['password'], 222 | module.params['cluster_name']) 223 | module.exit_json(changed=False, ansible_facts={'cluster_security': security_type }, comments='Cluster security type is: '+security_type) 224 | elif module.params['action'] == '1_add_service': 225 | changed = post_kerberos_service( module.params['ambari_address'],module.params['ambari_port'], 226 | module.params['username'],module.params['password'], 227 | module.params['cluster_name']) 228 | if changed: 229 | module.exit_json(changed=True, comments='Service added to cluster') 
230 | else: 231 | module.exit_json(changed=False, comments='Service already in cluster') 232 | elif module.params['action'] == '2_add_service_component': 233 | changed = post_kerberos_client_component( module.params['ambari_address'],module.params['ambari_port'], 234 | module.params['username'],module.params['password'], 235 | module.params['cluster_name']) 236 | if changed: 237 | module.exit_json(changed=True, comments='Service component added to cluster') 238 | else: 239 | module.exit_json(changed=False, comments='Service component already in cluster') 240 | elif module.params['action'] == '3_post_config': 241 | if 'config_path' not in module.params.keys(): 242 | module.fail_json(msg='You must specify a config file for this action') 243 | changed = post_service_config( module.params['ambari_address'],module.params['ambari_port'], 244 | module.params['username'],module.params['password'], 245 | module.params['cluster_name'],module.params['config_path']) 246 | if changed: 247 | module.exit_json(changed=True, comments='Config added to cluster') 248 | else: 249 | module.exit_json(changed=False, comments='Config already in cluster') 250 | elif module.params['action'] == '4_post_hosts_component': 251 | if 'hosts' not in module.params.keys(): 252 | module.fail_json(msg='You must specify hosts for this action') 253 | host_list = module.params['hosts'].split(',') 254 | changed = post_hosts_component( module.params['ambari_address'],module.params['ambari_port'], 255 | module.params['username'],module.params['password'], 256 | module.params['cluster_name'],host_list) 257 | if changed: 258 | module.exit_json(changed=True, comments='Component added to hosts') 259 | else: 260 | module.exit_json(changed=False, comments='Components already on hosts') 261 | elif module.params['action'] == '5_install_kerberos_service': 262 | changed = install_kerberos_service( module.params['ambari_address'],module.params['ambari_port'], 263 | module.params['username'],module.params['password'], 264 | module.params['cluster_name'],module.params['wait'],module.params['timeout']) 265 | if changed: 266 | module.exit_json(changed=True, comments='Service installed to cluster') 267 | else: 268 | module.exit_json(changed=False, comments='Service already installed in cluster') 269 | elif module.params['action'] == '6_stop_all_services': 270 | changed = change_all_service_state( module.params['ambari_address'],module.params['ambari_port'], 271 | module.params['username'],module.params['password'], 272 | module.params['cluster_name'],module.params['wait'],module.params['timeout'],'INSTALLED') 273 | if changed: 274 | module.exit_json(changed=True, comments='Services stopped on cluster') 275 | else: 276 | module.exit_json(changed=False, comments='Services already stopped on cluster') 277 | elif module.params['action'] == '7_enable_kerberos': 278 | if 'kdc_admin_principal' not in module.params.keys() or 'kdc_admin_password' not in module.params.keys(): 279 | module.fail_json(msg='You must specify a kdc_admin_principal and kdc_admin_password for this action') 280 | changed = enable_kerberos( module.params['ambari_address'],module.params['ambari_port'], 281 | module.params['username'],module.params['password'], 282 | module.params['cluster_name'],module.params['kdc_admin_principal'],module.params['kdc_admin_password'], 283 | module.params['wait'],module.params['timeout']) 284 | if changed: 285 | module.exit_json(changed=True, comments='Kerberos enabled in cluster') 286 | else: 287 | module.exit_json(changed=False, comments='Kerberos 
already enabled in cluster') 288 | elif module.params['action'] == '8_start_all_services': 289 | changed = change_all_service_state( module.params['ambari_address'],module.params['ambari_port'], 290 | module.params['username'],module.params['password'], 291 | module.params['cluster_name'],module.params['wait'],module.params['timeout'],'STARTED') 292 | if changed: 293 | module.exit_json(changed=True, comments='Services started on cluster') 294 | else: 295 | module.exit_json(changed=False, comments='Services already started on cluster') 296 | 297 | # import module snippets 298 | from ansible.module_utils.basic import * 299 | main() 300 | -------------------------------------------------------------------------------- /pb_ambari_blueprint_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ambari blueprint creation 3 | hosts: ambariserver:&{{ cluster_name }} 4 | roles: 5 | - ambariblueprint 6 | -------------------------------------------------------------------------------- /pb_ambari_kerberise_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: kerberise this cluster 3 | hosts: ambariserver:&{{ cluster_name }} 4 | roles: 5 | - kerberisecluster 6 | -------------------------------------------------------------------------------- /pb_ambari_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ambari agent setup 3 | hosts: "{{ cluster_name }}" 4 | roles: 5 | - ambariagent 6 | 7 | - name: ambari server setup 8 | hosts: ambariserver:&{{ cluster_name }} 9 | roles: 10 | - ambariserver 11 | -------------------------------------------------------------------------------- /pb_directory_services.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### Shared steps to group hosts 3 | - name: place directory service node into correct groups 4 | hosts: directoryservices 5 | tasks: 6 | - name: group by the directory service 7 | group_by: key={{ directory_service }}server 8 | - name: group by the kerberos service 9 | group_by: key={{ kerberos_service }}server 10 | 11 | - name: place directory service clients into correct groups 12 | hosts: all 13 | tasks: 14 | - name: group by the directory service 15 | group_by: key={{ directory_service }}client 16 | - name: group by the kerberos service 17 | group_by: key={{ kerberos_service }}client 18 | 19 | ### MIT KRB + OPENLDAP 20 | - name: create krb5 file 21 | hosts: mitkdcclient:&{{ cluster_name }} 22 | roles: 23 | - etc_krb5 24 | 25 | - name: install haveged (for AWS VMs due to low entropy) 26 | hosts: mitkdcserver:&{{ cluster_name }} 27 | roles: 28 | - haveged 29 | 30 | - name: create the kdc 31 | hosts: mitkdcserver:&{{ cluster_name }} 32 | roles: 33 | - kdcmaster 34 | 35 | - name: create kerberos users 36 | hosts: mitkdcserver:&{{ cluster_name }} 37 | roles: 38 | - create_kerberos_users 39 | 40 | - name: create openldap server 41 | hosts: openldapserver:&{{ cluster_name }} 42 | roles: 43 | - openldap_server 44 | 45 | - name: configure sssd to read from ldap 46 | hosts: openldapclient:&{{ cluster_name }} 47 | roles: 48 | - sssd 49 | -------------------------------------------------------------------------------- /pb_provision_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: pb_provision_env.yml 3 | 4 | - include: pb_directory_services.yml 5 | 6 | - include: 
pb_ssl_certificates.yml 7 | 8 | - include: pb_ambari_setup.yml 9 | 10 | - include: pb_ambari_blueprint_cluster.yml 11 | 12 | #- include: pb_ambari_kerberise_cluster.yml 13 | -------------------------------------------------------------------------------- /pb_provision_env.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: put entries in hosts file 3 | hosts: "{{ cluster_name }}" 4 | roles: 5 | - hosts_file 6 | 7 | - name: hadoop specific configuration 8 | hosts: "{{ cluster_name }}" 9 | roles: 10 | - hadoop_os_configuration 11 | 12 | - name: mysql server install 13 | hosts: mysql:&{{ cluster_name }} 14 | roles: 15 | - mysql_server 16 | 17 | - name: install oracle java 18 | hosts: "{{ cluster_name }}" 19 | roles: 20 | - oracle-java 21 | -------------------------------------------------------------------------------- /pb_ssl_certificates.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create and distribute certificates 3 | hosts: "{{ cluster_name }}" 4 | roles: 5 | - ssl_certs 6 | -------------------------------------------------------------------------------- /repo_files/ambari-2.1.2.centos.6.repo: -------------------------------------------------------------------------------- 1 | [Updates-ambari-2.1.2] 2 | name=ambari-2.1.2 - Updates 3 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos6/2.x/updates/2.1.2 4 | gpgcheck=1 5 | gpgkey=http://public-repo-1.hortonworks.com/ambari/centos6/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 6 | enabled=1 7 | priority=1 8 | -------------------------------------------------------------------------------- /repo_files/ambari-2.1.2.centos.7.repo: -------------------------------------------------------------------------------- 1 | [Updates-ambari-2.1.2] 2 | name=ambari-2.1.2 - Updates 3 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/2.1.2 4 | gpgcheck=1 5 | gpgkey=http://public-repo-1.hortonworks.com/ambari/centos7/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 6 | enabled=1 7 | priority=1 8 | -------------------------------------------------------------------------------- /repo_files/ambari-2.2.0.centos.6.repo: -------------------------------------------------------------------------------- 1 | #VERSION_NUMBER=2.2.0.0-1310 2 | 3 | [Updates-ambari-2.2.0.0] 4 | name=ambari-2.2.0.0 - Updates 5 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos6/2.x/updates/2.2.0.0 6 | gpgcheck=1 7 | gpgkey=http://public-repo-1.hortonworks.com/ambari/centos6/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 8 | enabled=1 9 | priority=1 10 | -------------------------------------------------------------------------------- /repo_files/ambari-2.2.0.centos.7.repo: -------------------------------------------------------------------------------- 1 | #VERSION_NUMBER=2.2.0.0-1310 2 | 3 | [Updates-ambari-2.2.0.0] 4 | name=ambari-2.2.0.0 - Updates 5 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/2.2.0.0 6 | gpgcheck=1 7 | gpgkey=http://public-repo-1.hortonworks.com/ambari/centos7/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 8 | enabled=1 9 | priority=1 10 | -------------------------------------------------------------------------------- /repo_files/ambari-2.2.1.centos.6.repo: -------------------------------------------------------------------------------- 1 | #VERSION_NUMBER=2.2.1.0-161 2 | 3 | [Updates-ambari-2.2.1.0] 4 | name=ambari-2.2.1.0 - Updates 5 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos6/2.x/updates/2.2.1.0 6 | gpgcheck=1 7 | 
gpgkey=http://public-repo-1.hortonworks.com/ambari/centos6/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 8 | enabled=1 9 | priority=1 10 | -------------------------------------------------------------------------------- /repo_files/ambari-2.2.2.2-2.centos.6.repo: -------------------------------------------------------------------------------- 1 | #VERSION_NUMBER=2.2.2.2-2 2 | [AMBARI.2.2.2.0-2.x] 3 | name=Ambari 2.x 4 | baseurl=http://s3.amazonaws.com/dev.hortonworks.com/ambari/centos6/2.x/BUILDS/2.2.2.2-2/ 5 | gpgcheck=1 6 | gpgkey=http://s3.amazonaws.com/dev.hortonworks.com/ambari/centos6/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 7 | enabled=1 8 | priority=1 9 | -------------------------------------------------------------------------------- /repo_files/ambari-2.2.2.centos.6.repo: -------------------------------------------------------------------------------- 1 | #VERSION_NUMBER=2.2.2.0-460 2 | 3 | [Updates-ambari-2.2.2.0] 4 | name=ambari-2.2.2.0 - Updates 5 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos6/2.x/updates/2.2.2.0 6 | gpgcheck=1 7 | gpgkey=http://public-repo-1.hortonworks.com/ambari/centos6/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 8 | enabled=1 9 | priority=1 10 | -------------------------------------------------------------------------------- /repo_files/ambari-2.4.0.1.centos.6.repo: -------------------------------------------------------------------------------- 1 | #VERSION_NUMBER=2.4.0.1-1 2 | 3 | [Updates-ambari-2.4.0.1] 4 | name=ambari-2.4.0.1 - Updates 5 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos6/2.x/updates/2.4.0.1 6 | gpgcheck=1 7 | gpgkey=http://public-repo-1.hortonworks.com/ambari/centos6/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 8 | enabled=1 9 | priority=1 10 | -------------------------------------------------------------------------------- /repo_files/ambari-2.4.0.1.centos.7.repo: -------------------------------------------------------------------------------- 1 | #VERSION_NUMBER=2.4.0.1-1 2 | 3 | [Updates-ambari-2.4.0.1] 4 | name=ambari-2.4.0.1 - Updates 5 | baseurl=http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/2.4.0.1 6 | gpgcheck=1 7 | gpgkey=http://public-repo-1.hortonworks.com/ambari/centos7/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins 8 | enabled=1 9 | priority=1 10 | -------------------------------------------------------------------------------- /roles/ambariagent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy ambari repo file 3 | copy: > 4 | src=repo_files/ambari-{{ ambari_version }}.{{ ansible_distribution | lower }}.{{ ansible_distribution_major_version }}.repo 5 | dest=/etc/yum.repos.d/ambari.repo 6 | sudo: yes 7 | 8 | - name: Install yum packages for agent 9 | yum: name=ambari-agent state=present 10 | sudo: yes 11 | 12 | - name: modify ambari agent config file for hostname 13 | lineinfile: dest=/etc/ambari-agent/conf/ambari-agent.ini regexp='^hostname=' line="hostname={{ groups[cluster_name+'_ambariserver'][0] }}" backup=yes 14 | sudo: yes 15 | register: agentconfig 16 | 17 | - name: start ambari agent 18 | service: name=ambari-agent state=restarted enabled=yes 19 | sudo: yes 20 | when: agentconfig.changed 21 | -------------------------------------------------------------------------------- /roles/ambariblueprint/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | blueprint_folder: /etc/ambari-server/conf/blueprints 3 | blueprint_file: bare_cluster.bp.j2 4 | cluster_definition_file: bare_cluster.ct.j2 5 | 
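# Override sketch (values below are illustrative, not shipped defaults): any of the
# variables above can be overridden per cluster in group_vars or with -e, e.g.:
#   blueprint_file: my_cluster.bp.j2
#   cluster_definition_file: my_cluster.ct.j2
# The role's tasks render the chosen Jinja2 templates from blueprints/ into
# {{ blueprint_folder }} before posting them to Ambari.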
-------------------------------------------------------------------------------- /roles/ambariblueprint/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include cert vars in case we are ssl 3 | include_vars: vars/cert_vars.yml 4 | 5 | - name: include kdc config vars for kerberise play 6 | include_vars: vars/kdc_config 7 | 8 | - name: create blueprint folder 9 | file: path={{ blueprint_folder }} state=directory 10 | sudo: yes 11 | 12 | - name: copy blueprint to ambari server 13 | template: src=blueprints/{{ blueprint_file }} dest={{ blueprint_folder }}/{{ cluster_name }}_blueprint.json backup=yes 14 | sudo: yes 15 | 16 | - name: wait for ambari api to be ready 17 | blueprints: action=wait_for_ambari_api 18 | register: apiwait 19 | 20 | - name: wait for hosts to be registered in ambari 21 | blueprints: action=wait_for_registered_hosts hosts="{{ groups[cluster_name]|join(',') }}" 22 | register: registerhosts 23 | 24 | - name: post repo url for this stack 25 | blueprints: > 26 | action=post_base_url stack_version={{ hdp_stack }} repo_id={{ hdp_version }} 27 | os_type={{ ansible_distribution|lower+ansible_distribution_major_version }} 28 | base_url={{ hdp_stack_repo }} 29 | 30 | - name: post blueprint to ambari 31 | blueprints: > 32 | action=post_blueprint path={{ blueprint_folder }}/{{ cluster_name }}_blueprint.json blueprint_name="{{ cluster_name }}" 33 | stack_recommendations="{{ use_ambari_recommendations }}" stack_version="{{ hdp_stack }}" hosts="{{ groups[cluster_name]|join(',') }}" 34 | stack_services="{{ blueprint_services }}" 35 | register: blueprint_post 36 | 37 | - name: get blueprint from ambari 38 | blueprints: action=get_blueprint blueprint_name="{{ cluster_name }}" 39 | 40 | - name: record blueprint 41 | copy: content="{{ blueprint }}" dest={{ blueprint_folder }}/{{ cluster_name }}_blueprint.sent.json backup=yes 42 | sudo: yes 43 | changed_when: no 44 | 45 | - name: get list of clusters 46 | blueprints: action=get_cluster_names 47 | register: bp_cluster_names 48 | 49 | - name: copy cluster creation template to ambari server 50 | template: src=blueprints/{{ cluster_definition_file }} dest={{ blueprint_folder }}/{{ cluster_name }}_cluster.json backup=yes 51 | sudo: yes 52 | 53 | - name: post cluster to ambari 54 | blueprints: > 55 | action=post_cluster path={{ blueprint_folder }}/{{ cluster_name }}_cluster.json cluster_name="{{ cluster_name }}" wait_for_build=yes 56 | register: cluster_post 57 | 58 | - name: set cluster names fact 59 | blueprints: action=get_cluster_names 60 | ignore_errors: yes 61 | -------------------------------------------------------------------------------- /roles/ambariserver/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | java_home: "/usr/java/default" 3 | ambari_resource_folder: "/var/lib/ambari-server/resources/" 4 | ambari_drop_ddl_file: "Ambari-DDL-MySQL-DROP.sql" 5 | ambari_create_ddl_file: "Ambari-DDL-MySQL-CREATE.sql" 6 | -------------------------------------------------------------------------------- /roles/ambariserver/files/ambari-server.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ambari-server service 3 | After=xe-linux-distribution.service 4 | 5 | [Service] 6 | Type=forking 7 | ExecStart=/usr/sbin/ambari-server start 8 | ExecStop=/usr/sbin/ambari-server stop 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | 
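# Note (assumption based on the role's tasks, not upstream packaging): this unit is
# only copied in by the ambariserver role on CentOS 7 hosts that do not already have
# /usr/lib/systemd/system/ambari-server.service; the role's service tasks then
# start and enable ambari-server through it.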
-------------------------------------------------------------------------------- /roles/ambariserver/tasks/ambari_ddl.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: test if the ddl has been applied 3 | shell: mysql -e "show tables" "{{ mysql_ambari_database }}" | wc -l 4 | sudo: yes 5 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 6 | changed_when: no 7 | register: ddl_ran 8 | 9 | - name: create directory structure for ddl 10 | file: path="{{ ambari_resource_folder }}" state=directory 11 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 12 | when: "ddl_ran.stdout == '0'" 13 | sudo: yes 14 | 15 | - name: copy ambari create ddl to local tmp 16 | fetch: src={{ ambari_resource_folder }}/{{ ambari_create_ddl_file }} dest=/tmp/{{ ambari_create_ddl_file }}.tmp flat=yes fail_on_missing=yes 17 | when: "ddl_ran.stdout == '0'" 18 | 19 | - name: copy ambari ddl to mysql server 20 | copy: src=/tmp/{{ ambari_create_ddl_file }}.tmp dest={{ ambari_resource_folder }}/{{ ambari_create_ddl_file }} 21 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 22 | when: "ddl_ran.stdout == '0'" 23 | run_once: yes 24 | sudo: yes 25 | 26 | - name: run ambari create ddl 27 | shell: mysql --database={{ mysql_ambari_database }} < {{ ambari_resource_folder }}/{{ ambari_create_ddl_file }} 28 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 29 | when: "ddl_ran.stdout == '0'" 30 | run_once: yes 31 | sudo: yes 32 | 33 | - name: remove ddl file from local 34 | file: path=/tmp/{{ ambari_create_ddl_file }}.tmp state=absent 35 | delegate_to: 127.0.0.1 36 | when: "ddl_ran.stdout == '0'" 37 | -------------------------------------------------------------------------------- /roles/ambariserver/tasks/ambari_setup_start.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create java directory 3 | file: path=/usr/share/java/ state=directory 4 | sudo: yes 5 | 6 | - name: check if connector jar exists 7 | stat: path="/usr/share/java/mysql-connector-java-{{ mysql_connector_version }}-bin.jar" 8 | register: connector 9 | 10 | - name: download mysql connector 11 | get_url: url="http://cdn.mysql.com/Downloads/Connector-J/mysql-connector-java-{{ mysql_connector_version }}.tar.gz" dest="/tmp/mysql-connector-java-{{ mysql_connector_version }}.tar.gz" 12 | when: not connector.stat.exists 13 | 14 | - name: unpack the connector 15 | unarchive: src="/tmp/mysql-connector-java-{{ mysql_connector_version }}.tar.gz" dest=/tmp/ copy=no 16 | when: not connector.stat.exists 17 | 18 | - name: copy connector jar file 19 | command: cp "/tmp/mysql-connector-java-{{ mysql_connector_version }}/mysql-connector-java-{{ mysql_connector_version }}-bin.jar" "/usr/share/java/mysql-connector-java-{{ mysql_connector_version }}-bin.jar" 20 | when: not connector.stat.exists 21 | sudo: yes 22 | 23 | - name: create link to connector 24 | file: src="/usr/share/java/mysql-connector-java-{{ mysql_connector_version }}-bin.jar" dest=/usr/share/java/mysql-connector-java.jar state=link 25 | when: not connector.stat.exists 26 | sudo: yes 27 | 28 | - name: remove jar tars and dirs 29 | file: path="{{ item }}" state=absent 30 | with_items: 31 | - "/tmp/mysql-connector-java-{{ mysql_connector_version }}.tar.gz" 32 | - "/tmp/mysql-connector-java-{{ mysql_connector_version }}/" 33 | when: not connector.stat.exists 34 | 35 | - name: ambari server setup 36 | shell: > 37 | ambari-server setup -s -j "{{ java_home }}" --database=mysql --databasehost="{{ 
groups[cluster_name+'_mysql'][0] }}" 38 | --databaseport={{ mysql_port }} --databasename={{ mysql_ambari_database }} 39 | --databaseusername={{ mysql_ambari_user }} --databasepassword={{ mysql_ambari_password }} 40 | args: 41 | creates: /etc/ambari-server/conf/setup_complete.ansible 42 | sudo: yes 43 | register: ambari_server_setup 44 | 45 | - name: create marker for ambari setup 46 | file: path=/etc/ambari-server/conf/setup_complete.ansible state=touch 47 | when: ambari_server_setup.changed 48 | sudo: yes 49 | 50 | - name: ambari server setup for conenctor 51 | command: ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar 52 | args: 53 | creates: /etc/ambari-server/conf/mysql_setup_complete.ansible 54 | sudo: yes 55 | register: ambari_server_mysql_setup 56 | 57 | - name: set cluster names fact 58 | blueprints: action=get_cluster_names ignore_get_error=true 59 | register: cluster_names_bp 60 | 61 | - name: create marker for ambari mysql setup 62 | file: path=/etc/ambari-server/conf/mysql_setup_complete.ansible state=touch 63 | when: ambari_server_mysql_setup.changed 64 | sudo: yes 65 | 66 | #Systemd hack 67 | - name: check if systemd file exists 68 | stat: path=/usr/lib/systemd/system/ambari-server.service 69 | when: 'ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"' 70 | register: systemdstat 71 | 72 | - name: copy over systemd file 73 | copy: src=ambari-server.service dest=/usr/lib/systemd/system/ambari-server.service owner=root group=root mode=644 74 | sudo: yes 75 | when: 'ansible_distribution == "CentOS" and ansible_distribution_major_version == "7" and not systemdstat.stat.exists' 76 | 77 | - name: Check whether ambari server has truststore conf 78 | shell: cat /etc/ambari-server/conf/ambari.properties | grep trustStore | wc -l 79 | register: amb_trust 80 | ignore_errors: True 81 | changed_when: False 82 | 83 | - name: stop ambari server for trust store 84 | service: name=ambari-server state=stopped 85 | sudo: yes 86 | when: 'amb_trust.stdout == "0"' 87 | 88 | - name: include cert vars 89 | include_vars: vars/cert_vars.yml 90 | 91 | - name: put truststore details in file 92 | lineinfile: dest=/etc/ambari-server/conf/ambari.properties regexp="^{{ item.key }}=" line="{{ item.key }}={{ item.value }}" 93 | sudo: yes 94 | when: 'amb_trust.stdout == "0"' 95 | with_items: 96 | - key: ssl.trustStore.type 97 | value: jks 98 | - key: ssl.trustStore.path 99 | value: "{{ cert_dir }}/clusterTrustStore.jks" 100 | - key: ssl.trustStore.password 101 | value: "{{ truststore_default_pass }}" 102 | 103 | - name: start ambari server 104 | service: name=ambari-server state=started enabled=yes 105 | sudo: yes 106 | 107 | - name: restart ambari server 108 | service: name=ambari-server state=restarted 109 | sudo: yes 110 | when: ambari_server_mysql_setup.changed or ambari_server_setup.changed 111 | -------------------------------------------------------------------------------- /roles/ambariserver/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include mysql grants option 3 | include: mysql_grants.yml 4 | 5 | - name: copy ambari repo file 6 | copy: > 7 | src=repo_files/ambari-{{ ambari_version }}.{{ ansible_distribution | lower }}.{{ ansible_distribution_major_version }}.repo 8 | dest=/etc/yum.repos.d/ambari.repo 9 | sudo: yes 10 | 11 | - name: install ambari-server 12 | yum: name=ambari-server state=present 13 | sudo: yes 14 | 15 | - name: include ambari ddl run 16 | 
include: ambari_ddl.yml 17 | 18 | - name: include ambari setup and start 19 | include: ambari_setup_start.yml 20 | -------------------------------------------------------------------------------- /roles/ambariserver/tasks/mysql_grants.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create databases for ambari, hive, oozie and ranger 3 | mysql_db: name="{{ item }}" state=present 4 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 5 | sudo: yes 6 | with_items: 7 | - "{{ mysql_ambari_database }}" 8 | - "{{ mysql_hive_database }}" 9 | - "{{ mysql_oozie_database }}" 10 | - "{{ mysql_ranger_database }}" 11 | - "{{ mysql_rangeraudit_database }}" 12 | 13 | - name: grant to ambari database 14 | mysql_user: > 15 | name="{{ mysql_ambari_user }}" password="{{ mysql_ambari_password }}" 16 | priv="{{ mysql_ambari_database }}.*:ALL" state=present host="{{ item }}" 17 | append_privs=yes 18 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 19 | with_items: 20 | - localhost 21 | - "{{ groups[cluster_name+'_mysql'][0] }}" 22 | - "{{ groups[cluster_name+'_ambariserver'][0] }}" 23 | sudo: yes 24 | 25 | - name: grant to hive database 26 | mysql_user: > 27 | name="{{ mysql_hive_user }}" password="{{ mysql_hive_password }}" 28 | priv="{{ mysql_hive_database }}.*:ALL" state=present host="{{ item }}" 29 | append_privs=yes 30 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 31 | with_items: 32 | - localhost 33 | - "{{ groups[cluster_name+'_mysql'][0] }}" 34 | - "{{ groups[cluster_name+'_ambariserver'][0] }}" 35 | sudo: yes 36 | 37 | - name: grant to oozie database 38 | mysql_user: > 39 | name="{{ mysql_oozie_user }}" password="{{ mysql_oozie_password }}" 40 | priv="{{ mysql_oozie_database }}.*:ALL" state=present host="{{ item }}" 41 | append_privs=yes 42 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 43 | with_items: 44 | - localhost 45 | - "{{ groups[cluster_name+'_mysql'][0] }}" 46 | - "{{ groups[cluster_name+'_ambariserver'][0] }}" 47 | sudo: yes 48 | 49 | - name: grant to ranger database 50 | mysql_user: > 51 | name="{{ mysql_ranger_user }}" password="{{ mysql_ranger_password }}" 52 | priv="{{ mysql_ranger_database }}.*:ALL" state=present host="{{ item }}" 53 | append_privs=yes 54 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 55 | with_items: 56 | - localhost 57 | - "{{ groups[cluster_name+'_mysql'][0] }}" 58 | - "{{ groups[cluster_name+'_ambariserver'][0] }}" 59 | sudo: yes 60 | 61 | - name: grant to rangeraudit database 62 | mysql_user: > 63 | name="{{ mysql_rangeraudit_user }}" password="{{ mysql_rangeraudit_password }}" 64 | priv="{{ mysql_rangeraudit_database }}.*:ALL" state=present host="{{ item }}" 65 | append_privs=yes 66 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 67 | with_items: 68 | - localhost 69 | - "{{ groups[cluster_name+'_mysql'][0] }}" 70 | - "{{ groups[cluster_name+'_ambariserver'][0] }}" 71 | sudo: yes 72 | 73 | - name: grant root access to mysql from the ambari host for ranger 74 | mysql_user: > 75 | name="{{ mysql_root_user }}" password="{{ mysql_root_password }}" 76 | priv="*.*:ALL,GRANT" state=present host="{{ groups[cluster_name+'_ambariserver'][0] }}" 77 | append_privs=yes 78 | delegate_to: "{{ groups[cluster_name+'_mysql'][0] }}" 79 | sudo: yes 80 | -------------------------------------------------------------------------------- /roles/create_kerberos_users/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include vars for users 3 | 
include_vars: vars/users.yml 4 | 5 | - name: create kerberos principals for users 6 | shell: kadmin.local -q "addprinc -pw {{ item.password }} {{ item.username }}" 7 | sudo: yes 8 | register: kadminuser 9 | with_items: users 10 | changed_when: "'created' in kadminuser.stdout" 11 | 12 | -------------------------------------------------------------------------------- /roles/etc_krb5/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install krb5 workstation 3 | yum: name=krb5-workstation 4 | sudo: yes 5 | 6 | - name: place krb5 conf file 7 | template: src=krb5.conf.j2 dest=/etc/krb5.conf owner=root group=root mode=0644 8 | sudo: yes 9 | -------------------------------------------------------------------------------- /roles/etc_krb5/templates/krb5.conf.j2: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/krb5libs.log 3 | kdc = FILE:/var/log/krb5kdc.log 4 | admin_server = FILE:/var/log/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = {{ krb_realm }} 8 | dns_lookup_realm = false 9 | dns_lookup_kdc = false 10 | ticket_lifetime = 30d 11 | renew_lifetime = 7d 12 | forwardable = true 13 | udp_preference_limit = 1 14 | 15 | [realms] 16 | {{ krb_realm }} = { 17 | kdc = {{ groups[cluster_name+'_directoryservices'][0] }} 18 | admin_server = {{ groups[cluster_name+'_directoryservices'][0] }} 19 | } 20 | 21 | [domain_realm] 22 | .{{ krb_domain }} = {{ krb_realm }} 23 | {{ krb_domain }} = {{ krb_realm }} 24 | -------------------------------------------------------------------------------- /roles/hadoop_os_configuration/files/90-nproc.conf: -------------------------------------------------------------------------------- 1 | # Default limit for number of user's processes to prevent 2 | # accidental fork bombs. 3 | # See rhbz #432903 for reasoning. 
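4 | # Hadoop daemons fork far more processes than the RHEL 6 default cap of 1024, so the soft limit is raised below.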
5 | 6 | * soft nproc 32768 7 | root soft nproc unlimited 8 | -------------------------------------------------------------------------------- /roles/hadoop_os_configuration/files/limits.conf: -------------------------------------------------------------------------------- 1 | * - nofile 32768 2 | * - nproc 65536 3 | -------------------------------------------------------------------------------- /roles/hadoop_os_configuration/files/security_limits.conf: -------------------------------------------------------------------------------- 1 | # /etc/security/limits.conf 2 | # 3 | #Each line describes a limit for a user in the form: 4 | # 5 | #<domain>        <type>  <item>  <value> 6 | # 7 | #Where: 8 | #<domain> can be: 9 | # - a user name 10 | # - a group name, with @group syntax 11 | # - the wildcard *, for default entry 12 | # - the wildcard %, can be also used with %group syntax, 13 | # for maxlogin limit 14 | # 15 | #<type> can have the two values: 16 | # - "soft" for enforcing the soft limits 17 | # - "hard" for enforcing hard limits 18 | # 19 | #<item> can be one of the following: 20 | # - core - limits the core file size (KB) 21 | # - data - max data size (KB) 22 | # - fsize - maximum filesize (KB) 23 | # - memlock - max locked-in-memory address space (KB) 24 | # - nofile - max number of open file descriptors 25 | # - rss - max resident set size (KB) 26 | # - stack - max stack size (KB) 27 | # - cpu - max CPU time (MIN) 28 | # - nproc - max number of processes 29 | # - as - address space limit (KB) 30 | # - maxlogins - max number of logins for this user 31 | # - maxsyslogins - max number of logins on the system 32 | # - priority - the priority to run user process with 33 | # - locks - max number of file locks the user can hold 34 | # - sigpending - max number of pending signals 35 | # - msgqueue - max memory used by POSIX message queues (bytes) 36 | # - nice - max nice priority allowed to raise to values: [-20, 19] 37 | # - rtprio - max realtime priority 38 | # 39 | #<domain>      <type>  <item>         <value> 40 | # 41 | 42 | #* soft core 0 43 | #* hard rss 10000 44 | #@student hard nproc 20 45 | #@faculty soft nproc 20 46 | #@faculty hard nproc 50 47 | #ftp hard nproc 0 48 | #@student - maxlogins 4 49 | * soft memlock unlimited 50 | * hard memlock unlimited 51 | * hard nofile 65536 52 | * soft nofile 32768 53 | * hard nproc 65536 54 | * soft nproc 32768 55 | # End of file 56 | -------------------------------------------------------------------------------- /roles/hadoop_os_configuration/tasks/config_files.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy limits files into locations 3 | copy: src={{ item.src_name }} dest={{ item.dest_name }} owner=root group=root 4 | sudo: yes 5 | with_items: 6 | - src_name: limits.conf 7 | dest_name: /etc/limits.conf 8 | - src_name: security_limits.conf 9 | dest_name: /etc/security/limits.conf 10 | - src_name: 90-nproc.conf 11 | dest_name: /etc/security/limits.d/90-nproc.conf 12 | -------------------------------------------------------------------------------- /roles/hadoop_os_configuration/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install ntp 3 | yum: name=ntp state=installed 4 | sudo: yes 5 | 6 | - name: start ntpd 7 | service: name=ntpd state=started enabled=yes 8 | sudo: yes 9 | 10 | - name: stop iptables 11 | service: name={{ item }} state=stopped enabled=no 12 | sudo: yes 13 | with_items: 14 | - iptables 15 | - ip6tables 16 | when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "6" 17 | 
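# Hadoop components use many ephemeral ports, so the firewall is disabled during install; re-apply cluster-specific rules afterwards if policy requires it.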
18 | - name: stop firewalld 19 | service: name=firewalld state=stopped enabled=no 20 | sudo: yes 21 | when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7" 22 | 23 | - name: include task for page files 24 | include: page_files.yml 25 | 26 | - name: include task for config files 27 | include: config_files.yml 28 | -------------------------------------------------------------------------------- /roles/hadoop_os_configuration/tasks/page_files.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set line facts for pagefiles 3 | set_fact: pagelines="[ 'echo never > /sys/kernel/mm/transparent_hugepage/enabled', 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' ]" 4 | 5 | - name: make sure transparent page files are disabled 6 | lineinfile: dest=/etc/rc.local line="{{ item }}" 7 | sudo: yes 8 | with_items: pagelines 9 | register: page_changes 10 | 11 | - name: run transparent page lines 12 | shell: "{{ item.0 }}" 13 | when: "item.1.changed" 14 | sudo: yes 15 | with_together: 16 | - pagelines 17 | - page_changes.results 18 | -------------------------------------------------------------------------------- /roles/haveged/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install haveged when available entropy falls below this threshold 3 | min_entropy: 1000 4 | -------------------------------------------------------------------------------- /roles/haveged/tasks/install_haveged.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check to see if haveged is installed 3 | command: rpm -q haveged 4 | register: haveged_check 5 | failed_when: haveged_check.rc > 1 6 | changed_when: no 7 | 8 | - name: install epel 9 | yum: name=epel-release state=present 10 | sudo: yes 11 | when: haveged_check.rc == 1 12 | 13 | - name: install haveged 14 | yum: name=haveged state=present 15 | sudo: yes 16 | when: haveged_check.rc == 1 17 | 18 | - name: remove epel 19 | yum: name=epel-release state=absent 20 | sudo: yes 21 | when: haveged_check.rc == 1 22 | 23 | - name: start haveged 24 | service: name=haveged state=started 25 | sudo: yes 26 | -------------------------------------------------------------------------------- /roles/haveged/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check entropy available 3 | command: cat /proc/sys/kernel/random/entropy_avail 4 | register: entropy_avail 5 | changed_when: no 6 | 7 | - name: install haveged if needed 8 | include: install_haveged.yml 9 | when: entropy_avail.stdout|int < min_entropy 10 | -------------------------------------------------------------------------------- /roles/hosts_file/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: put host file entries in file 3 | lineinfile: dest=/etc/hosts line="{{ hostvars[item]['ip'] }} {{ item }}" state=present 4 | when: "not dns_enabled and hostvars[item]['ip'] is defined" 5 | with_items: groups[cluster_name] 6 | sudo: yes 7 | -------------------------------------------------------------------------------- /roles/kdcmaster/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kerberos_ticket_max_life: "0d 12h 0m 0s" 3 | kerberos_ticket_max_renewable_life: "0d 12h 0m 0s" 4 | 
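# Both values feed max_life / max_renewable_life in templates/kdc.conf.j2; override them in group_vars if longer-lived tickets are needed.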
-------------------------------------------------------------------------------- /roles/kdcmaster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include kdc config vars 3 | include_vars: vars/kdc_config 4 | 5 | - name: check whether kdc has been configured 6 | stat: path=/var/kerberos/krb5kdc/principal 7 | register: kdc_check 8 | 9 | - name: install krb server 10 | yum: name=krb5-server state=present 11 | sudo: yes 12 | when: "not kdc_check.stat.exists" 13 | 14 | - name: place kdc conf file 15 | template: src=kdc.conf.j2 dest=/var/kerberos/krb5kdc/kdc.conf owner=root group=root mode=0600 16 | sudo: yes 17 | when: "not kdc_check.stat.exists" 18 | 19 | - name: install expect (replace with ansible expect after v2) 20 | yum: name=expect state=present 21 | sudo: yes 22 | when: "not kdc_check.stat.exists" 23 | 24 | - name: place expect script 25 | template: src=kdb5_create.exp.j2 dest=/tmp/kdb5_create.exp owner=root group=root mode=0700 26 | sudo: yes 27 | when: "not kdc_check.stat.exists" 28 | 29 | - name: run kdb5 create script 30 | command: /usr/bin/expect /tmp/kdb5_create.exp 31 | sudo: yes 32 | when: "not kdc_check.stat.exists" 33 | 34 | - name: remove expect script 35 | file: path=/tmp/kdb5_create.exp state=absent 36 | sudo: yes 37 | when: "not kdc_check.stat.exists" 38 | 39 | - name: start krb5kdc 40 | service: name=krb5kdc state=started enabled=yes 41 | sudo: yes 42 | 43 | - name: start kadmin 44 | service: name=kadmin state=started enabled=yes 45 | sudo: yes 46 | 47 | - name: create admin principal 48 | shell: kadmin.local -q "addprinc -pw {{ kdc_admin_password }} {{ kdc_admin_username }}/admin" 49 | sudo: yes 50 | register: kadminlocal 51 | changed_when: "'created' in kadminlocal.stdout" 52 | 53 | - name: remove example acl 54 | lineinfile: dest=/var/kerberos/krb5kdc/kadm5.acl regexp="^\*/admin@EXAMPLE.COM.*" state=absent 55 | sudo: yes 56 | register: acl_example 57 | 58 | - name: add admin acl 59 | lineinfile: dest=/var/kerberos/krb5kdc/kadm5.acl line="{{ kdc_admin_username }}/admin@{{ krb_realm }} *" state=present 60 | sudo: yes 61 | register: acl_admin 62 | 63 | - name: restart kadmin 64 | service: name=kadmin state=restarted 65 | sudo: yes 66 | when: "acl_example.changed or acl_admin.changed" 67 | -------------------------------------------------------------------------------- /roles/kdcmaster/templates/kdb5_create.exp.j2: -------------------------------------------------------------------------------- 1 | #!/bin/expect 2 | set timeout 20 3 | spawn sh -c {kdb5_util create -s} 4 | expect "Enter KDC database master key:" 5 | send "{{ kdc_master_key }}\r" 6 | expect "Re-enter KDC database master key to verify:" 7 | send "{{ kdc_master_key }}\r" 8 | expect eof 9 | 10 | lassign [wait] pid spawnid os_error_flag value 11 | 12 | exit $value 13 | -------------------------------------------------------------------------------- /roles/kdcmaster/templates/kdc.conf.j2: -------------------------------------------------------------------------------- 1 | [kdcdefaults] 2 | kdc_ports = 88 3 | kdc_tcp_ports = 88 4 | 5 | [realms] 6 | {{ krb_realm }} = { 7 | database_name = /var/kerberos/krb5kdc/principal 8 | dict_file = /usr/share/dict/words 9 | admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab 10 | acl_file = /var/kerberos/krb5kdc/kadm5.acl 11 | key_stash_file = /var/kerberos/krb5kdc/.k5.{{ krb_realm }} 12 | max_life = {{ kerberos_ticket_max_life }} 13 | max_renewable_life = {{ kerberos_ticket_max_renewable_life }} 14 | 
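# Note: the enctype list below keeps weak legacy ciphers (DES, RC4) for compatibility with older components; consider trimming it on hardened clusters.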
15 | supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal 16 | } 17 | -------------------------------------------------------------------------------- /roles/kerberisecluster/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | config_folder: /etc/ambari-server/conf/configurations 3 | -------------------------------------------------------------------------------- /roles/kerberisecluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include kdc config vars for kerberise play 3 | include_vars: vars/kdc_config 4 | 5 | - name: wait for ambari api to be ready for kerberisation 6 | blueprints: action=wait_for_ambari_api 7 | register: apiwait 8 | 9 | - name: get cluster security type 10 | kerberise: action=get_security_type cluster_name="{{ cluster_name }}" 11 | register: test_kerb 12 | 13 | - name: post service to cluster 14 | kerberise: action=1_add_service cluster_name="{{ cluster_name }}" 15 | register: step1 16 | when: 'cluster_security == "NONE"' 17 | 18 | - name: post service component to cluster 19 | kerberise: action=2_add_service_component cluster_name="{{ cluster_name }}" 20 | register: step2 21 | when: 'cluster_security == "NONE"' 22 | 23 | - name: create config folder 24 | file: path={{ config_folder }} state=directory 25 | sudo: yes 26 | when: 'cluster_security == "NONE"' 27 | 28 | - name: copy config to ambari server 29 | template: src=desired_config.j2 dest={{ config_folder }}/{{ cluster_name }}_desired_configs.json backup=yes 30 | sudo: yes 31 | when: 'cluster_security == "NONE"' 32 | 33 | - name: post config to cluster 34 | kerberise: action=3_post_config cluster_name="{{ cluster_name }}" config_path="{{ config_folder }}/{{ cluster_name }}_desired_configs.json" 35 | register: step3 36 | when: 'cluster_security == "NONE"' 37 | 38 | - name: post hosts components 39 | kerberise: action=4_post_hosts_component cluster_name="{{ cluster_name }}" hosts="{{ groups[cluster_name]|join(',') }}" 40 | register: step4 41 | when: 'cluster_security == "NONE"' 42 | 43 | - name: install kerberos service 44 | kerberise: action=5_install_kerberos_service cluster_name="{{ cluster_name }}" wait=yes 45 | register: step5 46 | when: 'cluster_security == "NONE"' 47 | 48 | - name: stop all services 49 | kerberise: action=6_stop_all_services cluster_name="{{ cluster_name }}" wait=yes 50 | register: step6 51 | when: 'cluster_security == "NONE"' 52 | 53 | - name: enable kerberos 54 | kerberise: action=7_enable_kerberos cluster_name="{{ cluster_name }}" kdc_admin_principal="{{ kdc_admin_username }}/admin" kdc_admin_password="{{ kdc_admin_password }}" wait=yes 55 | register: step7 56 | when: 'cluster_security == "NONE"' 57 | 58 | - name: start all services 59 | kerberise: action=8_start_all_services cluster_name="{{ cluster_name }}" wait=yes timeout=500 60 | register: step8 61 | when: 'cluster_security == "NONE"' 62 | -------------------------------------------------------------------------------- /roles/kerberisecluster/templates/desired_config.j2: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "Clusters": { 4 | "desired_config": { 5 | "type": "krb5-conf", 6 | "tag": "version2", 7 | "properties": { 8 | "domains":"", 9 | "manage_krb5_conf": "false", 10 | "conf_dir":"/etc", 11 | {% raw %} 12 | "content" : "[libdefaults]\n renew_lifetime = 
7d\n forwardable= true\n default_realm = {{realm|upper()}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes ={{encryption_types}}\n\n{% if domains %}\n[domain_realm]\n{% for domain in domains.split(',') %}\n {{domain}} = {{realm|upper()}}\n{% endfor %}\n{%endif %}\n\n[logging]\n default = FILE:/var/log/krb5kdc.log\nadmin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n admin_server = {{admin_server_host|default(kdc_host, True)}}\n kdc = {{kdc_host}}\n }\n\n{# Append additional realm declarations below #}\n" 13 | {% endraw %} 14 | } 15 | } 16 | } 17 | }, 18 | { 19 | "Clusters": { 20 | "desired_config": { 21 | "type": "kerberos-env", 22 | "tag": "version2", 23 | "properties": { 24 | "kdc_type": "mit-kdc", 25 | "manage_identities": "true", 26 | "install_packages": "true", 27 | "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 28 | "realm" : "{{ krb_realm }}", 29 | "kdc_host" : "{{ groups[cluster_name+'_directoryservices'][0] }}", 30 | "admin_server_host" : "{{ groups[cluster_name+'_directoryservices'][0] }}", 31 | "executable_search_paths" : "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 32 | "password_length": "20", 33 | "password_min_lowercase_letters": "1", 34 | "password_min_uppercase_letters": "1", 35 | "password_min_digits": "1", 36 | "password_min_punctuation": "1", 37 | "password_min_whitespace": "0", 38 | "service_check_principal_name" : "${cluster_name}-${short_date}", 39 | "case_insensitive_username_rules" : "false" 40 | } 41 | } 42 | } 43 | } 44 | ] 45 | -------------------------------------------------------------------------------- /roles/mysql_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mysql_community_7_url: "http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm" 3 | -------------------------------------------------------------------------------- /roles/mysql_server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: install mysql community release package 2 | yum: name="{{ mysql_community_7_url }}" state=present 3 | sudo: yes 4 | when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7" 5 | 6 | - name: install mysql 7 | yum: name=mysql-server state=present 8 | sudo: yes 9 | 10 | - name: start mysql 11 | service: name=mysqld state=started enabled=yes 12 | sudo: yes 13 | 14 | - name: check to see if mysql python is installed 15 | command: pip show mysql-python 16 | register: mysql_python_check 17 | failed_when: mysql_python_check.rc > 2 18 | changed_when: no 19 | 20 | - name: install epel release to get pip 21 | yum: name=epel-release state=present 22 | when: "mysql_python_check.rc == 1 or mysql_python_check.rc == 2" 23 | sudo: yes 24 | 25 | - name: install requirements for mysql-python 26 | yum: name=python-pip,python-devel,mysql-devel,gcc state=present 27 | when: "mysql_python_check.rc == 1 or mysql_python_check.rc == 2" 28 | sudo: yes 29 | 30 | - name: install mysql-python 31 | pip: name=mysql-python 32 | when: "mysql_python_check.rc == 1 or mysql_python_check.rc == 2" 33 | sudo: yes 34 | 35 | - name: remove epel release now that mysql-python is installed 36 | yum: name=epel-release state=absent 37 | when: "mysql_python_check.rc == 1 or mysql_python_check.rc == 2" 38 | sudo: yes 39 | 
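# mysql-python is installed above because the ambariserver role delegates its mysql_db / mysql_user tasks (tasks/mysql_grants.yml) to this host, and those modules require the MySQLdb bindings.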
-------------------------------------------------------------------------------- /roles/openldap_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ldif_folder: /etc/openldap/ldif 3 | ldif_files: 4 | - dn.ldif 5 | - ou.ldif 6 | - users.ldif 7 | - groups.ldif 8 | olc_database_file: '/etc/openldap/slapd.d/cn=config/olcDatabase={2}bdb.ldif' 9 | olc_database_file_cent7: '/etc/openldap/slapd.d/cn=config/olcDatabase={2}hdb.ldif' 10 | -------------------------------------------------------------------------------- /roles/openldap_server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include ldap config 3 | include_vars: vars/ldap_config 4 | 5 | - name: include user config 6 | include_vars: vars/users.yml 7 | 8 | - name: install server and clients 9 | yum: name=openldap,openldap-clients,openldap-servers state=present 10 | sudo: yes 11 | register: ldap_installed 12 | 13 | - name: get slapd password hash 14 | command: /usr/sbin/slappasswd -s "{{ ldap_root_pass }}" 15 | changed_when: no 16 | register: slapd_hash 17 | 18 | - name: set database file when centos 7 19 | set_fact: olc_database_file="{{ olc_database_file_cent7 }}" 20 | when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7" 21 | 22 | #Replace this with ldif files 23 | - name: change olcdb bdb file 24 | lineinfile: > 25 | dest="{{ olc_database_file }}" 26 | regexp="{{ item.linestart }}" line="{{ item.line }}" 27 | sudo: yes 28 | with_items: 29 | - linestart: '^olcRootPW:' 30 | line: "olcRootPW: {{ slapd_hash.stdout }}" 31 | - linestart: '^olcSuffix:' 32 | line: "olcSuffix: {{ olcSuffix }}" 33 | - linestart: '^olcRootDN:' 34 | line: "olcRootDN: {{ olcRootDN }}" 35 | - linestart: '^olcAccess: \{0\}' 36 | line: 'olcAccess: {0}to attrs=userPassword by self write by dn.base="{{ olcRootDN }}" write by anonymous auth by * none' 37 | - linestart: '^olcAccess: \{1\}' 38 | line: 'olcAccess: {1}to * by dn.base="{{ olcRootDN }}" write by self write by * read' 39 | when: ldap_installed.changed 40 | 41 | - name: change olcdb monitor file 42 | replace: > 43 | dest='/etc/openldap/slapd.d/cn=config/olcDatabase={1}monitor.ldif' 44 | regexp='cn=manager,dc=my-domain,dc=com' replace="{{ olcRootDN }}" 45 | sudo: yes 46 | 47 | - name: start slapd service 48 | service: name=slapd state=started enabled=yes 49 | sudo: yes 50 | 51 | - name: make ldif folder 52 | file: path="{{ ldif_folder }}" state=directory owner=root group=root mode=700 53 | sudo: yes 54 | 55 | - name: load in schema ldifs 56 | shell: ldapadd -Y EXTERNAL -H ldapi:// -f "{{ item }}" 57 | sudo: yes 58 | with_items: 59 | - /etc/openldap/schema/cosine.ldif 60 | - /etc/openldap/schema/nis.ldif 61 | - /etc/openldap/schema/inetorgperson.ldif 62 | register: schemaldif 63 | ignore_errors: yes 64 | failed_when: "schemaldif.rc != 0 and not 'Duplicate attributeType' in schemaldif.stderr" 65 | changed_when: "not 'Duplicate attributeType' in schemaldif.stderr" 66 | 67 | - name: place over ldif files 68 | template: src="{{ item }}.j2" dest="{{ ldif_folder }}/{{ item }}" owner=root group=root mode=600 69 | sudo: yes 70 | with_items: ldif_files 71 | register: ldif_copy 72 | 73 | - name: add the ldifs to ldap 74 | shell: ldapadd -f "{{ ldif_folder }}/{{ item.0 }}" -D "{{ olcRootDN }}" -w "{{ ldap_root_pass }}" 75 | sudo: yes 76 | with_together: 77 | - ldif_files 78 | - ldif_copy.results 79 | when: item.1.changed 80 | register: ldap_add 81 | 
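# Once the ldifs are loaded, the directory can be sanity-checked anonymously (the olcAccess {1} rule above grants "* read"); with the default vars/ldap_config values:
82 | #   ldapsearch -x -H ldap://localhost -b "dc=hadoop,dc=test" "(objectClass=posixAccount)"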
-------------------------------------------------------------------------------- /roles/openldap_server/templates/dn.ldif.j2: -------------------------------------------------------------------------------- 1 | dn: {{ olcSuffix }} 2 | objectClass: dcObject 3 | objectClass: organization 4 | dc: {{ ldap_dc }} 5 | o: {{ ldap_o }} 6 | -------------------------------------------------------------------------------- /roles/openldap_server/templates/groups.ldif.j2: -------------------------------------------------------------------------------- 1 | {% for grp in user_groups %} 2 | 3 | dn: cn={{ grp.cn }},ou={{ ldap_ou }},{{ olcSuffix }} 4 | cn: {{ grp.cn }} 5 | gidnumber: {{ grp.gid }} 6 | {% for user in grp.users %} 7 | memberuid: {{ user }} 8 | {% endfor %} 9 | objectclass: posixGroup 10 | objectclass: top 11 | 12 | {% endfor %} 13 | -------------------------------------------------------------------------------- /roles/openldap_server/templates/ou.ldif.j2: -------------------------------------------------------------------------------- 1 | dn: ou={{ ldap_ou }},{{ olcSuffix }} 2 | objectClass: organizationalUnit 3 | ou: {{ ldap_ou }} 4 | -------------------------------------------------------------------------------- /roles/openldap_server/templates/users.ldif.j2: -------------------------------------------------------------------------------- 1 | {% for user in users %} 2 | 3 | dn: cn={{ user.cn }},ou={{ ldap_ou }},{{ olcSuffix }} 4 | cn: {{ user.cn }} 5 | sn: {{ user.sn }} 6 | objectClass: inetOrgPerson 7 | objectClass: posixAccount 8 | uid: {{ user.username }} 9 | uidNumber: {{ user.uid }} 10 | gidNumber: {{ user.gid }} 11 | homeDirectory: /home/{{ user.username }}/ 12 | loginshell: /bin/bash 13 | 14 | {% endfor %} 15 | -------------------------------------------------------------------------------- /roles/oracle-java/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jce_security_path: /usr/java/default/jre/lib/security/ 3 | -------------------------------------------------------------------------------- /roles/oracle-java/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include set vars task 3 | include: set_vars.yml 4 | 5 | - name: check to see if jdk is installed 6 | command: rpm -q jdk 7 | register: jdk_check 8 | failed_when: jdk_check.rc > 1 9 | changed_when: no 10 | 11 | - name: download oracle jdk if not installed 12 | command: curl -L -H 'Cookie:oraclelicense=accept-securebackup-cookie' -o /tmp/{{ jdk_tarball_file }}.rpm {{ jdk_tarball_url }}.rpm 13 | when: jdk_check.rc == 1 14 | 15 | - name: install oracle jdk if not installed 16 | yum: name=/tmp/{{ jdk_tarball_file }}.rpm state=present 17 | when: jdk_check.rc == 1 18 | sudo: yes 19 | 20 | - name: remove oracle jdk file 21 | file: path=/tmp/{{ jdk_tarball_file }}.rpm state=absent 22 | when: jdk_check.rc == 1 23 | 24 | - name: create JCE path 25 | file: path="{{ jce_security_path }}" state=directory mode=0755 owner=root group=root 26 | sudo: yes 27 | 28 | - name: check to see if unlimited jce has been done 29 | stat: path="{{ jce_security_path }}/unlimited_jce_applied.ansible" 30 | register: jce_test 31 | 32 | - name: download unlimited jce 33 | command: curl -L -H 'Cookie:oraclelicense=accept-securebackup-cookie' -o "/tmp/UnlimitedJCEPolicyJDK{{ java_version }}.zip" "{{ unlimited_jce_url }}" 34 | when: "not jce_test.stat.exists" 35 | 36 | - name: install unzip 37 | yum: name=unzip state=present 38 | 
sudo: yes 39 | 40 | - name: unzip the jce policies 41 | command: unzip -o "/tmp/UnlimitedJCEPolicyJDK{{ java_version }}.zip" 42 | args: 43 | chdir: /tmp/ 44 | when: "not jce_test.stat.exists" 45 | 46 | - name: copy policies into jce security dir 47 | command: cp "/tmp/{{ unlimited_jce_path }}/{{ item }}" "{{ jce_security_path }}/{{ item }}" 48 | when: "not jce_test.stat.exists" 49 | sudo: yes 50 | with_items: 51 | - local_policy.jar 52 | - US_export_policy.jar 53 | 54 | - name: set permissions on jce policy jars 55 | file: path="{{ jce_security_path }}/{{ item }}" owner=root group=root mode=444 56 | when: "not jce_test.stat.exists" 57 | sudo: yes 58 | with_items: 59 | - local_policy.jar 60 | - US_export_policy.jar 61 | 62 | - name: remove jce files 63 | file: path="{{ item }}" state=absent 64 | with_items: 65 | - "/tmp/UnlimitedJCEPolicyJDK{{ java_version }}.zip" 66 | - "/tmp/{{ unlimited_jce_path }}" 67 | 68 | - name: place marker file when done 69 | file: path="{{ jce_security_path }}/unlimited_jce_applied.ansible" state=touch 70 | when: "not jce_test.stat.exists" 71 | sudo: yes 72 | -------------------------------------------------------------------------------- /roles/oracle-java/tasks/set_vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Variables for oracle java install 3 | 4 | - name: set internal vars for 1.8.0_65 5 | set_fact: 6 | jdk_version: 1.8.0_65 7 | jdk_tarball_file: jdk-8u65-linux-x64 8 | jdk_tarball_url: http://download.oracle.com/otn-pub/java/jdk/8u65-b17/jdk-8u65-linux-x64 9 | unlimited_jce_url: http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip 10 | unlimited_jce_path: UnlimitedJCEPolicyJDK8 11 | when: java_version == 8 and java_subversion == 65 12 | 13 | - name: set internal vars for 1.8.0_60 14 | set_fact: 15 | jdk_version: 1.8.0_60 16 | jdk_tarball_file: jdk-8u60-linux-x64 17 | jdk_tarball_url: http://download.oracle.com/otn-pub/java/jdk/8u60-b27/jdk-8u60-linux-x64 18 | unlimited_jce_url: http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip 19 | unlimited_jce_path: UnlimitedJCEPolicyJDK8 20 | when: java_version == 8 and java_subversion == 60 21 | 22 | - name: set internal vars for 1.8.0_51 23 | set_fact: 24 | jdk_version: 1.8.0_51 25 | jdk_tarball_file: jdk-8u51-linux-x64 26 | jdk_tarball_url: http://download.oracle.com/otn-pub/java/jdk/8u51-b16/jdk-8u51-linux-x64 27 | unlimited_jce_url: http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip 28 | unlimited_jce_path: UnlimitedJCEPolicyJDK8 29 | when: java_version == 8 and java_subversion == 51 30 | 31 | - name: set internal vars for 1.8.0_45 32 | set_fact: 33 | jdk_version: 1.8.0_45 34 | jdk_tarball_file: jdk-8u45-linux-x64 35 | jdk_tarball_url: http://download.oracle.com/otn-pub/java/jdk/8u45-b14/jdk-8u45-linux-x64 36 | unlimited_jce_url: http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip 37 | unlimited_jce_path: UnlimitedJCEPolicyJDK8 38 | when: java_version == 8 and java_subversion == 45 39 | 40 | - name: set internal vars for 1.8.0_31 41 | set_fact: 42 | jdk_version: 1.8.0_31 43 | jdk_tarball_file: jdk-8u31-linux-x64 44 | jdk_tarball_url: http://download.oracle.com/otn-pub/java/jdk/8u31-b13/jdk-8u31-linux-x64 45 | unlimited_jce_url: http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip 46 | unlimited_jce_path: UnlimitedJCEPolicyJDK8 47 | when: java_version == 8 and java_subversion == 31 48 | 49 | 50 | 51 | 52 | - name: set internal vars for 1.7.0_80 53 | set_fact: 54 | jdk_version: 1.7.0_80 55 | 
jdk_tarball_file: jdk-7u80-linux-x64 56 | jdk_tarball_url: http://download.oracle.com/otn-pub/java/jdk/7u80-b15/jdk-7u80-linux-x64 57 | unlimited_jce_url: http://download.oracle.com/otn-pub/java/jce/7/UnlimitedJCEPolicyJDK7.zip 58 | unlimited_jce_path: UnlimitedJCEPolicy 59 | when: java_version == 7 and java_subversion == 80 60 | 61 | - name: set internal vars for 1.7.0_75 62 | set_fact: 63 | jdk_version: 1.7.0_75 64 | jdk_tarball_file: jdk-7u75-linux-x64 65 | jdk_tarball_url: http://download.oracle.com/otn-pub/java/jdk/7u75-b13/jdk-7u75-linux-x64 66 | unlimited_jce_url: http://download.oracle.com/otn-pub/java/jce/7/UnlimitedJCEPolicyJDK7.zip 67 | unlimited_jce_path: UnlimitedJCEPolicy 68 | when: java_version == 7 and java_subversion == 75 69 | -------------------------------------------------------------------------------- /roles/ssl_certs/tasks/gen_certs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include cert vars 3 | include_vars: vars/cert_vars.yml 4 | 5 | - name: create server keys dir on all nodes 6 | file: path="{{ cert_dir }}" state=directory 7 | sudo: yes 8 | register: create_key_dir 9 | 10 | - name: find keytool application 11 | shell: find / -name keytool 2>/dev/null | grep "1.{{ java_version }}.0_{{ java_subversion }}" | head -1 | grep keytool 12 | register: keytool_find 13 | changed_when: false 14 | 15 | - name: create ssl keystores 16 | shell: "{{ keytool_find['stdout'] }} -genkey -keyalg RSA -alias {{ item }} -keystore {{ cert_dir }}/{{ item }}.jks -storepass {{ keystore_default_pass }} -validity 360 -keysize 2048 -dname cn={{ item }},ou=Bigdata,o=Hadoop,c=UK -keypass {{ key_default_pass }}" 17 | with_flattened: 18 | - groups.all 19 | - ranger_certs 20 | sudo: yes 21 | args: 22 | creates: "{{ cert_dir }}/{{ item }}.jks" 23 | when: "inventory_hostname == groups['directoryservices'][0] or item != '127.0.0.1'" 24 | register: create_keys 25 | 26 | - name: create ssl certificates and import into truststore 27 | shell: "{{ keytool_find['stdout'] }} -export -keystore {{ cert_dir }}/{{ item }}.jks -alias {{ item }} -file {{ cert_dir }}/{{ item }}.crt -storepass {{ keystore_default_pass }}; {{ keytool_find['stdout'] }} -import -file {{ cert_dir }}/{{ item }}.crt -alias {{ item }} -keystore {{ cert_dir }}/clusterTrustStore.jks -storepass {{ truststore_default_pass }} -trustcacerts -noprompt" 28 | with_flattened: 29 | - groups.all 30 | - ranger_certs 31 | sudo: yes 32 | args: 33 | creates: "{{ cert_dir }}/{{ item }}.crt" 34 | when: "inventory_hostname == groups['directoryservices'][0] or item != '127.0.0.1'" 35 | register: create_keys 36 | 37 | - name: create temp folder on remote fs 38 | file: path="{{ temp_remote_path }}" state=directory 39 | when: "create_keys.changed and inventory_hostname == groups['directoryservices'][0]" 40 | 41 | - name: tar up certs if they have changed 42 | shell: "tar -czf {{ temp_remote_path }}/{{ tar_filename }} -C {{ cert_dir }} {*.jks,*.crt}" 43 | args: 44 | chdir: "{{ cert_dir }}" 45 | when: "create_keys.changed and inventory_hostname == groups['directoryservices'][0]" 46 | 47 | - name: create temp folder on local fs 48 | file: path="{{ temp_local_path }}" state=directory 49 | when: "create_keys.changed and inventory_hostname == groups['directoryservices'][0]" 50 | delegate_to: 127.0.0.1 51 | 52 | - name: fetch the cert bundle 53 | fetch: dest="{{ temp_local_path }}" src="{{ temp_remote_path }}/{{ tar_filename }}" flat=yes fail_on_missing=yes 54 | when: "create_keys.changed and 
inventory_hostname == groups['directoryservices'][0]" 55 | 56 | - name: check if we have a tar bundle to work on 57 | stat: path="{{ temp_local_path }}/{{ tar_filename }}" 58 | delegate_to: 127.0.0.1 59 | register: tar_stat 60 | 61 | - name: unpack tar to remote nodes 62 | unarchive: src="{{ temp_local_path }}/{{ tar_filename }}" dest="{{ cert_dir }}" 63 | sudo: yes 64 | when: tar_stat.stat.exists 65 | 66 | - name: remove temp folder on remote fs 67 | file: path="{{ temp_remote_path }}" state=absent 68 | when: "create_keys.changed and inventory_hostname == groups['directoryservices'][0]" 69 | 70 | - name: remove temp folder on local fs 71 | file: path="{{ temp_local_path }}" state=absent 72 | when: "create_keys.changed and inventory_hostname == groups['directoryservices'][0]" 73 | delegate_to: 127.0.0.1 74 | 75 | - name: import certs into java truststore 76 | shell: "{{ keytool_find['stdout'] }} -list -v -keystore {{ java_truststore_location }} -storepass {{ java_truststore_default_pass }} | grep {{ item }} || {{ keytool_find['stdout'] }} -import -file {{ cert_dir }}/{{ item }}.crt -alias {{ item }} -keystore {{ java_truststore_location }} -storepass {{ java_truststore_default_pass }} -trustcacerts -noprompt" 77 | with_flattened: 78 | - groups.all 79 | - ranger_certs 80 | sudo: yes 81 | register: java_truststore 82 | changed_when: "'Certificate was added to keystore' in java_truststore.stderr" 83 | when: "item != '127.0.0.1'" 84 | -------------------------------------------------------------------------------- /roles/ssl_certs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include certs tasks when we have some ssl services 3 | include: gen_certs.yml 4 | when: ssl_services != "" 5 | -------------------------------------------------------------------------------- /roles/sssd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include ldap config 3 | include_vars: vars/ldap_config 4 | 5 | - name: install required packages for sssd 6 | yum: name="{{ item }}" state=present 7 | sudo: yes 8 | with_items: 9 | - sssd-client 10 | - sssd-common 11 | - sssd-common-pac 12 | - sssd-krb5 13 | - sssd-ldap 14 | - sssd-proxy 15 | - python-sssdconfig 16 | - authconfig 17 | - authconfig-gtk 18 | 19 | - name: run authconfig to configure sssd 20 | shell: > 21 | authconfig --enablesssd --enablesssdauth --enablelocauthorize --enableldap --enablekrb5 22 | --ldapserver=ldap://{{ groups[cluster_name+'_directoryservices'][0] }}:389 --disableldaptls 23 | --ldapbasedn={{ olcSuffix }} --enablemkhomedir --enablecachecreds --update 24 | --krb5kdc={{ groups[cluster_name+'_directoryservices'][0] }} --krb5adminserver={{ groups[cluster_name+'_directoryservices'][0] }} 25 | --krb5realm={{ krb_realm }} 26 | args: 27 | creates: /etc/sssd/sssd.conf 28 | sudo: yes 29 | register: authconf 30 | 31 | - name: change schema version 32 | lineinfile: dest=/etc/sssd/sssd.conf regexp='^ldap_schema' line='ldap_schema = rfc2307' 33 | sudo: yes 34 | register: schemachange 35 | 36 | - name: remove cache db file 37 | file: path=/var/lib/sss/db/cache_default.ldb state=absent 38 | sudo: yes 39 | when: authconf.changed or schemachange.changed 40 | 41 | - name: restart sssd for changes to come into effect 42 | service: name=sssd state=restarted enabled=yes 43 | when: authconf.changed or schemachange.changed 44 | sudo: yes 45 | -------------------------------------------------------------------------------- 
/vars/cert_vars.yml: -------------------------------------------------------------------------------- 1 | keystore_default_pass: bigdata 2 | truststore_default_pass: changeit 3 | key_default_pass: bigdata 4 | cert_dir: /etc/security/serverkeys 5 | ranger_certs: 6 | - rangerHdfsAgent 7 | - rangerHiveAgent 8 | - rangerHbaseAgent 9 | - rangerStormAgent 10 | - rangerKafkaAgent 11 | temp_local_path: /tmp/ansible_hdp_certs_local 12 | temp_remote_path: /tmp/ansible_hdp_certs_remote 13 | tar_filename: certs.tar.gz 14 | java_truststore_default_pass: changeit 15 | #Often /etc/pki/java/cacerts 16 | java_truststore_location: "/usr/java/jdk1.{{ java_version }}.0_{{ java_subversion }}/jre/lib/security/cacerts" 17 | -------------------------------------------------------------------------------- /vars/kdc_config: -------------------------------------------------------------------------------- 1 | --- 2 | kdc_master_key: kdcmaster 3 | kdc_admin_username: kdcadmin 4 | kdc_admin_password: kdcadmin 5 | -------------------------------------------------------------------------------- /vars/ldap_config: -------------------------------------------------------------------------------- 1 | --- 2 | ldap_dc: hadoop 3 | ldap_o: "{{ ldap_dc }}" 4 | ldap_root_pass: "openldap" 5 | olcSuffix: dc={{ ldap_dc }},dc=test 6 | olcRootDN: "cn=Manager,{{ olcSuffix }}" 7 | ldap_ou: Users 8 | -------------------------------------------------------------------------------- /vars/users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | users: 3 | - username: test_user 4 | password: test_user 5 | cn: Test User 6 | sn: User 7 | uid: 50001 8 | gid: 100 9 | - username: test_user1 10 | password: test_user 11 | cn: Test User1 12 | sn: User1 13 | uid: 50002 14 | gid: 100 15 | 16 | user_groups: 17 | - cn: access1 18 | gid: 100001 19 | users: 20 | - test_user1 21 | --------------------------------------------------------------------------------