├── .gitignore ├── README.md ├── Vagrantfile ├── bin └── profile-builder.py ├── environments └── structor │ └── manifests │ └── site.pp ├── files ├── init.d │ ├── hbase-master │ ├── hbase-regionserver │ └── knox-gateway └── repos │ ├── centos6.ambari.repo.2.1.0 │ ├── centos6.ambari.repo.2.4.2 │ ├── centos6.hdp.repo.2.2.4 │ ├── centos6.hdp.repo.2.2.6 │ ├── centos6.hdp.repo.2.4.2 │ ├── centos6.hdp.repo.2.5.1 │ ├── centos6.hdp.repo.2.5.3 │ └── centos6.hdp.repo.2.6.1 ├── fileserver.conf ├── modules ├── ambari_agent │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── ambari-agent.erb ├── ambari_server │ └── manifests │ │ └── init.pp ├── certification │ └── manifests │ │ └── init.pp ├── dev │ └── manifests │ │ └── init.pp ├── hadoop_server │ └── manifests │ │ └── init.pp ├── hbase_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── hbase-env.sh.erb │ │ ├── hbase-site.xml.erb │ │ ├── log4j.properties.erb │ │ ├── regionservers.erb │ │ └── zk-jaas.erb ├── hbase_master │ └── manifests │ │ └── init.pp ├── hbase_regionserver │ └── manifests │ │ └── init.pp ├── hbase_server │ └── manifests │ │ └── init.pp ├── hdfs_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── commons-logging.erb │ │ ├── configuration.erb │ │ ├── core-site.erb │ │ ├── hadoop-env.erb │ │ ├── hadoop-metrics2.erb │ │ ├── hadoop-policy.erb │ │ ├── hdfs-site.erb │ │ ├── log4j.erb │ │ └── ssl-client.erb ├── hdfs_datanode │ └── manifests │ │ └── init.pp ├── hdfs_namenode │ └── manifests │ │ └── init.pp ├── hdp_select │ └── manifests │ │ └── init.pp ├── hive_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── hive-env.erb │ │ ├── hive-log4j.erb │ │ └── hive-site.erb ├── hive_db │ ├── files │ │ ├── add-remote-root.sh │ │ ├── secure-mysql.txt │ │ └── setup-hive.txt │ └── manifests │ │ └── init.pp ├── hive_hs2 │ └── manifests │ │ └── init.pp ├── hive_meta │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── hive-metastore.erb ├── install_hdfs_tarballs │ └── manifests │ │ └── 
init.pp ├── ip_setup │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── hosts.erb ├── jdk │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── java.erb ├── kafka_server │ ├── files │ │ └── kafka │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── consumer.properties.erb │ │ ├── create_test_topic.sh.erb │ │ ├── producer.properties.erb │ │ └── server.properties.erb ├── kerberos_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── krb5.erb ├── kerberos_http │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── create-cert.erb │ │ └── ssl-server.erb ├── kerberos_kdc │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── create-kerberos-kdc.erb │ │ └── kdc.erb ├── knox_gateway │ └── manifests │ │ └── init.pp ├── load_hdfs_keytab │ └── manifests │ │ └── init.pp ├── ntp │ └── manifests │ │ └── init.pp ├── oozie_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── oozie.erb ├── oozie_server │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── adminusers.erb │ │ ├── create-oozie-db-user.erb │ │ ├── oozie-env.erb │ │ ├── oozie-service.erb │ │ └── oozie-site.erb ├── pig_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── log4j.erb │ │ ├── pig-env.erb │ │ └── pig.erb ├── repos_setup │ └── manifests │ │ └── init.pp ├── selinux │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── selinux.erb ├── spark_client │ └── manifests │ │ └── init.pp ├── ssl_ca │ ├── files │ │ └── ca-info.txt │ └── manifests │ │ └── init.pp ├── storm_server │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── storm-init.epp │ │ └── storm-yaml.erb ├── tez_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── tez-env.erb │ │ └── tez-site.erb ├── vm_users │ └── manifests │ │ └── init.pp ├── weak_random │ └── manifests │ │ └── init.pp ├── yarn_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── capacity-scheduler.erb │ │ ├── mapred-env.erb │ │ ├── mapred-site.erb │ │ ├── task-log4j.erb │ │ ├── yarn-env.erb │ │ └── yarn-site.erb ├── 
yarn_node_manager │ ├── manifests │ │ └── init.pp │ └── templates │ │ └── container-executor.erb ├── yarn_resource_manager │ └── manifests │ │ └── init.pp ├── zookeeper_client │ ├── manifests │ │ └── init.pp │ └── templates │ │ ├── log4j.erb │ │ ├── zoo.erb │ │ ├── zookeeper-client.erb │ │ └── zookeeper-env.erb └── zookeeper_server │ ├── manifests │ └── init.pp │ └── templates │ ├── configuration.erb │ ├── myid.erb │ └── zookeeper-server.erb ├── profiles ├── 1node-hbase-nonsecure.profile ├── 1node-hbase-secure.profile ├── 1node-nonsecure.profile ├── 1node-secure.profile ├── 3node-analytics-nonsecure.profile ├── 3node-hbase-nonsecure.profile ├── 3node-hbase-secure.profile ├── 3node-nonsecure.profile ├── 3node-odpi.profile ├── 3node-secure.profile ├── 3node-spark-nonsecure.profile ├── 5node-nonsecure.profile ├── ambari-nonsecure-2-node.profile ├── ambari-nonsecure-4-nodes.profile └── knox-nonsecure.profile └── puppet └── parser └── functions └── hasrole.rb /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | id_rsa.pem 3 | current.profile 4 | generated/ 5 | *~ 6 | *.swp 7 | -------------------------------------------------------------------------------- /environments/structor/manifests/site.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | include repos_setup 17 | include vm_users 18 | include ip_setup 19 | include selinux 20 | include weak_random 21 | include ntp 22 | 23 | # determine the required modules based on the roles. 24 | 25 | if $security == "true" { 26 | include kerberos_client 27 | } 28 | 29 | if $security == "true" and hasrole($roles, 'kdc') { 30 | include kerberos_kdc 31 | } 32 | 33 | if hasrole($roles, 'ambari-agent') { 34 | include ambari_agent 35 | } 36 | 37 | if hasrole($roles, 'ambari-server') { 38 | include ambari_server 39 | } 40 | 41 | if hasrole($roles, 'cert') { 42 | include certification 43 | } 44 | 45 | if hasrole($roles, 'client') { 46 | if hasrole($clients, 'hbase') { 47 | include hbase_client 48 | } 49 | if hasrole($clients, 'spark') { 50 | include spark_client 51 | } 52 | if hasrole($clients, 'hdfs') { 53 | include hdfs_client 54 | } 55 | if hasrole($clients, 'hive') { 56 | include hive_client 57 | } 58 | if hasrole($clients, 'oozie') { 59 | include oozie_client 60 | } 61 | if hasrole($clients, 'pig') { 62 | include pig_client 63 | } 64 | if hasrole($clients, 'tez') { 65 | include tez_client 66 | } 67 | if hasrole($clients, 'yarn') { 68 | include yarn_client 69 | } 70 | if hasrole($clients, 'zk') { 71 | include zookeeper_client 72 | } 73 | } 74 | 75 | if hasrole($roles, 'dev') { 76 | include dev 77 | } 78 | 79 | if hasrole($roles, 'hbase-master') { 80 | include hbase_master 81 | } 82 | 83 | if hasrole($roles, 'hbase-regionserver') { 84 | include hbase_regionserver 85 | } 86 | 87 | if hasrole($roles, 'hive-db') { 88 | include 
hive_db 89 | } 90 | 91 | if hasrole($roles, 'hive-meta') { 92 | include hive_meta 93 | } 94 | 95 | if hasrole($roles, 'hive-hs2') { 96 | include hive_hs2 97 | } 98 | 99 | if hasrole($roles, 'knox') { 100 | include knox_gateway 101 | } 102 | 103 | if hasrole($roles, 'nn') { 104 | include hdfs_namenode 105 | } 106 | 107 | if hasrole($roles, 'oozie') { 108 | include oozie_server 109 | } 110 | 111 | if hasrole($roles, 'slave') { 112 | include hdfs_datanode 113 | include yarn_node_manager 114 | } 115 | 116 | if hasrole($roles, 'yarn') { 117 | include yarn_resource_manager 118 | } 119 | 120 | if hasrole($roles, 'zk') { 121 | include zookeeper_server 122 | } 123 | 124 | if hasrole($roles, 'kafka') { 125 | include kafka_server 126 | } 127 | 128 | if hasrole($roles, 'storm_nimbus') { 129 | include storm_server 130 | } 131 | 132 | if hasrole($roles, 'storm_worker') { 133 | include storm_server 134 | } 135 | 136 | if islastslave($nodes, $hostname) { 137 | include install_hdfs_tarballs 138 | } 139 | 140 | # Ensure the kdc is brought up before the namenode and hive metastore 141 | if $security == "true" and hasrole($roles, 'kdc') { 142 | if hasrole($roles, 'nn') { 143 | Class['kerberos_kdc'] -> Class['hdfs_namenode'] 144 | } 145 | 146 | if hasrole($roles, 'hive-meta') { 147 | Class['kerberos_kdc'] -> Class['hive_meta'] 148 | } 149 | 150 | if hasrole($roles, 'hbase-master') { 151 | Class['kerberos_kdc'] -> Class['hbase_master'] 152 | } 153 | 154 | if hasrole($roles, 'hbase-regionserver') { 155 | Class['kerberos_kdc'] -> Class['hbase_regionserver'] 156 | } 157 | } 158 | 159 | # Ensure the namenode is brought up before the slaves, jobtracker, metastore, 160 | # and oozie 161 | if hasrole($roles, 'nn') { 162 | if hasrole($roles, 'slave') { 163 | Class['hdfs_namenode'] -> Class['hdfs_datanode'] 164 | } 165 | 166 | if hasrole($roles, 'yarn') { 167 | Class['hdfs_namenode'] -> Class['yarn_resource_manager'] 168 | } 169 | 170 | if hasrole($roles, 'hive-meta') { 171 | 
Class['hdfs_namenode'] -> Class['hive_meta'] 172 | } 173 | 174 | if hasrole($roles, 'oozie') { 175 | Class['hdfs_namenode'] -> Class['oozie_server'] 176 | } 177 | 178 | if hasrole($roles, 'hbase-master') { 179 | Class['hdfs_namenode'] -> Class['hbase_master'] 180 | } 181 | 182 | if hasrole($roles, 'hbase-regionserver') { 183 | Class['hdfs_namenode'] -> Class['hbase_regionserver'] 184 | } 185 | } 186 | 187 | # Ensure the db is started before oozie and hive metastore 188 | if hasrole($roles, 'hive-db') { 189 | if hasrole($roles, 'hive-meta') { 190 | Class['hive_db'] -> Class['hive_meta'] 191 | } 192 | 193 | if hasrole($roles, 'oozie') { 194 | Class['hive_db'] -> Class['oozie_server'] 195 | } 196 | } 197 | 198 | # Ensure oozie runs after the datanode on the same node 199 | if hasrole($roles, 'slave') and hasrole($roles, 'oozie') { 200 | Class['hdfs_datanode'] -> Class['oozie_server'] 201 | } 202 | 203 | if hasrole($roles, 'hbase-master') { 204 | if hasrole($roles, 'hbase-regionserver') { 205 | Class['hbase_master'] -> Class['hbase_regionserver'] 206 | } 207 | 208 | # The master needs a datanode before it can start up 209 | if hasrole($roles, 'slave') { 210 | Class['hdfs_datanode'] -> Class['hbase_master'] 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /files/init.d/hbase-master: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one or more 4 | # contributor license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright ownership. 6 | # The ASF licenses this file to You under the Apache License, Version 2.0 7 | # (the "License"); you may not use this file except in compliance with 8 | # the License. 
You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | # Starts the local HBase Master 19 | # 20 | # chkconfig: 345 85 15 21 | # description: HBase Master 22 | # 23 | ### BEGIN INIT INFO 24 | # Provides: hbase-master 25 | # Short-Description: HBase Master 26 | # Default-Start: 3 4 5 27 | # Default-Stop: 0 1 2 6 28 | # Required-Start: $syslog $remote_fs 29 | # Required-Stop: $syslog $remote_fs 30 | # Should-Start: 31 | # Should-Stop: 32 | ### END INIT INFO 33 | 34 | . /lib/lsb/init-functions 35 | 36 | RETVAL_SUCCESS=0 37 | 38 | STATUS_RUNNING=0 39 | STATUS_DEAD=1 40 | STATUS_DEAD_AND_LOCK=2 41 | STATUS_NOT_RUNNING=3 42 | STATUS_OTHER_ERROR=102 43 | 44 | 45 | ERROR_PROGRAM_NOT_INSTALLED=5 46 | ERROR_PROGRAM_NOT_CONFIGURED=6 47 | 48 | 49 | RETVAL=0 50 | SLEEP_TIME=5 51 | PROC_NAME="java" 52 | 53 | DAEMON="hbase-master" 54 | DESC="HBase Master" 55 | EXEC_PATH="/usr/hdp/current/hbase-master/bin/hbase-daemon.sh" 56 | DAEMON_FLAGS="master" 57 | CONF_DIR="/etc/hbase/conf" 58 | LOCKDIR="/var/lock/subsys" 59 | LOCKFILE="$LOCKDIR/hbase-master" 60 | WORKING_DIR="/var/run/hbase" 61 | SVC_USER=hbase 62 | 63 | # set user since the pid/log dir often depend on it 64 | USER="$SVC_USER" 65 | . 
$CONF_DIR/hbase-env.sh 66 | PIDFILE="$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-master.pid" 67 | 68 | if [ -n "$HBASE_SECURE_DN_USER" ]; then 69 | TARGET_USER=root 70 | else 71 | TARGET_USER=${HBASE_REGIONSERVER_USER:-hbase} 72 | fi 73 | 74 | install -d -m 0755 -o hbase -g hbase /var/lib/hadoop-hbase 1>/dev/null 2>&1 || : 75 | [ -d "$LOCKDIR" ] || install -d -m 0755 $LOCKDIR 1>/dev/null 2>&1 || : 76 | start() { 77 | [ -x $EXEC_PATH ] || exit $ERROR_PROGRAM_NOT_INSTALLED 78 | [ -d $CONF_DIR ] || exit $ERROR_PROGRAM_NOT_CONFIGURED 79 | log_success_msg "Starting ${DESC}: " 80 | 81 | su -s /bin/bash $TARGET_USER -c "$EXEC_PATH --config '$CONF_DIR' start $DAEMON_FLAGS" 82 | 83 | # Some processes are slow to start 84 | sleep $SLEEP_TIME 85 | checkstatusofproc 86 | RETVAL=$? 87 | 88 | [ $RETVAL -eq $RETVAL_SUCCESS ] && touch $LOCKFILE 89 | return $RETVAL 90 | } 91 | 92 | stop() { 93 | log_success_msg "Stopping ${DESC}: " 94 | su -s /bin/bash $TARGET_USER \ 95 | -c "$EXEC_PATH --config $CONF_DIR stop $DAEMON_FLAGS" 96 | RETVAL=$? 97 | 98 | [ $RETVAL -eq $RETVAL_SUCCESS ] && rm -f $LOCKFILE $PIDFILE 99 | } 100 | 101 | restart() { 102 | stop 103 | start 104 | } 105 | 106 | checkstatusofproc(){ 107 | pidofproc -p $PIDFILE $PROC_NAME > /dev/null 108 | } 109 | 110 | checkstatus(){ 111 | checkstatusofproc 112 | status=$? 
113 | 114 | case "$status" in 115 | $STATUS_RUNNING) 116 | log_success_msg "${DESC} is running" 117 | ;; 118 | $STATUS_DEAD) 119 | log_failure_msg "${DESC} is dead and pid file exists" 120 | ;; 121 | $STATUS_DEAD_AND_LOCK) 122 | log_failure_msg "${DESC} is dead and lock file exists" 123 | ;; 124 | $STATUS_NOT_RUNNING) 125 | log_failure_msg "${DESC} is not running" 126 | ;; 127 | *) 128 | log_failure_msg "${DESC} status is unknown" 129 | ;; 130 | esac 131 | return $status 132 | } 133 | 134 | condrestart(){ 135 | [ -e $LOCKFILE ] && restart || : 136 | } 137 | 138 | check_for_root() { 139 | if [ $(id -ur) -ne 0 ]; then 140 | echo 'Error: root user required' 141 | echo 142 | exit 1 143 | fi 144 | } 145 | 146 | service() { 147 | case "$1" in 148 | start) 149 | check_for_root 150 | start 151 | ;; 152 | stop) 153 | check_for_root 154 | stop 155 | ;; 156 | status) 157 | checkstatus 158 | RETVAL=$? 159 | ;; 160 | restart) 161 | check_for_root 162 | restart 163 | ;; 164 | condrestart|try-restart) 165 | check_for_root 166 | condrestart 167 | ;; 168 | rollback) 169 | DAEMON_FLAGS="$DAEMON_FLAGS -${1}" 170 | start 171 | ;; 172 | *) 173 | echo $"Usage: $0 {start|stop|status|restart|try-restart|condrestart|rollback}" 174 | exit 1 175 | esac 176 | } 177 | 178 | service "$1" 179 | 180 | exit $RETVAL 181 | -------------------------------------------------------------------------------- /files/init.d/hbase-regionserver: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one or more 4 | # contributor license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright ownership. 6 | # The ASF licenses this file to You under the Apache License, Version 2.0 7 | # (the "License"); you may not use this file except in compliance with 8 | # the License. 
You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | # Starts the local HBase Regionserver 19 | # 20 | # chkconfig: 345 85 15 21 | # description: HBase Regionserver 22 | # 23 | ### BEGIN INIT INFO 24 | # Provides: hbase-regionserver 25 | # Short-Description: HBase Regionserver 26 | # Default-Start: 3 4 5 27 | # Default-Stop: 0 1 2 6 28 | # Required-Start: $syslog $remote_fs 29 | # Required-Stop: $syslog $remote_fs 30 | # Should-Start: 31 | # Should-Stop: 32 | ### END INIT INFO 33 | 34 | . /lib/lsb/init-functions 35 | 36 | RETVAL_SUCCESS=0 37 | 38 | STATUS_RUNNING=0 39 | STATUS_DEAD=1 40 | STATUS_DEAD_AND_LOCK=2 41 | STATUS_NOT_RUNNING=3 42 | STATUS_OTHER_ERROR=102 43 | 44 | 45 | ERROR_PROGRAM_NOT_INSTALLED=5 46 | ERROR_PROGRAM_NOT_CONFIGURED=6 47 | 48 | 49 | RETVAL=0 50 | SLEEP_TIME=5 51 | PROC_NAME="java" 52 | 53 | DAEMON="hbase-regionserver" 54 | DESC="HBase Regionserver" 55 | EXEC_PATH="/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh" 56 | DAEMON_FLAGS="regionserver" 57 | CONF_DIR="/etc/hbase/conf" 58 | LOCKDIR="/var/lock/subsys" 59 | LOCKFILE="$LOCKDIR/hbase-regionserver" 60 | WORKING_DIR="/var/run/hbase" 61 | SVC_USER=hbase 62 | 63 | # set user since the pid/log dir often depend on it 64 | USER="$SVC_USER" 65 | . 
$CONF_DIR/hbase-env.sh 66 | PIDFILE="$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-regionserver.pid" 67 | 68 | if [ -n "$HBASE_SECURE_DN_USER" ]; then 69 | TARGET_USER=root 70 | else 71 | TARGET_USER=${HBASE_REGIONSERVER_USER:-hbase} 72 | fi 73 | 74 | install -d -m 0755 -o hbase -g hbase /var/lib/hadoop-hbase 1>/dev/null 2>&1 || : 75 | [ -d "$LOCKDIR" ] || install -d -m 0755 $LOCKDIR 1>/dev/null 2>&1 || : 76 | start() { 77 | [ -x $EXEC_PATH ] || exit $ERROR_PROGRAM_NOT_INSTALLED 78 | [ -d $CONF_DIR ] || exit $ERROR_PROGRAM_NOT_CONFIGURED 79 | log_success_msg "Starting ${DESC}: " 80 | 81 | su -s /bin/bash $TARGET_USER -c "$EXEC_PATH --config '$CONF_DIR' start $DAEMON_FLAGS" 82 | 83 | # Some processes are slow to start 84 | sleep $SLEEP_TIME 85 | checkstatusofproc 86 | RETVAL=$? 87 | 88 | [ $RETVAL -eq $RETVAL_SUCCESS ] && touch $LOCKFILE 89 | return $RETVAL 90 | } 91 | 92 | stop() { 93 | log_success_msg "Stopping ${DESC}: " 94 | su -s /bin/bash $TARGET_USER \ 95 | -c "$EXEC_PATH --config $CONF_DIR stop $DAEMON_FLAGS" 96 | RETVAL=$? 97 | 98 | [ $RETVAL -eq $RETVAL_SUCCESS ] && rm -f $LOCKFILE $PIDFILE 99 | } 100 | 101 | restart() { 102 | stop 103 | start 104 | } 105 | 106 | checkstatusofproc(){ 107 | pidofproc -p $PIDFILE $PROC_NAME > /dev/null 108 | } 109 | 110 | checkstatus(){ 111 | checkstatusofproc 112 | status=$? 
113 | 114 | case "$status" in 115 | $STATUS_RUNNING) 116 | log_success_msg "${DESC} is running" 117 | ;; 118 | $STATUS_DEAD) 119 | log_failure_msg "${DESC} is dead and pid file exists" 120 | ;; 121 | $STATUS_DEAD_AND_LOCK) 122 | log_failure_msg "${DESC} is dead and lock file exists" 123 | ;; 124 | $STATUS_NOT_RUNNING) 125 | log_failure_msg "${DESC} is not running" 126 | ;; 127 | *) 128 | log_failure_msg "${DESC} status is unknown" 129 | ;; 130 | esac 131 | return $status 132 | } 133 | 134 | condrestart(){ 135 | [ -e $LOCKFILE ] && restart || : 136 | } 137 | 138 | check_for_root() { 139 | if [ $(id -ur) -ne 0 ]; then 140 | echo 'Error: root user required' 141 | echo 142 | exit 1 143 | fi 144 | } 145 | 146 | service() { 147 | case "$1" in 148 | start) 149 | check_for_root 150 | start 151 | ;; 152 | stop) 153 | check_for_root 154 | stop 155 | ;; 156 | status) 157 | checkstatus 158 | RETVAL=$? 159 | ;; 160 | restart) 161 | check_for_root 162 | restart 163 | ;; 164 | condrestart|try-restart) 165 | check_for_root 166 | condrestart 167 | ;; 168 | rollback) 169 | DAEMON_FLAGS="$DAEMON_FLAGS -${1}" 170 | start 171 | ;; 172 | *) 173 | echo $"Usage: $0 {start|stop|status|restart|try-restart|condrestart|rollback}" 174 | exit 1 175 | esac 176 | } 177 | 178 | service "$1" 179 | 180 | exit $RETVAL 181 | -------------------------------------------------------------------------------- /files/init.d/knox-gateway: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Licensed to the Apache Software Foundation (ASF) under one or more 5 | # contributor license agreements. See the NOTICE file distributed with 6 | # this work for additional information regarding copyright ownership. 7 | # The ASF licenses this file to You under the Apache License, Version 2.0 8 | # (the "License"); you may not use this file except in compliance with 9 | # the License. 
You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # 19 | # Starts a Knox Gateway 20 | # 21 | # chkconfig: 345 85 15 22 | # description: Knox Gateway 23 | # 24 | ### BEGIN INIT INFO 25 | # Provides: knox-gateway 26 | # Short-Description: Knox Gateway 27 | # Default-Start: 3 4 5 28 | # Default-Stop: 0 1 2 6 29 | # Required-Start: $syslog $remote_fs 30 | # Required-Stop: $syslog $remote_fs 31 | # Should-Start: 32 | # Should-Stop: 33 | ### END INIT INFO 34 | 35 | #Knox PID 36 | PID=0 37 | 38 | # Start, stop, status, clean or setup 39 | KNOX_LAUNCH_COMMAND=$1 40 | 41 | # User Name for setup parameter 42 | KNOX_LAUNCH_USER=$2 43 | 44 | #App names 45 | KNOX_NAME=knox 46 | GATEWAY_NAME=gateway 47 | 48 | # start/stop script location 49 | KNOX_SCRIPT_DIR="/usr/hdp/current/knox-server/bin" 50 | 51 | #Name of PID file 52 | PID_DIR="/var/run/$KNOX_NAME" 53 | PID_FILE="$PID_DIR/$GATEWAY_NAME.pid" 54 | 55 | #Name of LOG/OUT/ERR file (single $: "$$" is the shell PID special parameter, not variable expansion) 56 | LOG_DIR="/var/log/$KNOX_NAME" 57 | OUT_FILE="$LOG_DIR/$GATEWAY_NAME.out" 58 | ERR_FILE="$LOG_DIR/$GATEWAY_NAME.err" 59 | 60 | #The max time to wait 61 | MAX_WAIT_TIME=10 62 | 63 | function main { 64 | case "$1" in 65 | start) 66 | knoxStart 67 | ;; 68 | stop) 69 | knoxStop 70 | ;; 71 | status) 72 | knoxStatus 73 | ;; 74 | *) 75 | printf "Usage: $0 {start|stop|status}\n" 76 | ;; 77 | esac 78 | } 79 | 80 | function knoxStart { 81 | getPID 82 | if [ $? 
-eq 0 ]; then 83 | printf "Knox is already running with PID=$PID.\n" 84 | exit 0 85 | fi 86 | 87 | printf "Starting Knox " 88 | 89 | rm -f $PID_FILE 90 | 91 | su -l knox -c "$KNOX_SCRIPT_DIR/gateway.sh start" 92 | 93 | getPID 94 | knoxIsRunning $PID 95 | if [ $? -ne 1 ]; then 96 | printf "failed.\n" 97 | exit 1 98 | fi 99 | 100 | printf "succeeded with PID=$PID.\n" 101 | return 0 102 | } 103 | 104 | function knoxStop { 105 | getPID 106 | knoxIsRunning $PID 107 | if [ $? -eq 0 ]; then 108 | printf "Knox is not running.\n" 109 | return 0 110 | fi 111 | 112 | printf "Stopping Knox [$PID] " 113 | knoxKill $PID >>$OUT_FILE 2>>$ERR_FILE 114 | 115 | if [ $? -ne 0 ]; then 116 | printf "failed. \n" 117 | exit 1 118 | else 119 | rm -f $PID_FILE 120 | printf "succeeded.\n" 121 | return 0 122 | fi 123 | } 124 | 125 | function knoxStatus { 126 | printf "Knox " 127 | getPID 128 | if [ $? -eq 1 ]; then 129 | printf "is not running. No pid file found.\n" 130 | return 1 131 | fi 132 | 133 | knoxIsRunning $PID 134 | if [ $? -eq 1 ]; then 135 | printf "is running with PID=$PID.\n" 136 | exit 0 137 | else 138 | printf "is not running.\n" 139 | return 1 140 | fi 141 | } 142 | 143 | 144 | # Returns 0 if the Knox is running and sets the $PID variable. 145 | function getPID { 146 | if [ ! -d $PID_DIR ]; then 147 | printf "Can't find pid dir. Run sudo $0 setup.\n" 148 | exit 1 149 | fi 150 | if [ ! -f $PID_FILE ]; then 151 | PID=0 152 | return 1 153 | fi 154 | 155 | PID="$(<$PID_FILE)" 156 | return 0 157 | } 158 | 159 | function knoxIsRunning { 160 | if [ $1 -eq 0 ]; then return 0; fi 161 | 162 | ps -p $1 > /dev/null 163 | 164 | if [ $? 
-eq 1 ]; then 165 | return 0 166 | else 167 | return 1 168 | fi 169 | } 170 | 171 | function knoxKill { 172 | local localPID=$1 173 | kill $localPID || return 1 174 | for ((i=0; i installed 25 | } 26 | -> 27 | file { "${tmp_dir}/ambari-agent": 28 | ensure => directory, 29 | owner => 'root', 30 | group => 'root', 31 | mode => '755', 32 | } 33 | -> 34 | file { "${conf_dir}/ambari-agent.ini": 35 | ensure => file, 36 | content => template('ambari_agent/ambari-agent.erb'), 37 | owner => 'root', 38 | group => 'root', 39 | mode => '755', 40 | } 41 | -> 42 | exec { "ambari-agent-start": 43 | command => "/usr/sbin/ambari-agent start" 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /modules/ambari_agent/templates/ambari-agent.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | 18 | <% 19 | @namenode = eval(@nodes).select {|node| node[:roles].include? 'ambari-server' }[0][:hostname] + "." 
+ @domain; 20 | -%> 21 | 22 | [server] 23 | hostname=<%= @namenode %> 24 | url_port=8440 25 | secured_url_port=8441 26 | 27 | [agent] 28 | prefix=/var/lib/ambari-agent/data 29 | ;loglevel=(DEBUG/INFO) 30 | loglevel=INFO 31 | data_cleanup_interval=86400 32 | data_cleanup_max_age=2592000 33 | ping_port=8670 34 | cache_dir=/var/lib/ambari-agent/cache 35 | tolerate_download_failures=true 36 | tmp_dir=/tmp/ambari 37 | logdir=/var/log/ambari-agent 38 | piddir=/var/run/ambari-agent 39 | 40 | [puppet] 41 | puppetmodules=/var/lib/ambari-agent/puppet 42 | ruby_home=/usr/lib/ambari-agent/lib/ruby-1.8.7-p370 43 | puppet_home=/usr/lib/ambari-agent/lib/puppet-2.7.9 44 | facter_home=/usr/lib/ambari-agent/lib/facter-1.6.10 45 | 46 | [command] 47 | maxretries=2 48 | sleepBetweenRetries=1 49 | 50 | [security] 51 | keysdir=/var/lib/ambari-agent/keys 52 | server_crt=ca.crt 53 | passphrase_env_var_name=AMBARI_PASSPHRASE 54 | 55 | [services] 56 | pidLookupPath=/var/run/ 57 | 58 | [heartbeat] 59 | state_interval=6 60 | dirs=/etc/hadoop,/etc/hadoop/conf,/etc/hbase,/etc/hcatalog,/etc/hive,/etc/oozie, 61 | /etc/sqoop,/etc/ganglia,/etc/nagios, 62 | /var/run/hadoop,/var/run/zookeeper,/var/run/hbase,/var/run/templeton,/var/run/oozie, 63 | /var/log/hadoop,/var/log/zookeeper,/var/log/hbase,/var/run/templeton,/var/log/hive, 64 | /var/log/nagios 65 | rpms=nagios,ganglia, 66 | hadoop,hadoop-lzo,hbase,oozie,sqoop,pig,zookeeper,hive,libconfuse,ambari-log4j 67 | ; 0 - unlimited 68 | log_lines_count=300 69 | idle_interval_min=1 70 | idle_interval_max=10 71 | -------------------------------------------------------------------------------- /modules/ambari_server/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class ambari_server { 17 | 18 | require repos_setup 19 | require jdk 20 | 21 | package { "ambari-server": 22 | ensure => installed 23 | } 24 | -> 25 | exec { "ambari-server-setup": 26 | command => "/usr/sbin/ambari-server setup --silent" 27 | } 28 | -> 29 | exec { "ambari-server-start": 30 | command => "/usr/sbin/ambari-server start --silent" 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /modules/certification/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class certification { 17 | 18 | package { "pytest": 19 | ensure => installed 20 | } 21 | -> 22 | exec { "known hosts": 23 | command => "ssh-keyscan github.com >> ~/.ssh/known_hosts", 24 | path => "/usr/bin/:/bin/", 25 | user => vagrant 26 | } 27 | -> 28 | exec { "get cert repo": 29 | command => "git clone git@github.com:hortonworks/certification.git", 30 | path => "/usr/bin/:/bin/", 31 | user => vagrant 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /modules/dev/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class dev { 17 | $home = "/usr/lib/jvm/java" 18 | 19 | package { "git": 20 | ensure => installed, 21 | } 22 | 23 | } 24 | -------------------------------------------------------------------------------- /modules/hadoop_server/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # This class creates the various hadoop users and the log and pid directories 17 | # that are required for hdfs and yarn servers. 
18 | class hadoop_server { 19 | require hdfs_client 20 | require yarn_client 21 | 22 | $path="/bin:/usr/bin" 23 | 24 | group { 'hadoop': 25 | ensure => present, 26 | } 27 | -> 28 | group { 'mapred': 29 | ensure => present, 30 | } 31 | -> 32 | group { 'yarn': 33 | ensure => present, 34 | } 35 | -> 36 | group { 'oozie': 37 | ensure => present, 38 | } 39 | -> 40 | user { 'hdfs': 41 | ensure => present, 42 | gid => hadoop, 43 | } 44 | -> 45 | user { 'mapred': 46 | ensure => present, 47 | groups => ['mapred'], 48 | gid => hadoop, 49 | } 50 | -> 51 | user { 'yarn': 52 | ensure => present, 53 | groups => ['yarn', 'mapred'], 54 | gid => hadoop, 55 | } 56 | -> 57 | user { 'hive': 58 | ensure => present, 59 | gid => hadoop, 60 | } 61 | -> 62 | user { 'oozie': 63 | ensure => present, 64 | groups => ['hadoop'], 65 | gid => 'oozie', 66 | } 67 | -> 68 | user { 'hbase': 69 | ensure => present, 70 | gid => 'hadoop', 71 | } 72 | 73 | file { "${hdfs_client::data_dir}": 74 | ensure => directory, 75 | owner => 'root', 76 | group => 'root', 77 | mode => '755', 78 | } 79 | 80 | file { "${hdfs_client::data_dir}/hdfs": 81 | ensure => directory, 82 | owner => 'hdfs', 83 | group => 'hadoop', 84 | mode => '700', 85 | } 86 | 87 | file { "${hdfs_client::data_dir}/yarn": 88 | ensure => directory, 89 | owner => 'yarn', 90 | group => 'hadoop', 91 | mode => '755', 92 | } 93 | 94 | file { "${hdfs_client::pid_dir}": 95 | ensure => directory, 96 | owner => 'root', 97 | group => 'root', 98 | mode => '755', 99 | } 100 | 101 | file { "${hdfs_client::pid_dir}/hdfs": 102 | ensure => directory, 103 | owner => 'hdfs', 104 | group => 'hadoop', 105 | mode => '700', 106 | } 107 | 108 | file { "${hdfs_client::pid_dir}/mapred": 109 | ensure => directory, 110 | owner => 'mapred', 111 | group => 'hadoop', 112 | mode => '700', 113 | } 114 | 115 | file { "${hdfs_client::pid_dir}/yarn": 116 | ensure => directory, 117 | owner => 'yarn', 118 | group => 'hadoop', 119 | mode => '700', 120 | } 121 | 122 | file { 
"${hdfs_client::log_dir}": 123 | ensure => directory, 124 | owner => 'root', 125 | group => 'root', 126 | mode => '755', 127 | } 128 | 129 | file { "${hdfs_client::log_dir}/hdfs": 130 | ensure => directory, 131 | owner => 'hdfs', 132 | group => 'hadoop', 133 | mode => '700', 134 | } 135 | 136 | file { "${hdfs_client::log_dir}/mapred": 137 | ensure => directory, 138 | owner => 'mapred', 139 | group => 'hadoop', 140 | mode => '755', 141 | } 142 | 143 | file { "${hdfs_client::log_dir}/yarn": 144 | ensure => directory, 145 | owner => 'yarn', 146 | group => 'hadoop', 147 | mode => '755', 148 | } 149 | 150 | file { "${hdfs_client::log_dir}/hbase": 151 | ensure => directory, 152 | owner => 'hbase', 153 | group => 'hadoop', 154 | mode => '755', 155 | } 156 | 157 | file { "${hdfs_client::pid_dir}/hbase": 158 | ensure => directory, 159 | owner => 'hbase', 160 | group => 'hadoop', 161 | mode => '755', 162 | } 163 | } -------------------------------------------------------------------------------- /modules/hbase_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class hbase_client { 17 | require hdfs_client 18 | require zookeeper_client 19 | 20 | file { "/etc/profile.d/hbase.sh": 21 | content => "export HBASE_CONF_PATH=/etc/hbase/conf\n" 22 | } 23 | 24 | if $security == "true" { 25 | file { "/etc/hbase": 26 | ensure => directory, 27 | } 28 | -> 29 | file { "/etc/hbase/conf": 30 | ensure => directory, 31 | } 32 | -> 33 | file { "/etc/hbase/conf/zk-jaas.conf": 34 | ensure => file, 35 | content => template('hbase_client/zk-jaas.erb'), 36 | } 37 | -> 38 | Package["hbase${package_version}"] 39 | } 40 | 41 | package { "hbase${package_version}": 42 | ensure => installed, 43 | } 44 | -> 45 | package { "phoenix${package_version}": 46 | ensure => installed, 47 | } 48 | -> 49 | file { '/etc/hbase/conf/hbase-env.sh': 50 | ensure => file, 51 | content => template('hbase_client/hbase-env.sh.erb'), 52 | } 53 | -> 54 | file { '/etc/hbase/conf/hbase-site.xml': 55 | ensure => file, 56 | content => template('hbase_client/hbase-site.xml.erb'), 57 | } 58 | -> 59 | file { '/etc/hbase/conf/log4j.properties': 60 | ensure => file, 61 | content => template('hbase_client/log4j.properties.erb'), 62 | } 63 | -> 64 | file { '/etc/hbase/conf/regionservers': 65 | ensure => file, 66 | content => template('hbase_client/regionservers.erb'), 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /modules/hbase_client/templates/hbase-env.sh.erb: -------------------------------------------------------------------------------- 1 | # Set environment variables here. 2 | 3 | # The java implementation to use. Java 1.6 required. 4 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 5 | 6 | # HBase Configuration directory 7 | export HBASE_CONF_DIR=${HBASE_CONF_DIR:-/usr/hdp/current/hbase-client/conf} 8 | 9 | # The maximum amount of heap to use, in MB. Default is 1000. 10 | export HBASE_HEAPSIZE=<%= @server_mem %> 11 | 12 | # Extra Java runtime options. 13 | # Below are what we set by default. 
May only work with SUN JVM. 14 | # For more on why as well as other possible settings, 15 | # see http://wiki.apache.org/hadoop/PerformanceTuning 16 | export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/var/log/hbase/gc.log-`date +'%Y%m%d%H%M'`" 17 | # Uncomment below to enable java garbage collection logging. 18 | # export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log" 19 | 20 | # Uncomment and adjust to enable JMX exporting 21 | # See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access. 22 | # More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html 23 | # 24 | # export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" 25 | # If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size 26 | # export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103" 27 | # export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104" 28 | 29 | # File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default. 30 | export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers 31 | 32 | # Extra ssh options. Empty by default. 33 | # export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR" 34 | 35 | # Where log files are stored. $HBASE_HOME/logs by default. 36 | export HBASE_LOG_DIR=<%= scope.lookupvar('hdfs_client::log_dir') %>/hbase 37 | 38 | # A string representing this instance of hbase. $USER by default. 39 | export HBASE_IDENT_STRING=hbase 40 | 41 | # The scheduling priority for daemon processes. See 'man nice'. 42 | # export HBASE_NICENESS=10 43 | 44 | # The directory where pid files are stored. /tmp by default. 
45 | export HBASE_PID_DIR=<%= scope.lookupvar('hdfs_client::pid_dir') %>/hbase 46 | 47 | # Seconds to sleep between slave commands. Unset by default. This 48 | # can be useful in large clusters, where, e.g., slave rsyncs can 49 | # otherwise arrive faster than the master can service them. 50 | # export HBASE_SLAVE_SLEEP=0.1 51 | 52 | # Tell HBase whether it should manage it's own instance of Zookeeper or not. 53 | export HBASE_MANAGES_ZK=false 54 | 55 | export HBASE_OPTS="$HBASE_OPTS -XX:ErrorFile=$HBASE_LOG_DIR/hs_err_pid%p.log -Djava.security.auth.login.config=/etc/hbase/conf/zk-jaas.conf" 56 | export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx<%= @server_mem %>m" 57 | export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmx<%= @server_mem %>m" 58 | -------------------------------------------------------------------------------- /modules/hbase_client/templates/hbase-site.xml.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | <% @namenode = eval(@nodes). 18 | select {|node| node[:roles].include? 'nn'}[0][:hostname] + "." + @domain; 19 | @zookeeper_servers = eval(@nodes). 
20 | select {|node| node[:roles].include? 'zk'}. 21 | map{|node| node[:hostname] + "." + @domain + ":2181"}.join(","); 22 | -%> 23 | 24 | 25 | 26 | 27 | 28 | hbase.rootdir 29 | hdfs://<%= @namenode %>/apps/hbase 30 | 31 | 32 | 33 | phoenix.functions.allowUserDefinedFunctions 34 | true 35 | 36 | 37 | 38 | phoenix.query.timeoutMs 39 | 60000 40 | 41 | 42 | 43 | phoenix.sequence.saltBuckets 44 | 8 45 | 46 | 47 | 48 | hbase.regionserver.wal.codec 49 | org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec 50 | 51 | 52 | 53 | hbase.cluster.distributed 54 | true 55 | 56 | 57 | 58 | hbase.zookeeper.quorum 59 | <%= @zookeeper_servers %> 60 | 61 | 62 | 63 | zookeeper.znode.parent 64 | /hbase 65 | 66 | 67 | <% if @security == "true" -%> 68 | 69 | hbase.security.authentication 70 | kerberos 71 | 72 | 73 | 74 | hbase.security.authorization 75 | true 76 | 77 | 78 | 79 | hbase.rpc.engine 80 | org.apache.hadoop.hbase.ipc.SecureRpcEngine 81 | 82 | 83 | 84 | hbase.master.keytab.file 85 | <%= scope.lookupvar('hdfs_client::keytab_dir') %>/hbase.keytab 86 | 87 | 88 | 89 | hbase.master.kerberos.principal 90 | hbase/_HOST@<%= @realm %> 91 | 92 | 93 | 94 | hbase.regionserver.keytab.file 95 | <%= scope.lookupvar('hdfs_client::keytab_dir') %>/hbase.keytab 96 | 97 | 98 | 99 | hbase.regionserver.kerberos.principal 100 | hbase/_HOST@<%= @realm %> 101 | 102 | 103 | 104 | hbase.superuser 105 | hbase 106 | 107 | 108 | 109 | hbase.coprocessor.master.classes 110 | org.apache.hadoop.hbase.security.access.AccessController 111 | 112 | 113 | 114 | hbase.coprocessor.region.classes 115 | org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController 116 | 117 | <% end -%> 118 | 119 | 120 | -------------------------------------------------------------------------------- /modules/hbase_client/templates/log4j.properties.erb: -------------------------------------------------------------------------------- 1 | 2 | # Licensed to the Apache Software 
Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | 18 | 19 | # Define some default values that can be overridden by system properties 20 | hbase.root.logger=INFO,console 21 | hbase.security.logger=INFO,console 22 | hbase.log.dir=. 23 | hbase.log.file=hbase.log 24 | 25 | # Define the root logger to the system property "hbase.root.logger". 
26 | log4j.rootLogger=${hbase.root.logger} 27 | 28 | # Logging Threshold 29 | log4j.threshold=ALL 30 | 31 | # 32 | # Daily Rolling File Appender 33 | # 34 | log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender 35 | log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} 36 | 37 | # Rollver at midnight 38 | log4j.appender.DRFA.DatePattern=.yyyy-MM-dd 39 | 40 | # 30-day backup 41 | #log4j.appender.DRFA.MaxBackupIndex=30 42 | log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout 43 | 44 | # Pattern format: Date LogLevel LoggerName LogMessage 45 | log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n 46 | 47 | # Rolling File Appender properties 48 | hbase.log.maxfilesize=256MB 49 | hbase.log.maxbackupindex=20 50 | 51 | # Rolling File Appender 52 | log4j.appender.RFA=org.apache.log4j.RollingFileAppender 53 | log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file} 54 | 55 | log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize} 56 | log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex} 57 | 58 | log4j.appender.RFA.layout=org.apache.log4j.PatternLayout 59 | log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n 60 | 61 | # 62 | # Security audit appender 63 | # 64 | hbase.security.log.file=SecurityAuth.audit 65 | hbase.security.log.maxfilesize=256MB 66 | hbase.security.log.maxbackupindex=20 67 | log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 68 | log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file} 69 | log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize} 70 | log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex} 71 | log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout 72 | log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n 73 | log4j.category.SecurityLogger=${hbase.security.logger} 74 | log4j.additivity.SecurityLogger=false 75 | 
#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE 76 | 77 | # 78 | # Null Appender 79 | # 80 | log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender 81 | 82 | # 83 | # console 84 | # Add "console" to rootlogger above if you want to use this 85 | # 86 | log4j.appender.console=org.apache.log4j.ConsoleAppender 87 | log4j.appender.console.target=System.err 88 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 89 | log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n 90 | 91 | # Custom Logging levels 92 | 93 | log4j.logger.org.apache.zookeeper=INFO 94 | #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG 95 | log4j.logger.org.apache.hadoop.hbase=INFO 96 | # Make these two classes INFO-level. Make them DEBUG to see more zk debug. 97 | log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO 98 | log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO 99 | #log4j.logger.org.apache.hadoop.dfs=DEBUG 100 | # Set this class to log INFO only otherwise its OTT 101 | # Enable this to get detailed connection error/retry logging. 102 | # log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE 103 | 104 | 105 | # Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output) 106 | #log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG 107 | 108 | # Uncomment the below if you want to remove logging of client region caching' 109 | # and scan of .META. messages 110 | # log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO 111 | # log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO 112 | 113 | -------------------------------------------------------------------------------- /modules/hbase_client/templates/regionservers.erb: -------------------------------------------------------------------------------- 1 | <% 2 | regionservers = eval(@nodes). 
3 | select {|node| node[:roles].include? 'hbase-regionserver'}. 4 | map{|node| node[:hostname] + "." + @domain}; 5 | -%> 6 | <% regionservers.each do |node| -%> 7 | <%= node %> 8 | <% end -%> 9 | -------------------------------------------------------------------------------- /modules/hbase_client/templates/zk-jaas.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | useTicketCache=false 21 | keyTab="<%= scope.lookupvar('hdfs_client::keytab_dir') %>/hbase.keytab" 22 | principal="hbase/<%= @hostname %>.<%= @domain %>@<%= @realm %>"; 23 | }; -------------------------------------------------------------------------------- /modules/hbase_master/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class hbase_master { 17 | require hbase_server 18 | 19 | $path="/usr/bin" 20 | 21 | package { "hbase${package_version}-master" : 22 | ensure => installed, 23 | } 24 | -> 25 | exec { "hdp-select set hbase-master ${hdp_version}": 26 | cwd => "/", 27 | path => "$path", 28 | } 29 | -> 30 | file { "/etc/init.d/hbase-master": 31 | ensure => file, 32 | source => "puppet:///files/init.d/hbase-master", 33 | owner => root, 34 | group => root, 35 | mode => '755', 36 | # TODO when we move to 2.5 grab Carter's change to use script from package 37 | } 38 | -> 39 | service {"hbase-master": 40 | ensure => running, 41 | enable => true, 42 | subscribe => File['/etc/hbase/conf/hbase-site.xml'], 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /modules/hbase_regionserver/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class hbase_regionserver { 17 | require hbase_server 18 | 19 | $path="/usr/bin" 20 | 21 | package { "hbase${package_version}-regionserver" : 22 | ensure => installed, 23 | } 24 | -> 25 | exec { "hdp-select set hbase-regionserver ${hdp_version}": 26 | cwd => "/", 27 | path => "$path", 28 | } 29 | -> 30 | file { "/etc/init.d/hbase-regionserver": 31 | ensure => file, 32 | source => "puppet:///files/init.d/hbase-regionserver", 33 | owner => root, 34 | group => root, 35 | mode => '755', 36 | } 37 | -> 38 | service {"hbase-regionserver": 39 | ensure => running, 40 | enable => true, 41 | subscribe => File['/etc/hbase/conf/hbase-site.xml'], 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /modules/hbase_server/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class hbase_server { 17 | require hdfs_client 18 | require zookeeper_client 19 | require hbase_client 20 | require hadoop_server 21 | 22 | $keytab_dir="/etc/security/hadoop" 23 | 24 | if $security == "true" { 25 | require kerberos_http 26 | 27 | file { "${keytab_dir}/hbase.keytab": 28 | ensure => file, 29 | source => "/vagrant/generated/keytabs/${hostname}/hbase.keytab", 30 | owner => hbase, 31 | group => hadoop, 32 | mode => '400', 33 | } 34 | } 35 | 36 | } 37 | -------------------------------------------------------------------------------- /modules/hdfs_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class hdfs_client { 17 | require repos_setup 18 | require hdp_select 19 | require jdk 20 | 21 | $conf_dir="/etc/hadoop/hdp" 22 | $path="${jdk::home}/bin:/bin:/usr/bin" 23 | $log_dir="/var/log/hadoop" 24 | $data_dir="/var/lib/hadoop" 25 | $pid_dir="/var/run/pid" 26 | $keytab_dir="/etc/security/hadoop" 27 | 28 | package { "hadoop${package_version}": 29 | ensure => installed, 30 | } 31 | -> 32 | exec { "hdp-select set hadoop-client ${hdp_version}": 33 | cwd => "/", 34 | path => "$path", 35 | } 36 | 37 | package { "hadoop${package_version}-libhdfs": 38 | ensure => installed, 39 | require => Package["hadoop${package_version}"], 40 | } 41 | 42 | package { "hadoop${package_version}-client": 43 | ensure => installed, 44 | require => Package["hadoop${package_version}"], 45 | } 46 | 47 | package { "hadooplzo${package_version}": 48 | ensure => installed, 49 | require => Package["hadoop${package_version}"], 50 | } 51 | -> 52 | package { "hadooplzo${package_version}-native": 53 | ensure => installed, 54 | } 55 | 56 | package { 'openssl': 57 | ensure => installed, 58 | } 59 | 60 | package { 'snappy': 61 | ensure => installed, 62 | } 63 | 64 | package { 'lzo': 65 | ensure => installed, 66 | } 67 | 68 | file { '/etc/hadoop': 69 | ensure => 'directory', 70 | } 71 | 72 | file { "${conf_dir}": 73 | ensure => 'directory', 74 | } 75 | 76 | file { '/etc/hadoop/conf': 77 | ensure => 'link', 78 | target => "${conf_dir}", 79 | require => Package["hadoop${package_version}"], 80 | force => true 81 | } 82 | 83 | file { "${conf_dir}/commons-logging.properties": 84 | ensure => file, 85 | content => template('hdfs_client/commons-logging.erb'), 86 | } 87 | 88 | file { "${conf_dir}/configuration.xsl": 89 | ensure => file, 90 | content => template('hdfs_client/configuration.erb'), 91 | } 92 | 93 | file { "${conf_dir}/core-site.xml": 94 | ensure => file, 95 | content => template('hdfs_client/core-site.erb'), 96 | } 97 | 98 | file { "${conf_dir}/dfs.exclude": 99 | ensure => file, 100 | 
content => "", 101 | } 102 | 103 | file { "${conf_dir}/hadoop-env.sh": 104 | ensure => file, 105 | content => template('hdfs_client/hadoop-env.erb'), 106 | } 107 | 108 | file { "${conf_dir}/hadoop-metrics2.properties": 109 | ensure => file, 110 | content => template('hdfs_client/hadoop-metrics2.erb'), 111 | } 112 | 113 | file { "${conf_dir}/hadoop-policy.xml": 114 | ensure => file, 115 | content => template('hdfs_client/hadoop-policy.erb'), 116 | } 117 | 118 | file { "${conf_dir}/hdfs-site.xml": 119 | ensure => file, 120 | content => template('hdfs_client/hdfs-site.erb'), 121 | } 122 | 123 | file { "${conf_dir}/log4j.properties": 124 | ensure => file, 125 | content => template('hdfs_client/log4j.erb'), 126 | } 127 | 128 | if $security == "true" { 129 | require kerberos_client 130 | require ssl_ca 131 | 132 | # bless the generated ca cert for java clients 133 | exec {"keytool -importcert -noprompt -alias horton-ca -keystore ${jdk::home}/jre/lib/security/cacerts -storepass changeit -file ca.crt": 134 | cwd => "/vagrant/generated/ssl-ca", 135 | path => "$path", 136 | unless => "keytool -list -alias horton-ca -keystore ${jdk::home}/jre/lib/security/cacerts -storepass changeit", 137 | } 138 | 139 | file {"${conf_dir}/ssl-client.xml": 140 | ensure => file, 141 | content => template("hdfs_client/ssl-client.erb"), 142 | } 143 | } 144 | 145 | } 146 | -------------------------------------------------------------------------------- /modules/hdfs_client/templates/commons-logging.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. 
You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | #Logging Implementation 18 | 19 | #Log4J 20 | org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger 21 | -------------------------------------------------------------------------------- /modules/hdfs_client/templates/configuration.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
namevaluedescription
37 | 38 | 39 |
40 |
41 | -------------------------------------------------------------------------------- /modules/hdfs_client/templates/core-site.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | <% @namenode = 18 | eval(@nodes).select {|node| node[:roles].include? 'nn'}[0][:hostname] + 19 | "." + @domain; 20 | # The list of roles and hive servers that need to proxy for others 21 | @hive_roles = ['hive-meta', 'hive-hs2']; 22 | @hive_servers = eval(@nodes). 23 | select {|node| (node[:roles] & @hive_roles).length > 0}. 24 | map{|node| node[:hostname] + "." + @domain}.join(","); 25 | @hcat_servers = eval(@nodes).select {|node| node[:roles].include? 'hcat' 26 | }.map{|node| node[:hostname] + "." + @domain}.join(","); 27 | @oozie_servers = eval(@nodes). 28 | select {|node| node[:roles].include? 'oozie'}. 29 | map{|node| node[:hostname] + "." 
+ @domain}.join(","); 30 | -%> 31 | 32 | 33 | 34 | 35 | 36 | 37 | io.serializations 38 | org.apache.hadoop.io.serializer.WritableSerialization 39 | 40 | 41 | 42 | hadoop.security.authentication 43 | <%= if @security == "true" 44 | then "kerberos" else "simple" end %> 45 | 46 | 47 | 48 | fs.defaultFS 49 | hdfs://<%= @namenode %>:8020 50 | 51 | 52 | 53 | fs.trash.interval 54 | 360 55 | 56 | 57 | 58 | io.compression.codecs 59 | org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec 60 | 61 | 62 | 63 | io.compression.codec.lzo.class 64 | com.hadoop.compression.lzo.LzoCodec 65 | 66 | 67 | 68 | hadoop.security.authorization 69 | true 70 | 71 | 72 | 73 | ipc.client.connect.max.retries 74 | 50 75 | 76 | 77 | 78 | ipc.client.idlethreshold 79 | 8000 80 | 81 | 82 | 83 | io.file.buffer.size 84 | 131072 85 | 86 | 87 | 88 | ipc.client.connection.maxidletime 89 | 30000 90 | 91 | 92 | <% if @oozie_servers.length > 0 -%> 93 | 94 | hadoop.proxyuser.oozie.groups 95 | oozie,users 96 | 97 | 98 | 99 | hadoop.proxyuser.oozie.hosts 100 | <%= @oozie_servers %> 101 | 102 | <% end -%> 103 | 104 | <% if @hive_servers.length > 0 -%> 105 | 106 | hadoop.proxyuser.hive.groups 107 | hive,users 108 | 109 | 110 | 111 | hadoop.proxyuser.hive.hosts 112 | <%= @hive_servers %> 113 | 114 | <% end -%> 115 | 116 | <% if @security == "true" -%> 117 | 118 | hadoop.security.auth_to_local 119 | RULE:[2:$1@$0]([rn]m@<%= @realm %>)s/.*/yarn/ 120 | RULE:[2:$1@$0]([nd]n@<%= @realm %>)s/.*/hdfs/ 121 | RULE:[2:$1@$0](jhs@<%= @realm %>)s/.*/mapred/ 122 | DEFAULT 123 | 124 | 125 | 126 | hadoop.proxyuser.HTTP.groups 127 | users 128 | 129 | 130 | 131 | hadoop.proxyuser.HTTP.hosts 132 | <%= @hcat_servers %> 133 | 134 | 135 | 136 | hadoop.proxyuser.hcat.groups 137 | users 138 | 139 | 140 | 141 | hadoop.proxyuser.hcat.hosts 142 | <%= @hcat_servers %> 143 | 144 | 145 | 146 | 
hadoop.http.filter.initializers 147 | org.apache.hadoop.security.AuthenticationFilterInitializer 148 | 149 | 150 | 151 | hadoop.http.authentication.type 152 | kerberos 153 | 154 | 155 | 156 | hadoop.http.authentication.cookie.domain 157 | <%= @domain %> 158 | 159 | 160 | 161 | hadoop.http.authentication.kerberos.principal 162 | HTTP/_HOST@<%= @realm %> 163 | 164 | 165 | 166 | hadoop.http.authentication.kerberos.keytab 167 | <%= @keytab_dir %>/http.keytab 168 | 169 | 170 | 171 | hadoop.http.authentication.signature.secret.file 172 | <%= @keytab_dir %>/http-secret 173 | 174 | <% end -%> 175 | 176 | -------------------------------------------------------------------------------- /modules/hdfs_client/templates/hadoop-env.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | # Set Hadoop-specific environment variables here. 18 | 19 | # The java implementation to use. Required. 20 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 21 | export HADOOP_HOME_WARN_SUPPRESS=1 22 | 23 | # The maximum amount of heap to use, in MB. Default is 1000. 
24 | export HADOOP_HEAPSIZE=<%= @client_mem %> 25 | 26 | # Extra Java runtime options. Empty by default. 27 | export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}" 28 | 29 | # Command specific options appended to HADOOP_OPTS when specified 30 | export HADOOP_NAMENODE_OPTS="-server -Xmx<%= @server_mem %>m -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}" 31 | 32 | HADOOP_DATANODE_OPTS="-Xmx<%= @server_mem %>m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}" 33 | 34 | HADOOP_BALANCER_OPTS="-server -Xmx<%= @server_mem %>m ${HADOOP_BALANCER_OPTS}" 35 | 36 | export HADOOP_SECONDARYNAMENODE_OPTS="-server -Xmx<%= @server_mem %>m -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}" 37 | 38 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc) 39 | export HADOOP_CLIENT_OPTS="-Xmx<%= @client_mem %>m ${HADOOP_CLIENT_OPTS}" 40 | 41 | # Where log files are stored. $HADOOP_HOME/logs by default. 42 | export HADOOP_LOG_DIR=<%= @log_dir %>/$USER 43 | 44 | # History server logs 45 | export HADOOP_MAPRED_LOG_DIR=<%= @log_dir %>/$USER 46 | 47 | # Where log files are stored in the secure data environment. 48 | export HADOOP_SECURE_DN_LOG_DIR=<%= @log_dir %>/$HADOOP_SECURE_DN_USER 49 | 50 | # The directory where pid files are stored. 51 | export HADOOP_PID_DIR=<%= @pid_dir %>/$USER 52 | export HADOOP_SECURE_DN_PID_DIR=<%= @pid_dir %>/$HADOOP_SECURE_DN_USER 53 | 54 | # History server pid 55 | export HADOOP_MAPRED_PID_DIR=<%= @pid_dir %>/$USER 56 | 57 | YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY" 58 | 59 | # A string representing this instance of hadoop. 
60 | export HADOOP_IDENT_STRING="$USER" 61 | 62 | # Use libraries from standard classpath 63 | JAVA_JDBC_LIBS="" 64 | #Add libraries required by mysql connector 65 | for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null` 66 | do 67 | JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile 68 | done 69 | #Add libraries required by oracle connector 70 | for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null` 71 | do 72 | JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile 73 | done 74 | #Add libraries required by nodemanager 75 | TEZ_LIBS=/etc/tez/conf:/usr/hdp/${HDP_VERSION}/tez/*:/usr/hdp/${HDP_VERSION}/tez/lib/* 76 | export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${TEZ_LIBS} 77 | 78 | <% if @security == "true" -%> 79 | # Path to jsvc required by secure HDP 2.0 datanode 80 | export JSVC_HOME=/usr/lib/bigtop-utils 81 | 82 | # On secure datanodes, user to run the datanode as after dropping privileges 83 | export HADOOP_SECURE_DN_USER=hdfs 84 | <% end -%> 85 | -------------------------------------------------------------------------------- /modules/hdfs_client/templates/hadoop-metrics2.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | # syntax: [prefix].[source|sink|jmx].[instance].[options] 18 | # See package.html for org.apache.hadoop.metrics2 for details 19 | 20 | *.period=60 21 | 22 | *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 23 | *.sink.ganglia.period=180 24 | 25 | # default for supportsparse is false 26 | *.sink.ganglia.supportsparse=true 27 | 28 | .sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both 29 | .sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 30 | 31 | # Hook up to the server 32 | namenode.sink.ganglia.servers=0.0.0.0:8660 33 | datanode.sink.ganglia.servers=0.0.0.0:8660 34 | jobtracker.sink.ganglia.servers=0.0.0.0:8660 35 | tasktracker.sink.ganglia.servers=0.0.0.0:8660 36 | maptask.sink.ganglia.servers=0.0.0.0:8660 37 | reducetask.sink.ganglia.servers=0.0.0.0:8660 38 | -------------------------------------------------------------------------------- /modules/hdfs_client/templates/ssl-client.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | 18 | 19 | 20 | 21 | 22 | 23 | ssl.client.truststore.location 24 | <%= scope.lookupvar('jdk::home') %>/jre/lib/security/cacerts 25 | Truststore to be used by clients like distcp. Must be 26 | specified. 27 | 28 | 29 | 30 | 31 | ssl.client.truststore.password 32 | changeit 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /modules/hdfs_datanode/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
# Installs and starts an HDFS DataNode on this VM.
#
# Depends on the hdfs_client configuration (core-site/hdfs-site templates,
# keytab_dir) and the common hadoop_server setup.  When $security == "true"
# the DataNode keytab is staged from the shared /vagrant tree before the
# package is installed, so the daemon can authenticate on first start.
class hdfs_datanode {
  require hdfs_client
  require hadoop_server

  $path="/usr/bin"

  if $security == "true" {
    require kerberos_http

    # Stage the DataNode keytab before installing the package; readable by
    # the hdfs user only.  Use 4-digit octal mode — '400' is ambiguous and
    # deprecated in current Puppet; '0400' is the documented form.
    file { "${hdfs_client::keytab_dir}/dn.keytab":
      ensure => file,
      source => "/vagrant/generated/keytabs/${hostname}/dn.keytab",
      owner  => hdfs,
      group  => hadoop,
      mode   => '0400',
    }
    ->
    Package["hadoop${package_version}-hdfs-datanode"]
  }

  # Install the versioned package, point hdp-select at it, expose the init
  # script, then start the service — strictly in that order.
  package { "hadoop${package_version}-hdfs-datanode" :
    ensure => installed,
  }
  ->
  exec { "hdp-select set hadoop-hdfs-datanode ${hdp_version}":
    cwd  => "/",
    path => "$path",
  }
  ->
  file { "/etc/init.d/hadoop-hdfs-datanode":
    ensure => 'link',
    target => "/usr/hdp/current/hadoop-hdfs-datanode/etc/rc.d/init.d/hadoop-hdfs-datanode",
  }
  ->
  service {"hadoop-hdfs-datanode":
    ensure => running,
    enable => true,
  }
}
--------------------------------------------------------------------------------
/modules/hdp_select/manifests/init.pp:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Makes the hdp-select distribution-switching tool available on the node.
# Requires the HDP yum repositories to be configured first.
class hdp_select {
  require repos_setup

  package { "hdp-select":
    ensure => installed,
  }
}
--------------------------------------------------------------------------------
/modules/hive_client/manifests/init.pp:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Installs the Hive client bits and renders its configuration.
#
# Config lives in /etc/hive/hdp, with /etc/hive/conf symlinked to it so the
# stock package layout keeps working.  The MySQL JDBC connector is linked
# into Hive's lib directory for metastore connectivity.
class hive_client {
  require yarn_client

  # Versioned Hive package for the selected HDP release.
  package { "hive${package_version}":
    ensure => installed,
  }

  # Our configuration directory, plus the conventional conf symlink.
  file { "/etc/hive":
    ensure => "directory",
  }

  file { "/etc/hive/hdp":
    ensure => "directory",
  }

  file { "/etc/hive/conf":
    ensure  => "link",
    target  => "/etc/hive/hdp",
    require => Package["hive${package_version}"],
    force   => true
  }

  # Rendered configuration files.
  file { "/etc/hive/hdp/hive-env.sh":
    ensure  => file,
    content => template("hive_client/hive-env.erb"),
  }

  file { "/etc/hive/hdp/hive-site.xml":
    ensure  => file,
    content => template("hive_client/hive-site.erb"),
  }

  file { "/etc/hive/hdp/hive-log4j.properties":
    ensure  => file,
    content => template("hive_client/hive-log4j.erb"),
  }

  # JDBC driver for the MySQL-backed metastore.
  package { "mysql-connector-java":
    ensure => installed,
  }

  file { "/usr/hdp/${hdp_version}/hive/lib/mysql-connector-java.jar":
    ensure  => "link",
    target  => "/usr/share/java/mysql-connector-java.jar",
    require => Package["hive${package_version}"],
  }
}
--------------------------------------------------------------------------------
/modules/hive_client/templates/hive-env.erb:
--------------------------------------------------------------------------------
<%#
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements.  See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License.
You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | 18 | # Set Hive and Hadoop environment variables here. These variables can be used 19 | # to control the execution of Hive. It should be used by admins to configure 20 | # the Hive installation (so that users do not have to set environment variables 21 | # or set command line parameters to get correct behavior). 22 | # 23 | # The hive service being invoked (CLI/HWI etc.) is available via the environment 24 | # variable SERVICE 25 | 26 | # Hive Client memory usage can be an issue if a large number of clients 27 | # are running at the same time. The flags below have been useful in 28 | # reducing memory usage: 29 | # 30 | if [ "$SERVICE" = "cli" ]; then 31 | if [ -z "$DEBUG" ]; then 32 | export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit" 33 | else 34 | export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit" 35 | fi 36 | fi 37 | 38 | # The heap size of the jvm stared by hive shell script can be controlled via: 39 | HBASE_CONF_DIR=${HBASE_CONF_DIR:-/etc/hbase/conf} 40 | export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-<%= @server_mem.to_i %>} 41 | 42 | # Larger heap size may be required when running queries over large number of files or partitions. 43 | # By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be 44 | # appropriate for hive server (hwi etc). 
45 | 46 | # Hive Configuration Directory can be controlled by: 47 | export HIVE_CONF_DIR=/etc/hive/conf 48 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 49 | # Folder containing extra ibraries required for hive compilation/execution can be controlled by: 50 | export HIVE_AUX_JARS_PATH= 51 | -------------------------------------------------------------------------------- /modules/hive_client/templates/hive-log4j.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | # Define some default values that can be overridden by system properties 18 | hive.log.threshold=ALL 19 | hive.root.logger=INFO,DRFA 20 | hive.log.dir=/tmp/${user.name} 21 | hive.log.file=hive.log 22 | 23 | # Define the root logger to the system property "hadoop.root.logger". 24 | log4j.rootLogger=${hive.root.logger} 25 | 26 | # Logging Threshold 27 | log4j.threshold=${hive.log.threshold} 28 | 29 | # 30 | # Daily Rolling File Appender 31 | # 32 | # Use the PidDailyerRollingFileAppend class instead if you want to use separate log files 33 | # for different CLI session. 
34 | # 35 | # log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender 36 | 37 | log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender 38 | 39 | log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file} 40 | 41 | # Rollver at midnight 42 | log4j.appender.DRFA.DatePattern=.yyyy-MM-dd 43 | 44 | # 30-day backup 45 | #log4j.appender.DRFA.MaxBackupIndex=30 46 | log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout 47 | 48 | # Pattern format: Date LogLevel LoggerName LogMessage 49 | #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n 50 | # Debugging Pattern format 51 | log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n 52 | 53 | 54 | # 55 | # console 56 | # Add "console" to rootlogger above if you want to use this 57 | # 58 | 59 | log4j.appender.console=org.apache.log4j.ConsoleAppender 60 | log4j.appender.console.target=System.err 61 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 62 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n 63 | log4j.appender.console.encoding=UTF-8 64 | 65 | #custom logging levels 66 | #log4j.logger.xxx=DEBUG 67 | 68 | # 69 | # Event Counter Appender 70 | # Sends counts of logging messages at different severity levels to Hadoop Metrics. 
71 | # 72 | #log4j.appender.EventCounter=org.apache.hadoop.metrics.EventCounter 73 | 74 | 75 | #log4j.category.DataNucleus.Query=DEBUG,console 76 | #log4j.category.DataNucleus=ERROR,DRFA 77 | #log4j.category.Datastore=ERROR,DRFA 78 | #log4j.category.Datastore.Schema=ERROR,DRFA 79 | #log4j.category.JPOX.Datastore=ERROR,DRFA 80 | #log4j.category.JPOX.Plugin=ERROR,DRFA 81 | #log4j.category.JPOX.MetaData=ERROR,DRFA 82 | #log4j.category.JPOX.Query=ERROR,DRFA 83 | #log4j.category.JPOX.General=ERROR,DRFA 84 | #log4j.category.JPOX.Enhancer=ERROR,DRFA 85 | -------------------------------------------------------------------------------- /modules/hive_db/files/add-remote-root.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mysql -u root -pvagrant << EOF 3 | create user 'root'@'%' identified by 'vagrant'; 4 | grant all privileges on *.* to 'root'@'%' with grant option; 5 | EOF 6 | -------------------------------------------------------------------------------- /modules/hive_db/files/secure-mysql.txt: -------------------------------------------------------------------------------- 1 | 2 | y 3 | vagrant 4 | vagrant 5 | y 6 | y 7 | y 8 | y 9 | -------------------------------------------------------------------------------- /modules/hive_db/files/setup-hive.txt: -------------------------------------------------------------------------------- 1 | create user 'hive'@'%' identified by 'vagrant'; 2 | create database hive; 3 | grant all privileges on hive.* to 'hive'@'%'; 4 | flush privileges; 5 | -------------------------------------------------------------------------------- /modules/hive_db/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Provisions the MySQL database that backs the Hive metastore.
#
# Ordering is expressed with require metaparameters: install mysql-server,
# start mysqld, lock it down, enable remote root, then create the hive
# database and user.
#
# NOTE(review): credentials here are hard-coded dev/Vagrant passwords
# ("vagrant") — fine for throwaway VMs, never for anything reachable.
class hive_db {
  $path = "/bin:/usr/bin"

  package { 'mysql-server':
    ensure => installed,
  }

  service { 'mysqld':
    ensure  => running,
    enable  => true,
    require => Package['mysql-server'],
  }

  # Scripted mysql_secure_installation; only runs while root still has no
  # password (the onlyif check), which makes it a one-shot.
  exec { "secure-mysqld":
    command => "mysql_secure_installation < /vagrant/modules/hive_db/files/secure-mysql.txt",
    path    => $path,
    cwd     => "/tmp",
    onlyif  => "mysql -u root -e ';'",
    require => Service['mysqld'],
  }

  # Allow root to connect from the other cluster VMs.
  exec { "add-remote-root":
    command => "/vagrant/modules/hive_db/files/add-remote-root.sh",
    path    => $path,
    require => Exec['secure-mysqld'],
  }

  # Create the hive database/user; skipped once the datadir exists.
  exec { "create-hivedb":
    command => "mysql -u root --password=vagrant < files/setup-hive.txt",
    path    => $path,
    cwd     => "/vagrant/modules/hive_db",
    creates => "/var/lib/mysql/hive",
    require => Exec['add-remote-root'],
  }
}
--------------------------------------------------------------------------------
/modules/hive_hs2/manifests/init.pp:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Installs and runs HiveServer2.
#
# In secure mode the hive service keytab is staged before the package goes
# on, so the server can authenticate on first start.  Ordering is expressed
# with require/before metaparameters rather than chaining arrows.
class hive_hs2 {
  require hive_client

  $path="/bin:/usr/bin"

  if $security == "true" {
    file { "${hdfs_client::keytab_dir}/hive.keytab":
      ensure => file,
      source => "/vagrant/generated/keytabs/${hostname}/hive.keytab",
      owner  => hive,
      group  => hadoop,
      mode   => '400',
      before => Package["hive${package_version}-server2"],
    }
  }

  package { "hive${package_version}-server2":
    ensure => installed,
  }

  # Expose the packaged init script under /etc/init.d.
  file { '/etc/init.d/hive-server2':
    ensure  => file,
    source  => "/usr/hdp/${hdp_version}/hive/etc/rc.d/init.d/hive-server2",
    mode    => 'a+rx',
    require => Package["hive${package_version}-server2"],
  }

  # Point the "current" symlinks at this HDP version.
  exec { "hdp-select set hive-server2 ${hdp_version}":
    cwd     => "/",
    path    => $path,
    require => File['/etc/init.d/hive-server2'],
  }

  service { 'hive-server2':
    ensure  => running,
    enable  => true,
    require => Exec["hdp-select set hive-server2 ${hdp_version}"],
  }
}
--------------------------------------------------------------------------------
/modules/hive_meta/manifests/init.pp:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Installs and runs the Hive Metastore, backed by the MySQL database from
# hive_db.  The metastore schema is initialized with schematool before the
# service is started.
class hive_meta {
  require hive_client
  require hive_db

  $path="/bin:/usr/bin"

  if $security == "true" {
    # Stage the hive service keytab before the package install.  Four-digit
    # octal mode ('0400') is the unambiguous, documented Puppet form.
    file { "${hdfs_client::keytab_dir}/hive.keytab":
      ensure => file,
      source => "/vagrant/generated/keytabs/${hostname}/hive.keytab",
      owner  => hive,
      group  => hadoop,
      mode   => '0400',
    }
    ->
    Package["hive${package_version}-metastore"]
  }

  package { "hive${package_version}-metastore":
    ensure => installed,
  }
  ->
  exec { "hdp-select set hive-metastore ${hdp_version}":
    cwd  => "/",
    path => "$path",
  }
  ->
  file { '/etc/init.d/hive-metastore':
    ensure  => file,
    content => template('hive_meta/hive-metastore.erb'),
    mode    => 'a+rx',
  }
  ->
  # Initialize the metastore schema exactly once: -initSchema fails if the
  # schema already exists, so guard with -info, which only succeeds against
  # an already-initialized schema.  Without the guard every subsequent
  # puppet run fails here.
  exec { "schematool -dbType mysql -initSchema":
    user   => "hive",
    cwd    => "/",
    path   => "/usr/hdp/current/hive-metastore/bin:$path",
    unless => "schematool -dbType mysql -info",
  }
  ->
  service { 'hive-metastore':
    ensure => running,
    enable => true,
  }
}
--------------------------------------------------------------------------------
/modules/hive_meta/templates/hive-metastore.erb:
--------------------------------------------------------------------------------
<%#
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements.
See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | #!/bin/bash 18 | # 19 | # Starts a Hive Metastore 20 | # 21 | # chkconfig: 345 85 15 22 | # description: Hive Metastore 23 | # 24 | ### BEGIN INIT INFO 25 | # Provides: hive-metastore 26 | # Short-Description: Hive Metastore 27 | # Default-Start: 3 4 5 28 | # Default-Stop: 0 1 2 6 29 | # Required-Start: $syslog $remote_fs 30 | # Required-Stop: $syslog $remote_fs 31 | # Should-Start: 32 | # Should-Stop: 33 | ### END INIT INFO 34 | 35 | . /lib/lsb/init-functions 36 | BIGTOP_DEFAULTS_DIR=${BIGTOP_DEFAULTS_DIR-/etc/default} 37 | [ -n "${BIGTOP_DEFAULTS_DIR}" -a -r ${BIGTOP_DEFAULTS_DIR}/hadoop ] && . ${BIGTOP_DEFAULTS_DIR}/hadoop 38 | [ -n "${BIGTOP_DEFAULTS_DIR}" -a -r ${BIGTOP_DEFAULTS_DIR}/hive-metastore ] && . 
${BIGTOP_DEFAULTS_DIR}/hive-metastore 39 | 40 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 41 | 42 | RETVAL_SUCCESS=0 43 | 44 | STATUS_RUNNING=0 45 | STATUS_DEAD=1 46 | STATUS_DEAD_AND_LOCK=2 47 | STATUS_NOT_RUNNING=3 48 | STATUS_OTHER_ERROR=102 49 | 50 | 51 | ERROR_PROGRAM_NOT_INSTALLED=5 52 | ERROR_PROGRAM_NOT_CONFIGURED=6 53 | 54 | 55 | RETVAL=0 56 | SLEEP_TIME=5 57 | PROC_NAME="java" 58 | 59 | DAEMON="hive-metastore" 60 | DESC="Hive Metastore" 61 | EXEC_PATH="/usr/hdp/current/hive-metastore/bin/hive" 62 | SVC_USER="hive" 63 | DAEMON_FLAGS="" 64 | CONF_DIR="/etc/hive/conf" 65 | PIDFILE="/var/run/hive/hive-metastore.pid" 66 | LOCKDIR="/var/lock/subsys" 67 | LOCKFILE="$LOCKDIR/hive-metastore" 68 | WORKING_DIR="/var/lib/hive" 69 | 70 | install -d -m 0755 -o hive -g hive /var/lib/hive 1>/dev/null 2>&1 || : 71 | [ -d "$LOCKDIR" ] || install -d -m 0755 $LOCKDIR 1>/dev/null 2>&1 || : 72 | start() { 73 | [ -x $EXE_FILE ] || exit $ERROR_PROGRAM_NOT_INSTALLED 74 | log_success_msg "Starting $DESC (${DAEMON}): " 75 | 76 | checkstatusofproc 77 | status=$? 78 | if [ "$status" -eq "$STATUS_RUNNING" ]; then 79 | log_success_msg "${DESC} is running" 80 | exit 0 81 | fi 82 | 83 | LOG_FILE=/var/log/hive/${DAEMON}.out 84 | 85 | exec_env="HADOOP_OPTS=\"-Dhive.log.dir=`dirname $LOG_FILE` -Dhive.log.file=${DAEMON}.log -Dhive.log.threshold=INFO\"" 86 | 87 | # Autodetect JDBC drivers for metastore 88 | exec_env="HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${BIGTOP_CLASSPATH} $exec_env" 89 | 90 | su -s /bin/bash $SVC_USER -c "$exec_env nohup nice -n 0 \ 91 | $EXEC_PATH --service metastore $PORT \ 92 | > $LOG_FILE 2>&1 < /dev/null & "'echo $! '"> $PIDFILE" 93 | sleep 3 94 | 95 | checkstatusofproc 96 | RETVAL=$? 97 | [ $RETVAL -eq $STATUS_RUNNING ] && touch $LOCKFILE 98 | return $RETVAL 99 | } 100 | stop() { 101 | log_success_msg "Stopping $DESC (${DAEMON}): " 102 | killproc -p $PIDFILE java 103 | RETVAL=$? 
104 | 105 | [ $RETVAL -eq $RETVAL_SUCCESS ] && rm -f $LOCKFILE $PIDFILE 106 | return $RETVAL 107 | } 108 | restart() { 109 | stop 110 | start 111 | } 112 | 113 | checkstatusofproc(){ 114 | pidofproc -p $PIDFILE $PROC_NAME > /dev/null 115 | } 116 | 117 | checkstatus(){ 118 | checkstatusofproc 119 | status=$? 120 | 121 | case "$status" in 122 | $STATUS_RUNNING) 123 | log_success_msg "${DESC} is running" 124 | ;; 125 | $STATUS_DEAD) 126 | log_failure_msg "${DESC} is dead and pid file exists" 127 | ;; 128 | $STATUS_DEAD_AND_LOCK) 129 | log_failure_msg "${DESC} is dead and lock file exists" 130 | ;; 131 | $STATUS_NOT_RUNNING) 132 | log_failure_msg "${DESC} is not running" 133 | ;; 134 | *) 135 | log_failure_msg "${DESC} status is unknown" 136 | ;; 137 | esac 138 | return $status 139 | } 140 | 141 | condrestart(){ 142 | [ -e $LOCKFILE ] && restart || : 143 | } 144 | 145 | check_for_root() { 146 | if [ $(id -ur) -ne 0 ]; then 147 | echo 'Error: root user required' 148 | echo 149 | exit 1 150 | fi 151 | } 152 | 153 | service() { 154 | case "$1" in 155 | start) 156 | check_for_root 157 | start 158 | ;; 159 | stop) 160 | check_for_root 161 | stop 162 | ;; 163 | status) 164 | checkstatus 165 | RETVAL=$? 166 | ;; 167 | restart) 168 | check_for_root 169 | restart 170 | ;; 171 | condrestart|try-restart) 172 | check_for_root 173 | condrestart 174 | ;; 175 | *) 176 | echo $"Usage: $0 {start|stop|status|restart|try-restart|condrestart}" 177 | exit 1 178 | esac 179 | } 180 | 181 | service "$1" 182 | 183 | exit $RETVAL 184 | -------------------------------------------------------------------------------- /modules/install_hdfs_tarballs/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class install_hdfs_tarballs { 17 | require hdfs_datanode 18 | 19 | $path="/bin:/usr/bin" 20 | 21 | if $security == "true" { 22 | require load_hdfs_keytab 23 | } 24 | 25 | if hasrole($clients, 'yarn') { 26 | require yarn_client 27 | 28 | exec {"install-mr-tarball": 29 | command => "hadoop fs -put /usr/hdp/${hdp_version}/hadoop/mapreduce.tar.gz /hdp/apps/${hdp_version}/mapreduce/", 30 | unless => 31 | "hadoop fs -test -e /hdp/apps/${hdp_version}/mapreduce/mapreduce.tar.gz", 32 | path => "$path", 33 | user => "hdfs", 34 | } 35 | } 36 | 37 | if hasrole($clients, 'tez') { 38 | require tez_client 39 | 40 | exec {"install-tez-tarball": 41 | command => "hadoop fs -put /usr/hdp/${hdp_version}/tez/lib/tez.tar.gz /hdp/apps/${hdp_version}/tez/", 42 | unless => "hadoop fs -test -e /hdp/apps/${hdp_version}/tez/tez.tar.gz", 43 | path => "$path", 44 | user => "hdfs", 45 | } 46 | } 47 | 48 | 49 | } 50 | -------------------------------------------------------------------------------- /modules/ip_setup/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class ip_setup { 17 | 18 | service {"iptables": 19 | ensure => stopped, 20 | enable => false, 21 | } 22 | 23 | service {"ip6tables": 24 | ensure => stopped, 25 | enable => false, 26 | } 27 | 28 | file { '/etc/hosts': 29 | ensure => file, 30 | content => template('ip_setup/hosts.erb'), 31 | } 32 | } -------------------------------------------------------------------------------- /modules/ip_setup/templates/hosts.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | <%# for each node in nodes generate: 18 | $ip $hostname.$domain $hostname 19 | -%> 20 | <% eval(@nodes).each do |node| -%> 21 | <%= node[:ip] %> <%= node[:hostname] %>.<%= @domain %> <%= node[:hostname] %> 22 | <% end -%> 23 | 127.0.0.1 localhost 24 | ::1 localhost 25 | -------------------------------------------------------------------------------- /modules/jdk/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class jdk { 17 | $home = "/usr/lib/jvm/java" 18 | 19 | package { "java-1.7.0-openjdk": 20 | ensure => installed, 21 | } 22 | 23 | package { "java-1.7.0-openjdk-devel": 24 | ensure => installed, 25 | } 26 | 27 | file { "/etc/profile.d/java.sh": 28 | ensure => "file", 29 | content => template('jdk/java.erb'), 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /modules/jdk/templates/java.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. 
See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | export JAVA_HOME=<%= @home %> 18 | export PATH=$JAVA_HOME/bin:$PATH 19 | -------------------------------------------------------------------------------- /modules/kafka_server/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class kafka_server { 17 | require repos_setup 18 | require zookeeper_client 19 | require jdk 20 | 21 | $path="/bin:/usr/bin" 22 | 23 | # Install and enable. 
24 | package { "kafka" : 25 | ensure => installed, 26 | } 27 | -> 28 | service { 'kafka': 29 | ensure => running, 30 | enable => true, 31 | } 32 | 33 | # Configure. 34 | file { '/etc/kafka/conf/consumer.properties': 35 | ensure => file, 36 | content => template('kafka_server/consumer.properties.erb'), 37 | require => Package['kafka'], 38 | before => Service['kafka'], 39 | } 40 | file { '/etc/kafka/conf/producer.properties': 41 | ensure => file, 42 | content => template('kafka_server/producer.properties.erb'), 43 | require => Package['kafka'], 44 | before => Service['kafka'], 45 | } 46 | file { '/etc/kafka/conf/server.properties': 47 | ensure => file, 48 | content => template('kafka_server/server.properties.erb'), 49 | require => Package['kafka'], 50 | before => Service['kafka'], 51 | } 52 | 53 | # Create a topic called test. 54 | # file { "/tmp/create_test_topic.sh": 55 | # ensure => "file", 56 | # mode => '755', 57 | # content => template('kafka_server/create_test_topic.sh.erb'), 58 | # } 59 | 60 | # Startup. 
61 | if ($operatingsystem == "centos" and $operatingsystemmajrelease == "7") { 62 | file { "/etc/systemd/system/kafka.service": 63 | ensure => 'file', 64 | source => "/vagrant/files/systemd/kafka.service", 65 | require => Package['kafka'], 66 | before => Service["kafka"], 67 | } 68 | file { "/etc/systemd/system/kafka.service.d": 69 | ensure => 'directory', 70 | } -> 71 | file { "/etc/systemd/system/kafka.service.d/default.conf": 72 | ensure => 'file', 73 | source => "/vagrant/files/systemd/kafka.service.d/default.conf", 74 | require => Package['kafka'], 75 | before => Service["kafka"], 76 | } 77 | } else { 78 | file { "/etc/init.d/kafka": 79 | ensure => file, 80 | source => 'puppet:///modules/kafka_server/kafka', 81 | replace => true, 82 | require => Package['kafka'], 83 | before => Service['kafka'], 84 | mode => '755', 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /modules/kafka_server/templates/consumer.properties.erb: -------------------------------------------------------------------------------- 1 | <% 2 | @zookeeper_servers = eval(@nodes). 3 | select {|node| node[:roles].include? 'zk'}. 4 | map{|node| node[:hostname] + "." + @domain + ":2181"}.join(","); 5 | -%> 6 | # Licensed to the Apache Software Foundation (ASF) under one or more 7 | # contributor license agreements. See the NOTICE file distributed with 8 | # this work for additional information regarding copyright ownership. 9 | # The ASF licenses this file to You under the Apache License, Version 2.0 10 | # (the "License"); you may not use this file except in compliance with 11 | # the License. You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | # see kafka.consumer.ConsumerConfig for more details 21 | 22 | # Zookeeper connection string 23 | # comma separated host:port pairs, each corresponding to a zk 24 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" 25 | zookeeper.connect=<%= @zookeeper_servers %> 26 | 27 | # timeout in ms for connecting to zookeeper 28 | zookeeper.connection.timeout.ms=1000000 29 | 30 | #consumer group id 31 | group.id=test-consumer-group 32 | 33 | #consumer timeout 34 | #consumer.timeout.ms=5000 35 | -------------------------------------------------------------------------------- /modules/kafka_server/templates/create_test_topic.sh.erb: -------------------------------------------------------------------------------- 1 | <% 2 | @zookeeper_servers = eval(@nodes). 3 | select {|node| node[:roles].include? 'zk'}. 4 | map{|node| node[:hostname] + "." + @domain + ":2181"}.join(","); 5 | -%> 6 | #!/bin/sh 7 | 8 | # Wait to settle a bit. 9 | sleep 3 10 | 11 | /usr/hdp/current/kafka-broker/bin/kafka-topics.sh --create --topic test --replication-factor 1 --partitions 1 --zookeeper <%= @zookeeper_servers %> 12 | -------------------------------------------------------------------------------- /modules/kafka_server/templates/producer.properties.erb: -------------------------------------------------------------------------------- 1 | <% 2 | @kafka_servers = eval(@nodes). 3 | select {|node| node[:roles].include? 'kafka'}. 4 | map{|node| node[:hostname] + "." + @domain + ":9092"}.join(","); 5 | -%> 6 | # Licensed to the Apache Software Foundation (ASF) under one or more 7 | # contributor license agreements. See the NOTICE file distributed with 8 | # this work for additional information regarding copyright ownership. 
9 | # The ASF licenses this file to You under the Apache License, Version 2.0 10 | # (the "License"); you may not use this file except in compliance with 11 | # the License. You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | # see kafka.producer.ProducerConfig for more details 21 | 22 | ############################# Producer Basics ############################# 23 | 24 | # list of brokers used for bootstrapping knowledge about the rest of the cluster 25 | # format: host1:port1,host2:port2 ... 26 | metadata.broker.list=<%= @kafka_servers %> 27 | 28 | # name of the partitioner class for partitioning events; default partition spreads data randomly 29 | #partitioner.class= 30 | 31 | # specifies whether the messages are sent asynchronously (async) or synchronously (sync) 32 | producer.type=sync 33 | 34 | # specify the compression codec for all data generated: none , gzip, snappy. 
35 | # the old config values work as well: 0, 1, 2 for none, gzip, snappy, respectivally 36 | compression.codec=none 37 | 38 | # message encoder 39 | serializer.class=kafka.serializer.DefaultEncoder 40 | 41 | # allow topic level compression 42 | #compressed.topics= 43 | 44 | ############################# Async Producer ############################# 45 | # maximum time, in milliseconds, for buffering data on the producer queue 46 | #queue.buffering.max.ms= 47 | 48 | # the maximum size of the blocking queue for buffering on the producer 49 | #queue.buffering.max.messages= 50 | 51 | # Timeout for event enqueue: 52 | # 0: events will be enqueued immediately or dropped if the queue is full 53 | # -ve: enqueue will block indefinitely if the queue is full 54 | # +ve: enqueue will block up to this many milliseconds if the queue is full 55 | #queue.enqueue.timeout.ms= 56 | 57 | # the number of messages batched at the producer 58 | #batch.num.messages= 59 | -------------------------------------------------------------------------------- /modules/kerberos_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class kerberos_client { 17 | require ntp 18 | 19 | 20 | package { 'krb5-auth-dialog': 21 | ensure => installed, 22 | } 23 | 24 | package { 'krb5-workstation': 25 | ensure => installed, 26 | } 27 | 28 | package { 'krb5-libs': 29 | ensure => installed, 30 | } 31 | -> 32 | file { '/etc/krb5.conf': 33 | ensure => file, 34 | content => template('kerberos_client/krb5.erb'), 35 | } 36 | } -------------------------------------------------------------------------------- /modules/kerberos_client/templates/krb5.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | <% @kdc = 18 | eval(@nodes).select {|node| node[:roles].include? 'kdc'}[0][:hostname] + 19 | "." 
+ @domain; 20 | -%> 21 | [logging] 22 | default = FILE:/var/log/krb5libs.log 23 | kdc = FILE:/var/log/krb5kdc.log 24 | admin_server = FILE:/var/log/kadmind.log 25 | 26 | [libdefaults] 27 | default_realm = <%= @realm %> 28 | dns_lookup_realm = false 29 | dns_lookup_kdc = false 30 | ticket_lifetime = 24h 31 | forwardable = yes 32 | udp_preference_limit=1 33 | 34 | [realms] 35 | <%= @realm %> = { 36 | kdc = <%= @kdc %>:88 37 | admin_server = <%= @kdc %>:749 38 | default_domain = <%= @domain %> 39 | } 40 | 41 | [domain_realm] 42 | .<%= @domain %> = <%= @realm %> 43 | <%= @domain %> = <%= @realm %> 44 | 45 | [appdefaults] 46 | pam = { 47 | debug = false 48 | ticket_lifetime = 36000 49 | renew_lifetime = 36000 50 | forwardable = true 51 | krb4_convert = false 52 | } 53 | -------------------------------------------------------------------------------- /modules/kerberos_http/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class kerberos_http { 17 | require hadoop_server 18 | require kerberos_client 19 | require ssl_ca 20 | 21 | if hasrole($roles, 'kdc') { 22 | Class['kerberos_kdc'] -> Class['kerberos_http'] 23 | } 24 | 25 | $path = "${jdk::home}/bin:/bin:/usr/bin" 26 | 27 | file { "${hdfs_client::keytab_dir}": 28 | ensure => directory, 29 | owner => 'root', 30 | group => 'hadoop', 31 | mode => '750', 32 | } 33 | -> 34 | file { "${hdfs_client::keytab_dir}/http-secret": 35 | ensure => file, 36 | # this needs to be a cluster wide secret 37 | content => vagrant, 38 | owner => root, 39 | group => hadoop, 40 | mode => '440', 41 | } 42 | -> 43 | file { "${hdfs_client::keytab_dir}/http.keytab": 44 | ensure => file, 45 | source => "/vagrant/generated/keytabs/${hostname}/HTTP.keytab", 46 | owner => 'root', 47 | group => 'hadoop', 48 | mode => '440', 49 | } 50 | -> 51 | file { "/tmp/create-cert": 52 | ensure => file, 53 | content => template('kerberos_http/create-cert.erb'), 54 | mode => '700', 55 | } 56 | -> 57 | exec { '/tmp/create-cert': 58 | creates => "${hdfs_client::keytab_dir}/server.crt", 59 | cwd => "${hdfs_client::keytab_dir}", 60 | # BUGFIX: was `path => '$path'` — Puppet does not interpolate inside single quotes, so the exec ran with the literal PATH string "$path" and keytool/openssl from ${jdk::home}/bin were not resolvable. 61 | path => $path, 62 | provider => shell, 63 | } 64 | 65 | file { "${hdfs_client::conf_dir}/ssl-server.xml": 66 | ensure => file, 67 | owner => 'root', 68 | group => 'hadoop', 69 | mode => '640', 70 | content => template('kerberos_http/ssl-server.erb'), 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /modules/kerberos_http/templates/create-cert.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 
5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | #!/bin/bash 18 | export PATH=<%= @path %> 19 | umask 077 20 | keytool -genkey -alias http -keyalg RSA -keystore server.jks -keysize 4096 \ 21 | -validity 36525 -storepass changeit << EOF 22 | <%= @hostname %>.<%= @domain %> 23 | <%= @hostname %> 24 | Example Corp 25 | Palo Alto 26 | CA 27 | US 28 | yes 29 | EOF 30 | keytool -certreq -alias http -keystore server.jks -storepass changeit \ 31 | -file server.csr 32 | openssl x509 -req -days 36525 -in server.csr \ 33 | -CA /vagrant/generated/ssl-ca/ca.crt -CAkey /vagrant/generated/ssl-ca/ca.key \ 34 | -CAserial /vagrant/generated/ssl-ca/ca.ser -out server.crt 35 | keytool -import -alias http -keystore server.jks \ 36 | -storepass changeit -trustcacerts -file server.crt 37 | chown root:hadoop server.jks server.crt 38 | chmod 640 server.jks server.crt 39 | -------------------------------------------------------------------------------- /modules/kerberos_http/templates/ssl-server.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 
5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | 18 | 19 | 20 | 21 | 22 | 23 | ssl.server.truststore.location 24 | <%= scope.lookupvar('jdk::home') %>/jre/lib/security/cacerts 25 | Truststore to be used by NN and DN. 26 | 27 | 28 | 29 | ssl.server.truststore.password 30 | changeit 31 | 32 | 33 | 34 | ssl.server.keystore.location 35 | <%= scope.lookupvar('hdfs_client::keytab_dir') %>/server.jks 36 | Keystore to be used by NN and DN. 37 | 38 | 39 | 40 | ssl.server.keystore.password 41 | changeit 42 | 43 | 44 | 45 | ssl.server.keystore.keypassword 46 | changeit 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /modules/kerberos_kdc/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class kerberos_kdc { 17 | require kerberos_client 18 | $path="/bin:/usr/bin:/sbin:/usr/sbin" 19 | $password="vagrant" 20 | 21 | package { 'krb5-server': 22 | ensure => installed, 23 | } 24 | -> 25 | file { '/var/kerberos/krb5kdc/kdc.conf': 26 | ensure => file, 27 | content => template('kerberos_kdc/kdc.erb'), 28 | } 29 | -> 30 | file { '/vagrant/generated': 31 | ensure => directory, 32 | mode => 'go-rwx', 33 | } 34 | -> 35 | file { '/vagrant/generated/create-kerberos-db': 36 | ensure => file, 37 | content => template('kerberos_kdc/create-kerberos-kdc.erb'), 38 | mode => 'u=rwx,go=', 39 | } 40 | -> 41 | exec { 'kdc-init': 42 | command => "/vagrant/generated/create-kerberos-db", 43 | creates => "/var/kerberos/krb5kdc/principal", 44 | path => $path, 45 | } 46 | -> 47 | service { 'krb5kdc': 48 | ensure => running, 49 | enable => true, 50 | } 51 | } -------------------------------------------------------------------------------- /modules/kerberos_kdc/templates/create-kerberos-kdc.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. 
You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | #!/bin/bash 18 | KEYDIR=/vagrant/generated/keytabs 19 | <% require 'set'; 20 | password = scope.lookupvar('kerberos_kdc::password'); 21 | nodelist = eval(@nodes); 22 | # a map from roles to required users 23 | users = {'nn' => ['nn', 'HTTP'], 24 | 'yarn' => ['rm', 'jhs', 'HTTP'], 25 | 'slave' => ['nm', 'dn', 'HTTP'], 26 | 'hive-meta' => ['hive'], 27 | 'oozie' => ['oozie', 'HTTP'], 28 | 'hbase-master' => ['hbase', 'HTTP'], 29 | 'hbase-regionserver' => ['hbase', 'HTTP'], 30 | 'zk' => ['zookeeper']}; -%> 31 | 32 | # ensure we don't have old keytabs 33 | rm -fr $KEYDIR 34 | <% nodelist.each do |node| -%> 35 | mkdir -p $KEYDIR/<%= node[:hostname] %> 36 | <% end -%> 37 | 38 | # create a new kerberos user database 39 | kdb5_util create -s -P <%= password %> 40 | 41 | # create the accounts and keytabs 42 | kadmin.local << EOF 43 | addprinc -pw <%= password %> vagrant 44 | <% nodelist.each do |node| 45 | node[:roles].map{|r| users[r]}.flatten.to_set.select {|n| n != nil}. 
46 | each do |user| -%> 47 | addprinc -randkey <%= user %>/<%= node[:hostname] %>.<%= @domain %>@<%= @realm %> 48 | xst -norandkey -k $KEYDIR/<%= node[:hostname] %>/<%= user %>.keytab <%= user %>/<%= node[:hostname] %>.<%= @domain %>@<%= @realm %> 49 | <% end 50 | end -%> 51 | EOF 52 | -------------------------------------------------------------------------------- /modules/kerberos_kdc/templates/kdc.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | [kdcdefaults] 18 | v4_mode = nopreauth 19 | kdc_ports = 88 20 | kdc_tcp_ports = 88 21 | 22 | [realms] 23 | <%= @realm %> = { 24 | acl_file = /var/kerberos/krb5kdc/kadm5.acl 25 | dict_file = /usr/share/dict/words 26 | admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab 27 | supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal des-cbc-crc:v4 des-cbc-crc:afs3 28 | } 29 | -------------------------------------------------------------------------------- /modules/knox_gateway/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class knox_gateway { 17 | require repos_setup 18 | require jdk 19 | 20 | $java_home="${jdk::home}" 21 | 22 | package { "knox.noarch" : 23 | ensure => installed, 24 | } 25 | -> 26 | file { "/etc/init.d/knox-gateway": 27 | ensure => file, 28 | source => "puppet:///files/init.d/knox-gateway", 29 | owner => root, 30 | group => root, 31 | mode => '755', 32 | } 33 | -> 34 | exec { 'start-ldap' : 35 | path => "/usr/bin:/usr/sbin:/bin", 36 | command => "bash -x /usr/hdp/${hdp_version}/knox/bin/ldap.sh start", 37 | user => "knox", 38 | environment => "JAVA_HOME=${java_home}", 39 | } 40 | -> 41 | exec { 'setup-gateway' : 42 | path => "/usr/bin:/usr/sbin:/bin", 43 | command => "/usr/hdp/${hdp_version}/knox/bin/knoxcli.sh create-master --master test-master-secret", 44 | user => "knox", 45 | creates => "/usr/hdp/${hdp_version}/knox/data/security/master", 46 | environment => "JAVA_HOME=${java_home}", 47 | } 48 | -> 49 | service {"knox-gateway": 50 | ensure => running, 51 | enable => true, 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /modules/load_hdfs_keytab/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class load_hdfs_keytab { 17 | require hdfs_datanode 18 | $path="/bin:/usr/bin" 19 | 20 | exec { "kinit -k -t ${hdfs_client::keytab_dir}/dn.keytab dn/${hostname}.${domain}": 21 | path => $path, 22 | user => "hdfs", 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /modules/ntp/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class ntp { 17 | package { "ntp": 18 | ensure => installed, 19 | } 20 | service { "ntp": 21 | name => "ntpd", 22 | ensure => running, 23 | enable => true, 24 | } 25 | } -------------------------------------------------------------------------------- /modules/oozie_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class oozie_client { 17 | require repos_setup 18 | require hdp_select 19 | 20 | package { "oozie${package_version}-client": 21 | ensure => installed, 22 | } 23 | 24 | file { "/etc/profile.d/oozie.sh": 25 | ensure => file, 26 | content => template('oozie_client/oozie.erb'), 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /modules/oozie_client/templates/oozie.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | <% 18 | @oozie = eval(@nodes).select {|node| node[:roles].include? 'oozie' 19 | }[0][:hostname] + "." + @domain; 20 | -%> 21 | <% if @security == "true" -%> 22 | export OOZIE_URL=https://<%= @oozie %>:11443/oozie 23 | <% else -%> 24 | export OOZIE_URL=http://<%= @oozie %>:11000/oozie 25 | <% end -%> -------------------------------------------------------------------------------- /modules/oozie_server/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class oozie_server { 17 | require repos_setup 18 | require hdp_select 19 | require hdfs_client 20 | require hive_client 21 | 22 | $conf_dir = "/etc/oozie/conf" 23 | $keytab_dir = "/etc/security/hadoop" 24 | $path="/bin:/usr/bin:/usr/hdp/${hdp_version}/oozie/bin" 25 | $java_home="${jdk::home}" 26 | 27 | if $security == "true" { 28 | require kerberos_http 29 | 30 | file { "${keytab_dir}/oozie.keytab": 31 | ensure => file, 32 | source => "/vagrant/generated/keytabs/${hostname}/oozie.keytab", 33 | owner => oozie, 34 | group => oozie, 35 | mode => '400', 36 | } 37 | -> 38 | exec { "kinit -k -t ${keytab_dir}/oozie.keytab oozie/${hostname}.${domain}": 39 | path => $path, 40 | user => 'oozie', 41 | } 42 | -> 43 | Package["oozie${package_version}"] 44 | 45 | $prepare_war_opts = "-secure" 46 | } 47 | 48 | package { 'mysql': 49 | ensure => installed, 50 | } 51 | -> 52 | package { 'unzip': 53 | ensure => installed, 54 | } 55 | -> 56 | package { "oozie${package_version}": 57 | ensure => installed, 58 | } 59 | -> 60 | file { "/etc/oozie": 61 | ensure => directory, 62 | owner => "root", 63 | group => "oozie", 64 | mode => "0750", 65 | } 66 | -> 67 | file { "${conf_dir}/adminusers.txt": 68 | ensure => file, 69 | content => template('oozie_server/adminusers.erb'), 70 | } 71 | -> 72 | file { "${conf_dir}/oozie-site.xml": 73 | ensure => file, 74 | content => template('oozie_server/oozie-site.erb'), 75 | } 76 | -> 77 | file { "${conf_dir}/oozie-env.sh": 78 | ensure => file, 79 | content => template('oozie_server/oozie-env.erb'), 80 | } 81 | -> 82 | file { "/etc/init.d/oozie": 83 | ensure => file, 84 | content => template('oozie_server/oozie-service.erb'), 85 | mode => "0755", 86 | } 87 | -> 88 | package { "extjs": 89 | ensure => installed, 90 | } 91 | -> 92 | file { "/usr/hdp/${hdp_version}/oozie/libext/ext-2.2.zip": 93 | ensure => link, 94 | target => "/usr/share/HDP-oozie/ext-2.2.zip", 95 | } 96 | -> 97 | file { "/usr/hdp/${hdp_version}/oozie/libext/hadoop-lzo.jar": 98 | 
ensure => link, 99 | target => "/usr/hdp/${hdp_version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp_version}.jar", 100 | } 101 | -> 102 | file { "/usr/hdp/${hdp_version}/oozie/libext/mysql-connector-java.jar": 103 | ensure => link, 104 | target => "/usr/share/java/mysql-connector-java.jar", 105 | } 106 | -> 107 | exec { "oozie-prepare-war": 108 | path => $path, 109 | command => "oozie-setup.sh prepare-war ${prepare_war_opts}", 110 | creates => "/usr/hdp/${hdp_version}/oozie/oozie-server/webapps/oozie.war", 111 | } 112 | -> 113 | file { "/tmp/create-oozie-db-user.sh": 114 | ensure => file, 115 | owner => root, 116 | mode => '0700', 117 | content => template('oozie_server/create-oozie-db-user.erb'), 118 | } 119 | -> 120 | exec { "oozie-db-user": 121 | path => $path, 122 | command => "/tmp/create-oozie-db-user.sh", 123 | } 124 | -> 125 | exec { "oozie-createdb": 126 | path => $path, 127 | environment => "JAVA_HOME=${java_home}", 128 | command => "ooziedb.sh create -sqlfile /tmp/oozie.sql -run", 129 | creates => "/tmp/oozie.sql", 130 | user => 'oozie', 131 | group => 'oozie', 132 | } 133 | -> 134 | exec { "untar-oozie-sharelib": 135 | path => $path, 136 | cwd => "/tmp", 137 | command => "tar xzf /usr/hdp/${hdp_version}/oozie/oozie-sharelib.tar.gz", 138 | creates => "/tmp/share", 139 | user => 'oozie', 140 | group => 'oozie', 141 | } 142 | -> 143 | file { "/tmp/share/lib/hive/mysql-connector-java.jar": 144 | ensure => file, 145 | source => "/usr/share/java/mysql-connector-java.jar", 146 | owner => 'oozie', 147 | group => 'oozie', 148 | mode => "0644", 149 | } 150 | -> 151 | exec { "install-oozie-sharelib": 152 | path => $path, 153 | cwd => "/tmp", 154 | command => "hadoop fs -put share /user/oozie/", 155 | unless => 156 | "hadoop fs -test -e /user/oozie/share", 157 | user => 'oozie', 158 | } 159 | -> 160 | service { "oozie": 161 | ensure => running, 162 | enable => true, 163 | } 164 | } 165 | -------------------------------------------------------------------------------- 
/modules/oozie_server/templates/adminusers.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | # Admin Users, one user by line 18 | oozie -------------------------------------------------------------------------------- /modules/oozie_server/templates/create-oozie-db-user.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | <% 18 | @db = eval(@nodes).select {|node| node[:roles].include? 'hive-db' 19 | }[0][:hostname] + "." + @domain; 20 | -%> 21 | #!/bin/bash 22 | mysql -u root -pvagrant -h <%= @db %> << EOF 23 | create database if not exists oozie; 24 | grant all privileges on oozie.* to 'oozie'@'localhost' identified by 'vagrant'; 25 | grant all privileges on oozie.* to 'oozie'@'%' identified by 'vagrant'; 26 | EOF 27 | -------------------------------------------------------------------------------- /modules/oozie_server/templates/oozie-env.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | #! 
/bin/bash 18 | 19 | export OOZIE_CONFIG=${OOZIE_CONFIG:-/usr/hdp/current/oozie-server/conf} 20 | export OOZIE_DATA=${OOZIE_DATA:-/var/lib/oozie/data} 21 | export OOZIE_LOG=${OOZIE_LOG:-/var/log/oozie} 22 | export CATALINA_BASE=${CATALINA_BASE:-/usr/hdp/current/oozie-server/oozie-server} 23 | export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie} 24 | export CATALINA_PID=${CATALINA_PID:-/var/run/oozie/oozie.pid} 25 | export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat 26 | <% if @security == "true" -%> 27 | export OOZIE_BASE_URL=https://<%= @hostname %>.<%= @domain %>:11443/oozie 28 | export OOZIE_HTTPS_KEYSTORE_FILE=<%= @keytab_dir %>/server.jks 29 | export OOZIE_HTTPS_KEYSTORE_PASS=changeit 30 | <% else -%> 31 | export OOZIE_BASE_URL=http://<%= @hostname %>.<%= @domain %>:11000/oozie 32 | <% end -%> -------------------------------------------------------------------------------- /modules/oozie_server/templates/oozie-service.erb: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Starts an Oozie server 4 | # 5 | # chkconfig: 345 85 15 6 | # description: Oozie Server 7 | # 8 | ### BEGIN INIT INFO 9 | # Provides: oozie-server 10 | # Short-Description: Oozie Server 11 | # Default-Start: 3 4 5 12 | # Default-Stop: 0 1 2 6 13 | # Required-Start: $syslog $remote_fs 14 | # Required-Stop: $syslog $remote_fs 15 | # Should-Start: 16 | # Should-Stop: 17 | ### END INIT INFO 18 | 19 | . 
/lib/lsb/init-functions 20 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 21 | 22 | RETVAL_SUCCESS=0 23 | 24 | STATUS_RUNNING=0 25 | STATUS_DEAD=1 26 | STATUS_DEAD_AND_LOCK=2 27 | STATUS_NOT_RUNNING=3 28 | STATUS_OTHER_ERROR=102 29 | 30 | 31 | ERROR_PROGRAM_NOT_INSTALLED=5 32 | ERROR_PROGRAM_NOT_CONFIGURED=6 33 | 34 | 35 | RETVAL=0 36 | SLEEP_TIME=5 37 | PROC_NAME="java" 38 | 39 | DAEMON="oozie" 40 | DESC="Oozie server" 41 | EXEC_PATH="/usr/hdp/current/oozie-server/bin/oozied.sh" 42 | SVC_USER="oozie" 43 | DAEMON_FLAGS="" 44 | PIDFILE="/var/run/oozie/oozie.pid" 45 | WORKING_DIR="/var/lib/oozie" 46 | 47 | install -d -m 0755 -o hive -g hive /var/lib/hive 1>/dev/null 2>&1 || : 48 | start() { 49 | [ -x $EXE_FILE ] || exit $ERROR_PROGRAM_NOT_INSTALLED 50 | log_success_msg "Starting $DESC (${DAEMON}): " 51 | 52 | checkstatusofproc 53 | status=$? 54 | if [ "$status" -eq "$STATUS_RUNNING" ]; then 55 | log_success_msg "${DESC} is running" 56 | exit 0 57 | fi 58 | 59 | LOG_FILE=/var/log/oozie/${DAEMON}.out 60 | (su -s /bin/bash $SVC_USER -c "$EXEC_PATH start") 2>&1 > $LOG_FILE 61 | sleep 3 62 | 63 | checkstatusofproc 64 | RETVAL=$? 65 | return $RETVAL 66 | } 67 | 68 | stop() { 69 | log_success_msg "Stopping $DESC (${DAEMON}): " 70 | 71 | su -s /bin/bash $SVC_USER -c "$EXEC_PATH stop" 72 | RETVAL=$? 73 | return $RETVAL 74 | } 75 | 76 | restart() { 77 | stop 78 | start 79 | } 80 | 81 | checkstatusofproc(){ 82 | pidofproc -p $PIDFILE $PROC_NAME > /dev/null 83 | } 84 | 85 | checkstatus(){ 86 | checkstatusofproc 87 | status=$? 
88 | 89 | case "$status" in 90 | $STATUS_RUNNING) 91 | log_success_msg "${DESC} is running" 92 | ;; 93 | $STATUS_DEAD) 94 | log_failure_msg "${DESC} is dead and pid file exists" 95 | ;; 96 | $STATUS_DEAD_AND_LOCK) 97 | log_failure_msg "${DESC} is dead and lock file exists" 98 | ;; 99 | $STATUS_NOT_RUNNING) 100 | log_failure_msg "${DESC} is not running" 101 | ;; 102 | *) 103 | log_failure_msg "${DESC} status is unknown" 104 | ;; 105 | esac 106 | return $status 107 | } 108 | 109 | check_for_root() { 110 | if [ $(id -ur) -ne 0 ]; then 111 | echo 'Error: root user required' 112 | echo 113 | exit 1 114 | fi 115 | } 116 | 117 | service() { 118 | case "$1" in 119 | start) 120 | check_for_root 121 | start 122 | ;; 123 | stop) 124 | check_for_root 125 | stop 126 | ;; 127 | status) 128 | checkstatus 129 | RETVAL=$? 130 | ;; 131 | restart) 132 | check_for_root 133 | restart 134 | ;; 135 | *) 136 | echo $"Usage: $0 {start|stop|status|restart}" 137 | exit 1 138 | esac 139 | } 140 | 141 | service "$1" 142 | 143 | exit $RETVAL 144 | -------------------------------------------------------------------------------- /modules/pig_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class pig_client { 17 | require yarn_client 18 | 19 | $conf_dir="/etc/pig/hdp" 20 | $path="/usr/bin" 21 | 22 | package { "pig${package_version}": 23 | ensure => present, 24 | } 25 | 26 | file { '/etc/pig': 27 | ensure => 'directory', 28 | } 29 | 30 | file { "${conf_dir}": 31 | ensure => 'directory', 32 | } 33 | 34 | file { '/etc/pig/conf': 35 | ensure => 'link', 36 | target => "${conf_dir}", 37 | require => Package["pig${package_version}"], 38 | force => true 39 | } 40 | 41 | file { "${conf_dir}/pig-env.sh": 42 | ensure => file, 43 | content => template('pig_client/pig-env.erb'), 44 | } 45 | 46 | file { "${conf_dir}/log4j.properties": 47 | ensure => file, 48 | content => template('pig_client/log4j.erb'), 49 | } 50 | 51 | file { "${conf_dir}/pig.properties": 52 | ensure => file, 53 | content => template('pig_client/pig.erb'), 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /modules/pig_client/templates/log4j.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | # ***** Set root logger level to DEBUG and its only appender to A. 18 | log4j.logger.org.apache.pig=info, A 19 | 20 | # ***** A is set to be a ConsoleAppender. 21 | log4j.appender.A=org.apache.log4j.ConsoleAppender 22 | # ***** A uses PatternLayout. 23 | log4j.appender.A.layout=org.apache.log4j.PatternLayout 24 | log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n 25 | -------------------------------------------------------------------------------- /modules/pig_client/templates/pig-env.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 18 | -------------------------------------------------------------------------------- /modules/pig_client/templates/pig.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 
5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | # Pig configuration file. All values can be overwritten by command line arguments. 18 | 19 | # log4jconf log4j configuration file 20 | # log4jconf=./conf/log4j.properties 21 | 22 | # a file that contains pig script 23 | #file= 24 | 25 | # load jarfile, colon separated 26 | #jar= 27 | 28 | #verbose print all log messages to screen (default to print only INFO and above to screen) 29 | #verbose=true 30 | 31 | #exectype local|mapreduce, mapreduce is default 32 | #exectype=local 33 | 34 | #pig.logfile= 35 | 36 | #Do not spill temp files smaller than this size (bytes) 37 | #pig.spill.size.threshold=5000000 38 | #EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes) 39 | #This should help reduce the number of files being spilled. 
40 | #pig.spill.gc.activation.size=40000000 41 | 42 | #the following two parameters are to help estimate the reducer number 43 | #pig.exec.reducers.bytes.per.reducer=1000000000 44 | #pig.exec.reducers.max=999 45 | 46 | #Use this option only when your Pig job will otherwise die because of 47 | #using more counter than hadoop configured limit 48 | #pig.disable.counter=true 49 | -------------------------------------------------------------------------------- /modules/repos_setup/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class repos_setup { 17 | file { '/etc/yum.repos.d/hdp.repo': 18 | ensure => file, 19 | source => "puppet:///files/repos/centos6.hdp.repo.${hdp_short_version}", 20 | } 21 | file { '/etc/yum.repos.d/ambari.repo': 22 | ensure => file, 23 | source => "puppet:///files/repos/centos6.ambari.repo.${ambari_version}", 24 | } 25 | package { 'epel-release-6-8': 26 | ensure => absent, 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /modules/selinux/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # Turns of selinux so that mapreduce's task controller will work correctly 17 | class selinux { 18 | file { '/etc/selinux/config': 19 | ensure => file, 20 | content => template('selinux/selinux.erb'), 21 | } 22 | } -------------------------------------------------------------------------------- /modules/selinux/templates/selinux.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. 
See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | # This file controls the state of SELinux on the system. 18 | # SELINUX= can take one of these three values: 19 | # enforcing - SELinux security policy is enforced. 20 | # permissive - SELinux prints warnings instead of enforcing. 21 | # disabled - SELinux is fully disabled. 22 | SELINUX=disabled 23 | # SELINUXTYPE= type of policy in use. Possible values are: 24 | # targeted - Only targeted network daemons are protected. 25 | # strict - Full SELinux protection. 26 | SELINUXTYPE=targeted 27 | -------------------------------------------------------------------------------- /modules/spark_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class spark_client { 17 | require hdfs_client 18 | 19 | package { "spark${package_version}": 20 | ensure => installed, 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /modules/ssl_ca/files/ca-info.txt: -------------------------------------------------------------------------------- 1 | US 2 | California 3 | Palo Alto 4 | Hortonworks Certificate Authority 5 | Certificate Authority 6 | hortonworks.com 7 | . 8 | -------------------------------------------------------------------------------- /modules/ssl_ca/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | # This module create a generated certificate authority that can be used to 17 | # make certificates for all of the servers. 18 | class ssl_ca { 19 | require jdk 20 | 21 | $java="/usr/java/default" 22 | $path="${java}/bin:/bin:/usr/bin" 23 | $cadir="/vagrant/generated/ssl-ca" 24 | 25 | file { "${cadir}": 26 | ensure => directory, 27 | } 28 | -> 29 | exec {'openssl genrsa -out ca.key 4096': 30 | cwd => "${cadir}", 31 | creates => "${cadir}/ca.key", 32 | path => "$path", 33 | } 34 | -> 35 | exec {"openssl req -new -x509 -days 36525 -key ca.key -out ca.crt < /vagrant/modules/ssl_ca/files/ca-info.txt": 36 | cwd => "$cadir", 37 | creates => "${cadir}/ca.crt", 38 | path => "$path", 39 | } 40 | 41 | file {"${cadir}/ca.srl": 42 | replace => no, 43 | ensure => present, 44 | content => "01", 45 | mode => "600", 46 | } 47 | 48 | file {"${cadir}/ca.ser": 49 | replace => no, 50 | ensure => present, 51 | content => "01", 52 | mode => "600", 53 | } 54 | } -------------------------------------------------------------------------------- /modules/storm_server/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class storm_server { 17 | require repos_setup 18 | require zookeeper_client 19 | require jdk 20 | 21 | $path="/bin:/usr/bin" 22 | 23 | # Install and enable. 24 | package { "storm" : 25 | ensure => installed, 26 | } 27 | -> 28 | exec { "hdp-select set storm-client ${hdp_version}": 29 | cwd => "/", 30 | path => "$path", 31 | } 32 | -> 33 | exec { "hdp-select set storm-supervisor ${hdp_version}": 34 | cwd => "/", 35 | path => "$path", 36 | } 37 | -> 38 | file { "/etc/storm/conf/storm.yaml": 39 | ensure => file, 40 | content => template('storm_server/storm-yaml.erb'), 41 | } 42 | -> 43 | file { "/etc/init.d/storm-supervisor": 44 | ensure => file, 45 | content => epp('storm_server/storm-init.epp', 46 | {'service' => 'supervisor', 'name' => 'Storm Supervisor'}), 47 | mode => '755', 48 | } 49 | -> 50 | service {"storm-supervisor": 51 | ensure => running, 52 | enable => true, 53 | } 54 | 55 | if hasrole($roles, 'storm_nimbus') { 56 | exec { "hdp-select set storm-nimbus ${hdp_version}": 57 | cwd => "/", 58 | path => "$path", 59 | } 60 | -> 61 | file { "/etc/init.d/storm-nimbus": 62 | ensure => file, 63 | content => epp('storm_server/storm-init.epp', 64 | {'service' => 'nimbus', 'name' => 'Storm Nimbus'}), 65 | mode => '755', 66 | } 67 | -> 68 | service {"storm-nimbus": 69 | ensure => running, 70 | enable => true, 71 | require => File['/etc/storm/conf/storm.yaml'], 72 | } 73 | 74 | file { "/etc/init.d/storm-ui": 75 | ensure => file, 76 | content => epp('storm_server/storm-init.epp', 77 | {'service' => 'ui', 'name' => 'Storm UI'}), 78 | mode => '755', 79 | } 80 | -> 81 | service {"storm-ui": 82 | ensure => running, 83 | enable => true, 84 | require => File['/etc/storm/conf/storm.yaml'], 85 | } 86 | 87 | file { "/etc/init.d/storm-logviewer": 88 | ensure => file, 89 | content => epp('storm_server/storm-init.epp', 90 | {'service' => 'logviewer', 'name' => 
'Storm Log Viewer'}), 91 | mode => '755', 92 | } 93 | -> 94 | service {"storm-logviewer": 95 | ensure => running, 96 | enable => true, 97 | require => File['/etc/storm/conf/storm.yaml'], 98 | } 99 | 100 | file { "/etc/init.d/storm-drpc": 101 | ensure => file, 102 | content => epp('storm_server/storm-init.epp', 103 | {'service' => 'drpc', 'name' => 'Storm DRPC'}), 104 | mode => '755', 105 | } 106 | -> 107 | service {"storm-drpc": 108 | ensure => running, 109 | enable => true, 110 | require => File['/etc/storm/conf/storm.yaml'], 111 | } 112 | } 113 | 114 | # TODO deal with security 115 | 116 | 117 | } 118 | -------------------------------------------------------------------------------- /modules/storm_server/templates/storm-yaml.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | <% @nimbi = eval(@nodes).select {|node| node[:roles].include? 'storm_nimbus'}. 18 | map {|node| node[:hostname] + "." + @domain}.join(","); 19 | @zookeeper_servers = eval(@nodes). 20 | select {|node| node[:roles].include? 'zk'}. 21 | map{|node| node[:hostname] + "." 
+ @domain}.join(","); 22 | -%> 23 | 24 | storm.zookeeper.servers: [ "<%= @zookeeper_servers %>" ] 25 | nimbus.seeds: [ "<%= @nimbi %>" ] 26 | storm.local.dir: "/var/lib/storm" 27 | logviewer.port: 8081 28 | 29 | supervisor.slots.ports: 30 | - 6700 31 | - 6701 32 | - 6702 33 | - 6703 34 | -------------------------------------------------------------------------------- /modules/tez_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License.
15 | 16 | class tez_client { 17 | require yarn_client 18 | 19 | $conf_dir="/etc/tez/hdp" 20 | 21 | package { "tez${package_version}": 22 | ensure => installed, 23 | } 24 | -> 25 | file { "/etc/tez": 26 | ensure => directory, 27 | } 28 | -> 29 | file { "${conf_dir}": 30 | ensure => 'directory', 31 | } 32 | -> 33 | file { '/etc/tez/conf': 34 | ensure => 'link', 35 | target => "${conf_dir}", 36 | force => true 37 | } 38 | 39 | file { "${conf_dir}/tez-env.sh": 40 | ensure => file, 41 | content => template('tez_client/tez-env.erb'), 42 | } 43 | 44 | file { "${conf_dir}/tez-site.xml": 45 | ensure => file, 46 | content => template('tez_client/tez-site.erb'), 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /modules/tez_client/templates/tez-env.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | # Tez specific configuration 18 | export TEZ_CONF_DIR=/etc/tez/conf 19 | 20 | # The java implementation to use. 
21 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 22 | -------------------------------------------------------------------------------- /modules/vm_users/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # This class ensures that the vagrant user is properly setup and cleans up 17 | # the veewee user from the base box. 18 | class vm_users { 19 | 20 | user { 'veewee': 21 | ensure => absent, 22 | } 23 | 24 | user { 'vagrant': 25 | ensure => present, 26 | gid => vagrant, 27 | groups => ['users'], 28 | membership => inclusive, 29 | } 30 | 31 | } -------------------------------------------------------------------------------- /modules/weak_random/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # This class replaces /dev/random with /dev/urandom. 17 | # This should *ONLY* be used in virtual machines that don't have enough 18 | # entropy and where the generated keys won't be used in real environments. 19 | class weak_random { 20 | require repos_setup 21 | $path = "/bin:/usr/bin" 22 | 23 | exec { '/dev/random' : 24 | path => $path, 25 | command => 'rm -f /dev/random; ln -s /dev/urandom /dev/random', 26 | unless => 'test -L /dev/random', 27 | } 28 | } -------------------------------------------------------------------------------- /modules/yarn_client/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class yarn_client { 17 | require repos_setup 18 | require jdk 19 | require hdfs_client 20 | 21 | $user_logs = "/user/yarn/" 22 | $path="/usr/bin" 23 | 24 | package { "hadoop${package_version}-yarn": 25 | ensure => installed, 26 | } 27 | -> 28 | file { "/usr/hdp/${hdp_version}/hadoop-yarn/libexec": 29 | ensure => link, 30 | target => "/usr/hdp/${hdp_version}/hadoop/libexec", 31 | } 32 | 33 | package { "hadoop${package_version}-mapreduce": 34 | ensure => installed, 35 | } 36 | -> 37 | file { "/usr/hdp/${hdp_version}/hadoop-mapreduce/libexec": 38 | ensure => link, 39 | target => "/usr/hdp/${hdp_version}/hadoop/libexec", 40 | } 41 | 42 | file { "${hdfs_client::conf_dir}/capacity-scheduler.xml": 43 | ensure => file, 44 | content => template('yarn_client/capacity-scheduler.erb'), 45 | } 46 | 47 | file { "${hdfs_client::conf_dir}/mapred-env.sh": 48 | ensure => file, 49 | content => template('yarn_client/mapred-env.erb'), 50 | } 51 | 52 | file { "${hdfs_client::conf_dir}/mapred-site.xml": 53 | ensure => file, 54 | content => template('yarn_client/mapred-site.erb'), 55 | } 56 | 57 | file { "${hdfs_client::conf_dir}/task-log4j.properties": 58 | ensure => file, 59 | content => template('yarn_client/task-log4j.erb'), 60 | } 61 | 62 | file { "${hdfs_client::conf_dir}/yarn.exclude": 63 | ensure => file, 64 | content => "", 65 | } 66 | 67 | file { "${hdfs_client::conf_dir}/yarn-env.sh": 68 | ensure => file, 69 | content => template('yarn_client/yarn-env.erb'), 70 | } 71 | 72 | file { "${hdfs_client::conf_dir}/yarn-site.xml": 73 | ensure 
=> file, 74 | content => template('yarn_client/yarn-site.erb'), 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /modules/yarn_client/templates/capacity-scheduler.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | yarn.scheduler.capacity.root.default.maximum-capacity 28 | 100 29 | 30 | 31 | 32 | yarn.scheduler.capacity.root.capacity 33 | 100 34 | 35 | 36 | 37 | yarn.scheduler.capacity.root.default.capacity 38 | 100 39 | 40 | 41 | 42 | yarn.scheduler.capacity.root.queues 43 | default 44 | 45 | 46 | 47 | yarn.scheduler.capacity.maximum-applications 48 | 10000 49 | 50 | 51 | 52 | yarn.scheduler.capacity.root.default.user-limit-factor 53 | 1 54 | 55 | 56 | 57 | yarn.scheduler.capacity.root.unfunded.capacity 58 | 50 59 | 60 | 61 | 62 | yarn.scheduler.capacity.root.default.state 63 | RUNNING 64 | 65 | 66 | 67 | yarn.scheduler.capacity.root.default.acl_submit_applications 68 | * 69 | 70 | 71 | 72 | yarn.scheduler.capacity.maximum-am-resource-percent 73 | 0.2 74 | 75 | 76 | 77 | yarn.scheduler.capacity.root.acl_administer_queue 78 | * 79 | 80 | 81 | 82 | yarn.scheduler.capacity.node-locality-delay 83 | 40 84 | 85 | 86 | 87 | yarn.scheduler.capacity.root.default.acl_administer_jobs 88 | * 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /modules/yarn_client/templates/mapred-env.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=<%= @server_mem %> 18 | 19 | export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 20 | export HADOOP_MAPRED_IDENT_STRING="$HADOOP_IDENT_STRING" 21 | export HADOOP_MAPRED_PID_DIR="$HADOOP_PID_DIR" -------------------------------------------------------------------------------- /modules/yarn_client/templates/task-log4j.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | 18 | # Define some default values that can be overridden by system properties 19 | hadoop.root.logger=INFO,console 20 | hadoop.log.dir=. 
21 | hadoop.log.file=hadoop.log 22 | 23 | # 24 | # Job Summary Appender 25 | # 26 | # Use following logger to send summary to separate file defined by 27 | # hadoop.mapreduce.jobsummary.log.file rolled daily: 28 | # hadoop.mapreduce.jobsummary.logger=INFO,JSA 29 | # 30 | hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger} 31 | hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log 32 | 33 | # Define the root logger to the system property "hadoop.root.logger". 34 | log4j.rootLogger=${hadoop.root.logger}, EventCounter 35 | 36 | # Logging Threshold 37 | log4j.threshhold=ALL 38 | 39 | # 40 | # Daily Rolling File Appender 41 | # 42 | 43 | log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender 44 | log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file} 45 | 46 | # Rollver at midnight 47 | log4j.appender.DRFA.DatePattern=.yyyy-MM-dd 48 | 49 | # 30-day backup 50 | #log4j.appender.DRFA.MaxBackupIndex=30 51 | log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout 52 | 53 | # Pattern format: Date LogLevel LoggerName LogMessage 54 | log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n 55 | # Debugging Pattern format 56 | #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n 57 | 58 | 59 | # 60 | # console 61 | # Add "console" to rootlogger above if you want to use this 62 | # 63 | 64 | log4j.appender.console=org.apache.log4j.ConsoleAppender 65 | log4j.appender.console.target=System.err 66 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 67 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n 68 | 69 | # 70 | # TaskLog Appender 71 | # 72 | 73 | #Default values 74 | hadoop.tasklog.taskid=null 75 | hadoop.tasklog.iscleanup=false 76 | hadoop.tasklog.noKeepSplits=4 77 | hadoop.tasklog.totalLogFileSize=100 78 | hadoop.tasklog.purgeLogSplits=true 79 | hadoop.tasklog.logsRetainHours=12 80 | 81 | 
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender 82 | log4j.appender.TLA.taskId=${hadoop.tasklog.taskid} 83 | log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup} 84 | log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize} 85 | 86 | log4j.appender.TLA.layout=org.apache.log4j.PatternLayout 87 | log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n 88 | 89 | # 90 | # Rolling File Appender 91 | # 92 | 93 | #log4j.appender.RFA=org.apache.log4j.RollingFileAppender 94 | #log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} 95 | 96 | # Logfile size and and 30-day backups 97 | #log4j.appender.RFA.MaxFileSize=1MB 98 | #log4j.appender.RFA.MaxBackupIndex=30 99 | 100 | #log4j.appender.RFA.layout=org.apache.log4j.PatternLayout 101 | #log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n 102 | #log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n 103 | 104 | 105 | # Custom Logging levels 106 | 107 | hadoop.metrics.log.level=INFO 108 | #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG 109 | #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG 110 | #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG 111 | log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level} 112 | 113 | # Jets3t library 114 | log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR 115 | 116 | # 117 | # Null Appender 118 | # Trap security logger on the hadoop client side 119 | # 120 | log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender 121 | 122 | # 123 | # Event Counter Appender 124 | # Sends counts of logging messages at different severity levels to Hadoop Metrics. 
125 | # 126 | log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter 127 | 128 | -------------------------------------------------------------------------------- /modules/yarn_client/templates/yarn-env.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | 18 | export YARN_LOG_DIR=<%= scope.lookupvar('hdfs_client::log_dir') %>/$USER 19 | export YARN_PID_DIR=<%= scope.lookupvar('hdfs_client::pid_dir') %>/$USER 20 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 21 | export YARN_IDENT_STRING="$HADOOP_IDENT_STRING" 22 | 23 | # User for YARN daemons 24 | export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} 25 | 26 | # resolve links - $0 may be a softlink 27 | export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" 28 | 29 | JAVA=$JAVA_HOME/bin/java 30 | JAVA_HEAP_MAX=-Xmx<%= @client_mem %>m 31 | 32 | # For setting YARN specific HEAP sizes please use this 33 | # Parameter and set appropriately 34 | YARN_HEAPSIZE=<%= @client_mem %> 35 | 36 | # Resource Manager specific parameters 37 | 38 | # Specify the max Heapsize for the ResourceManager using a numerical value 39 | # in the scale of MB. 
For example, to specify an jvm option of -Xmx1000m, set 40 | # the value to 1000. 41 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 42 | # and/or YARN_RESOURCEMANAGER_OPTS. 43 | # If not specified, the default value will be picked from either YARN_HEAPMAX 44 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 45 | export YARN_RESOURCEMANAGER_HEAPSIZE=<%= @server_mem %> 46 | 47 | # Specify the JVM options to be used when starting the ResourceManager. 48 | # These options will be appended to the options specified as YARN_OPTS 49 | # and therefore may override any similar flags set in YARN_OPTS 50 | #export YARN_RESOURCEMANAGER_OPTS= 51 | 52 | # Node Manager specific parameters 53 | 54 | # Specify the max Heapsize for the NodeManager using a numerical value 55 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 56 | # the value to 1000. 57 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 58 | # and/or YARN_NODEMANAGER_OPTS. 59 | # If not specified, the default value will be picked from either YARN_HEAPMAX 60 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 61 | export YARN_NODEMANAGER_HEAPSIZE=<%= @server_mem %> 62 | 63 | # Specify the max Heapsize for the HistoryManager using a numerical value 64 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 65 | # the value to 1024. 66 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 67 | # and/or YARN_HISTORYSERVER_OPTS. 68 | # If not specified, the default value will be picked from either YARN_HEAPMAX 69 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 70 | export YARN_HISTORYSERVER_HEAPSIZE=1024 71 | 72 | # Specify the JVM options to be used when starting the NodeManager. 
73 | # These options will be appended to the options specified as YARN_OPTS 74 | # and therefore may override any similar flags set in YARN_OPTS 75 | #export YARN_NODEMANAGER_OPTS= 76 | 77 | # so that filenames w/ spaces are handled correctly in loops below 78 | IFS= 79 | 80 | if [ "$YARN_LOGFILE" = "" ]; then 81 | YARN_LOGFILE='yarn.log' 82 | fi 83 | 84 | # default policy file for service-level authorization 85 | if [ "$YARN_POLICYFILE" = "" ]; then 86 | YARN_POLICYFILE="hadoop-policy.xml" 87 | fi 88 | 89 | # restore ordinary behaviour 90 | unset IFS 91 | 92 | 93 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" 94 | YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" 95 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" 96 | YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" 97 | YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" 98 | YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" 99 | YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 100 | YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 101 | if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then 102 | YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" 103 | fi 104 | YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" 105 | -------------------------------------------------------------------------------- /modules/yarn_node_manager/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class yarn_node_manager { 17 | require yarn_client 18 | require hadoop_server 19 | 20 | $path="/usr/bin" 21 | 22 | if $security == "true" { 23 | require kerberos_http 24 | 25 | file { "${hdfs_client::keytab_dir}/nm.keytab": 26 | ensure => file, 27 | source => "/vagrant/generated/keytabs/${hostname}/nm.keytab", 28 | owner => yarn, 29 | group => hadoop, 30 | mode => '400', 31 | } 32 | -> 33 | file { "${hdfs_client::conf_dir}/container-executor.cfg": 34 | ensure => file, 35 | content => template('yarn_node_manager/container-executor.erb'), 36 | owner => root, 37 | group => mapred, 38 | mode => '400', 39 | } 40 | -> 41 | Package["hadoop${package_version}-yarn-nodemanager"] 42 | } 43 | 44 | package { "hadoop${package_version}-yarn-nodemanager" : 45 | ensure => installed, 46 | } 47 | -> 48 | exec { "hdp-select set hadoop-yarn-nodemanager ${hdp_version}": 49 | cwd => "/", 50 | path => "$path", 51 | } 52 | -> 53 | file { "/etc/init.d/hadoop-yarn-nodemanager": 54 | ensure => 'link', 55 | target => "/usr/hdp/current/hadoop-yarn-nodemanager/etc/rc.d/init.d/hadoop-yarn-nodemanager", 56 | } 57 | -> 58 | service {"hadoop-yarn-nodemanager": 59 | ensure => running, 60 | enable => true, 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /modules/yarn_node_manager/templates/container-executor.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license 
agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | yarn.nodemanager.log-dirs=/var/log/hadoop/mapred 18 | yarn.nodemanager.linux-container-executor.group=yarn 19 | banned.users = hdfs,yarn,mapred,bin 20 | min.user.id=500 21 | -------------------------------------------------------------------------------- /modules/yarn_resource_manager/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | class yarn_resource_manager { 17 | require yarn_client 18 | require hadoop_server 19 | 20 | $path="/usr/bin" 21 | 22 | if $security == "true" { 23 | require kerberos_http # Each keytab must be staged before the package whose service reads it; the two Package[...] chain targets below were previously swapped. 24 | 25 | file { "${hdfs_client::keytab_dir}/rm.keytab": 26 | ensure => file, 27 | source => "/vagrant/generated/keytabs/${hostname}/rm.keytab", 28 | owner => 'yarn', 29 | group => 'hadoop', 30 | mode => '400', 31 | } 32 | -> 33 | Package["hadoop${package_version}-yarn-resourcemanager"] 34 | 35 | file { "${hdfs_client::keytab_dir}/jhs.keytab": 36 | ensure => file, 37 | source => "/vagrant/generated/keytabs/${hostname}/jhs.keytab", 38 | owner => 'mapred', 39 | group => 'hadoop', 40 | mode => '400', 41 | } 42 | -> 43 | Package["hadoop${package_version}-mapreduce-historyserver"] 44 | } 45 | 46 | package { "hadoop${package_version}-yarn-resourcemanager" : 47 | ensure => installed, 48 | } 49 | -> 50 | exec { "hdp-select set hadoop-yarn-resourcemanager ${hdp_version}": 51 | cwd => "/", 52 | path => "$path", 53 | } 54 | -> 55 | file { "/etc/init.d/hadoop-yarn-resourcemanager": 56 | ensure => 'link', 57 | target => "/usr/hdp/current/hadoop-yarn-resourcemanager/etc/rc.d/init.d/hadoop-yarn-resourcemanager", 58 | } 59 | -> 60 | service {"hadoop-yarn-resourcemanager": 61 | ensure => running, 62 | enable => true, 63 | } 64 | 65 | package { "hadoop${package_version}-mapreduce-historyserver" : 66 | ensure => installed, 67 | } 68 | -> 69 | exec { "hdp-select set hadoop-mapreduce-historyserver ${hdp_version}": 70 | cwd => "/", 71 | path => "$path", 72 | } 73 | -> 74 | file { "/etc/init.d/hadoop-mapreduce-historyserver": 75 | ensure => 'link', 76 | target => "/usr/hdp/current/hadoop-mapreduce-historyserver/etc/rc.d/init.d/hadoop-mapreduce-historyserver", 77 | } 78 | -> 79 | service {"hadoop-mapreduce-historyserver": 80 | ensure => running, 81 | enable => true, 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /modules/zookeeper_client/manifests/init.pp:
-------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class zookeeper_client { 17 | require repos_setup 18 | require hdp_select 19 | require jdk 20 | 21 | $conf_dir="/etc/zookeeper/hdp" 22 | $log_dir="/var/log/zookeeper" 23 | $data_dir="/var/lib/zookeeper" 24 | $pid_dir="/var/run/pid/zookeeper" 25 | $path="/usr/bin" 26 | 27 | package { "zookeeper${package_version}": 28 | ensure => installed, 29 | } 30 | -> 31 | exec { "hdp-select set zookeeper-client ${hdp_version}": 32 | cwd => "/", 33 | path => "$path", 34 | } 35 | 36 | file { '/etc/zookeeper': 37 | ensure => 'directory', 38 | } 39 | 40 | file { "${conf_dir}": 41 | ensure => 'directory', 42 | } 43 | 44 | file { '/etc/zookeeper/conf': 45 | ensure => 'link', 46 | target => "${conf_dir}", 47 | require => Package["zookeeper${package_version}"], 48 | force => true 49 | } 50 | 51 | 52 | file { "${conf_dir}/log4j.properties": 53 | ensure => file, 54 | content => template('zookeeper_client/log4j.erb'), 55 | } 56 | 57 | file { "${conf_dir}/zoo.cfg": 58 | ensure => file, 59 | content => template('zookeeper_client/zoo.erb'), 60 | } 61 | 62 | file { 
"${conf_dir}/zookeeper-env.sh": 63 | ensure => file, 64 | content => template('zookeeper_client/zookeeper-env.erb'), 65 | } 66 | 67 | if $security == "true" { 68 | file { "${conf_dir}/zookeeper-client.jaas": 69 | ensure => file, 70 | content => template('zookeeper_client/zookeeper-client.erb'), 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /modules/zookeeper_client/templates/log4j.erb: -------------------------------------------------------------------------------- 1 | # 2 | # ZooKeeper Logging Configuration 3 | # 4 | 5 | # Format is " (, )+ 6 | 7 | # DEFAULT: console appender only 8 | log4j.rootLogger=INFO, CONSOLE 9 | 10 | # Example with rolling log file 11 | #log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE 12 | 13 | # Example with rolling log file and tracing 14 | #log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE 15 | 16 | # 17 | # Log INFO level and above messages to the console 18 | # 19 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender 20 | log4j.appender.CONSOLE.Threshold=INFO 21 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout 22 | log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n 23 | 24 | # 25 | # Add ROLLINGFILE to rootLogger to get log file output 26 | # Log DEBUG level and above messages to a log file 27 | log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender 28 | log4j.appender.ROLLINGFILE.Threshold=DEBUG 29 | log4j.appender.ROLLINGFILE.File=zookeeper.log 30 | 31 | # Max log file size of 10MB 32 | log4j.appender.ROLLINGFILE.MaxFileSize=10MB 33 | # uncomment the next line to limit number of backup files 34 | #log4j.appender.ROLLINGFILE.MaxBackupIndex=10 35 | 36 | log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout 37 | log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n 38 | 39 | 40 | # 41 | # Add TRACEFILE to rootLogger to get log file output 42 | # Log DEBUG level 
and above messages to a log file 43 | log4j.appender.TRACEFILE=org.apache.log4j.FileAppender 44 | log4j.appender.TRACEFILE.Threshold=TRACE 45 | log4j.appender.TRACEFILE.File=zookeeper_trace.log 46 | 47 | log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout 48 | ### Notice we are including log4j's NDC here (%x) 49 | log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n 50 | -------------------------------------------------------------------------------- /modules/zookeeper_client/templates/zoo.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | <% 18 | zookeeper_servers = eval(@nodes). 19 | select {|node| node[:roles].include? 'zk'}. 20 | map{|node| node[:hostname] + "." + @domain}; 21 | -%> 22 | # The number of milliseconds of each tick 23 | tickTime=2000 24 | # The number of ticks that the initial 25 | # synchronization phase can take 26 | initLimit=10 27 | # The number of ticks that can pass between 28 | # sending a request and getting an acknowledgement 29 | syncLimit=5 30 | # the directory where the snapshot is stored. 
31 | dataDir=<%= @data_dir %> 32 | # the port at which the clients will connect 33 | clientPort=2181 34 | <% @server_id = 1; 35 | zookeeper_servers.each do |node| -%> 36 | server.<%= @server_id %>=<%= node %>:2888:3888 37 | <% @server_id = @server_id +1; 38 | end -%> 39 | <% if @security == "true" -%> 40 | authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider 41 | jaasLoginRenew=3600000 42 | kerberos.removeHostFromPrincipal=true 43 | kerberos.removeRealmFromPrincipal=true 44 | <% end -%> -------------------------------------------------------------------------------- /modules/zookeeper_client/templates/zookeeper-client.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | -%> 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=false 20 | useTicketCache=true; 21 | }; -------------------------------------------------------------------------------- /modules/zookeeper_client/templates/zookeeper-env.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | export JAVA_HOME=<%= scope.lookupvar('jdk::home') %> 18 | export ZOO_LOG_DIR=<%= @log_dir %> 19 | export ZOOPIDFILE=<%= @pid_dir %>/zookeeper-server.pid 20 | export SERVER_JVMFLAGS=-Xmx<%= @server_mem %>m 21 | <% if @security == "true" -%> 22 | export SERVER_JVMFLAGS="-Djava.security.auth.login.config=/etc/zookeeper/conf/zookeeper-server.jaas" 23 | export CLIENT_JVMFLAGS="-Djava.security.auth.login.config=/etc/zookeeper/conf/zookeeper-client.jaas" 24 | <% end -%> 25 | -------------------------------------------------------------------------------- /modules/zookeeper_server/manifests/init.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. 
See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class zookeeper_server { 17 | require zookeeper_client 18 | 19 | $path="/usr/bin" 20 | 21 | if $security == "true" { 22 | file { "${zookeeper_client::conf_dir}/zookeeper-server.jaas": 23 | ensure => file, 24 | content => template('zookeeper_server/zookeeper-server.erb'), 25 | } 26 | -> Package["zookeeper${package_version}-server"] 27 | 28 | file { "${hdfs_client::keytab_dir}/zookeeper.keytab": 29 | ensure => file, 30 | source => "/vagrant/generated/keytabs/${hostname}/zookeeper.keytab", 31 | owner => zookeeper, 32 | group => hadoop, 33 | mode => '400', 34 | } 35 | -> 36 | Package["zookeeper${package_version}-server"] 37 | } 38 | 39 | package { "zookeeper${package_version}-server": 40 | ensure => installed, 41 | } 42 | -> 43 | exec { "hdp-select set zookeeper-server ${hdp_version}": 44 | cwd => "/", 45 | path => "$path", 46 | } 47 | -> 48 | file { "${zookeeper_client::conf_dir}/configuration.xsl": 49 | ensure => file, 50 | content => template('zookeeper_server/configuration.erb'), 51 | } 52 | -> 53 | file { "/etc/init.d/zookeeper-server": 54 | ensure => 'link', 55 | target => "/usr/hdp/current/zookeeper-server/etc/rc.d/init.d/zookeeper-server", 56 | } 57 | -> 58 | file { "${zookeeper_client::data_dir}": 59 | ensure => directory, 60 | owner => 
zookeeper, 61 | group => hadoop, 62 | mode => '700', 63 | } 64 | -> 65 | file { "${zookeeper_client::data_dir}/myid": 66 | ensure => file, 67 | content => template('zookeeper_server/myid.erb'), 68 | } 69 | -> 70 | file { "${zookeeper_client::log_dir}": 71 | ensure => directory, 72 | owner => zookeeper, 73 | group => hadoop, 74 | mode => '700', 75 | } 76 | -> 77 | file { "${zookeeper_client::pid_dir}": 78 | ensure => directory, 79 | owner => zookeeper, 80 | group => hadoop, 81 | mode => '755', 82 | } 83 | -> 84 | service { "zookeeper-server": 85 | ensure => running, 86 | enable => true, 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /modules/zookeeper_server/templates/configuration.erb: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 |
namevaluedescription
21 | 22 | 23 |
24 |
25 | -------------------------------------------------------------------------------- /modules/zookeeper_server/templates/myid.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | <%= (eval(@nodes). 18 | select {|node| node[:roles].include? 'zk'}. 19 | map {|node| node[:hostname]}. 20 | index @hostname) + 1 %> 21 | 22 | -------------------------------------------------------------------------------- /modules/zookeeper_server/templates/zookeeper-server.erb: -------------------------------------------------------------------------------- 1 | <%# 2 | Licensed to the Apache Software Foundation (ASF) under one or more 3 | contributor license agreements. See the NOTICE file distributed with 4 | this work for additional information regarding copyright ownership. 5 | The ASF licenses this file to You under the Apache License, Version 2.0 6 | (the "License"); you may not use this file except in compliance with 7 | the License. 
You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -%> 17 | Server { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | storeKey=true 21 | useTicketCache=false 22 | keyTab="<%= scope.lookupvar('hdfs_client::keytab_dir') %>/zookeeper.keytab" 23 | principal="zookeeper/<%= @hostname %>.<%= @domain %>"; 24 | }; -------------------------------------------------------------------------------- /profiles/1node-hbase-nonsecure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 3072, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hbase", "hdfs", "zk"], 9 | "nodes": [ 10 | {"hostname": "nn", "ip": "240.0.0.11", 11 | "roles": ["client", "hbase-master", "hbase-regionserver", "nn", 12 | "slave", "zk"]} 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /profiles/1node-hbase-secure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": true, 5 | "vm_mem": 3072, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hbase", "hdfs", "zk"], 9 | "nodes": [ 10 | {"hostname": "nn", "ip": "240.0.0.11", 11 | "roles": ["client", "hbase-master", "hbase-regionserver", "kdc", "nn", 12 | "slave", "zk"]} 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /profiles/1node-nonsecure.profile: 
-------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 3072, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "spark", "hdfs", "hive", "oozie", "pig", "tez", "yarn", "zk" ], 9 | "nodes": [ 10 | {"hostname": "nn", "ip": "240.0.0.11", 11 | "roles": ["client", "hive-db", "hive-meta", "nn", "oozie", "slave", 12 | "yarn", "zk"]} 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /profiles/1node-secure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": true, 5 | "vm_mem": 3072, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hdfs", "hive", "oozie", "pig", "tez", "yarn", "zk" ], 9 | "nodes": [ 10 | {"hostname": "nn", "ip": "240.0.0.11", 11 | "roles": ["client", "kdc", "hive-db", "hive-meta", "nn", "oozie", "slave", 12 | "yarn", "zk"]} 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /profiles/3node-analytics-nonsecure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hdfs", "tez", "yarn", "zk" ], 9 | "nodes": [ 10 | { "hostname": "nn", "ip": "240.0.0.11", 11 | "roles": [ "hive-db", "hive-meta", "nn", "yarn", "zk", "client" ] }, 12 | { "hostname": "slave1", "ip": "240.0.0.12", "roles": [ "slave", "client", "hive-hs2" ] }, 13 | { "hostname": "slave2", "ip": "240.0.0.13", "roles": [ "slave" ] } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /profiles/3node-hbase-nonsecure.profile: 
-------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hbase", "hdfs", "zk"], 9 | "nodes": [ 10 | { "hostname": "gw", "ip": "240.0.0.10", "roles": [ "client" ]}, 11 | { "hostname": "nn", "ip": "240.0.0.11", 12 | "roles": [ "hbase-master", "nn", "slave", "zk" ] }, 13 | { "hostname": "slave1", "ip": "240.0.0.12", 14 | "roles": [ "hbase-regionserver", "slave" ] } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /profiles/3node-hbase-secure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": true, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hbase", "hdfs", "zk"], 9 | "nodes": [ 10 | { "hostname": "gw", "ip": "240.0.0.10", "roles": [ "client" ]}, 11 | { "hostname": "nn", "ip": "240.0.0.11", 12 | "roles": [ "hbase-master", "kdc", "nn", "slave", "zk" ] }, 13 | { "hostname": "slave1", "ip": "240.0.0.12", 14 | "roles": [ "hbase-regionserver", "slave" ] } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /profiles/3node-nonsecure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hdfs", "hive", "oozie", "pig", "tez", "yarn", "zk" ], 9 | "nodes": [ 10 | { "hostname": "gw", "ip": "240.0.0.10", "roles": [ "client" ] }, 11 | { "hostname": "nn", "ip": "240.0.0.11", 12 | "roles": [ "kdc", "hive-db", "hive-meta", "nn", "yarn", "zk" ] }, 13 | { "hostname": "slave1", "ip": "240.0.0.12", "roles": [ "oozie", "slave" ] } 14 | ] 15 | 
} 16 | -------------------------------------------------------------------------------- /profiles/3node-odpi.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hdfs", "yarn", "zk" ], 9 | "nodes": [ 10 | { "hostname": "gw", "ip": "240.0.0.10", "roles": [ "client", "dev" ] }, 11 | { "hostname": "nn", "ip": "240.0.0.11", 12 | "roles": [ "kdc", "nn", "yarn", "zk" ] }, 13 | { "hostname": "slave1", "ip": "240.0.0.12", "roles": [ "slave" ] } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /profiles/3node-secure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": true, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hdfs", "hive", "oozie", "pig", "tez", "yarn", "zk" ], 9 | "nodes": [ 10 | { "hostname": "gw", "ip": "240.0.0.10", "roles": [ "client" ] }, 11 | { "hostname": "nn", "ip": "240.0.0.11", "roles": 12 | [ "kdc", "hive-db", "hive-meta", "nn", "yarn", "zk" ] }, 13 | { "hostname": "slave1", "ip": "240.0.0.12", "roles": [ "oozie", "slave" ] } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /profiles/3node-spark-nonsecure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 4072, 6 | "server_mem": 3000, 7 | "client_mem": 1000, 8 | "clients" : [ "hdfs", "yarn", "spark" ], 9 | "nodes": [ 10 | {"hostname": "spark", "ip": "240.0.0.11", "roles": [ "nn", "yarn", "slave", "client" ]}, 11 | {"hostname": "slave1", "ip": "240.0.0.12", "roles": [ "slave", "client" ]}, 12 | {"hostname": 
"slave2", "ip": "240.0.0.13", "roles": [ "slave", "client" ]} 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /profiles/5node-nonsecure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hdfs", "yarn", "pig", "hive", "zk" ], 9 | "nodes": [ 10 | { "hostname": "gw", "ip": "240.0.0.10", "roles": [ "client" ] }, 11 | { "hostname": "nn", "ip": "240.0.0.11", 12 | "roles": [ "kdc", "nn", "yarn", "hive-meta", "hive-db", "zk" ] }, 13 | { "hostname": "slave1", "ip": "240.0.0.12", "roles": [ "slave" ] }, 14 | { "hostname": "slave2", "ip": "240.0.0.13", "roles": [ "slave" ] }, 15 | { "hostname": "slave3", "ip": "240.0.0.14", "roles": [ "slave" ] } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /profiles/ambari-nonsecure-2-node.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ ], 9 | "nodes": [ 10 | { "hostname": "ambari", "ip": "10.0.10.10", "roles": [ "ambari-server" ] }, 11 | { "hostname": "slave1", "ip": "10.0.10.11", "roles": [ "ambari-agent" ] } 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /profiles/ambari-nonsecure-4-nodes.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ ], 9 | "nodes": [ 10 | { "hostname": "ambari", "ip": "10.0.10.10", "roles": [ "ambari-server" ] }, 11 | { "hostname": 
"master", "ip": "10.0.10.11", "roles": [ "ambari-agent" ] }, 12 | { "hostname": "slave1", "ip": "10.0.10.12", "roles": [ "ambari-agent" ] }, 13 | { "hostname": "slave2", "ip": "10.0.10.13", "roles": [ "ambari-agent" ] } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /profiles/knox-nonsecure.profile: -------------------------------------------------------------------------------- 1 | { 2 | "domain": "example.com", 3 | "realm": "EXAMPLE.COM", 4 | "security": false, 5 | "vm_mem": 2048, 6 | "server_mem": 300, 7 | "client_mem": 200, 8 | "clients" : [ "hdfs", "yarn", "pig", "hive", "zk" ], 9 | "nodes": [ 10 | { "hostname": "gw", "ip": "240.0.0.10", "roles": [ "client", "knox" ] }, 11 | { "hostname": "nn", "ip": "240.0.0.11", "roles": [ "kdc", "nn", "yarn", "hive-meta", "hive-db", "zk" ] }, 12 | { "hostname": "slave1", "ip": "240.0.0.12", "roles": [ "slave" ] } 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /puppet/parser/functions/hasrole.rb: -------------------------------------------------------------------------------- 1 | # 2 | # hasrole.rb 3 | # 4 | 5 | module Puppet::Parser::Functions 6 | newfunction(:hasrole, :type => :rvalue, :doc => <<-EOS 7 | This function determines if a role is part of a list encoded as a string. 8 | 9 | *Examples:* 10 | hasrole("['nn', 'jt']", 'nn') 11 | 12 | Would return: true 13 | 14 | hasrole("['nn', 'jt']", 'client') 15 | 16 | Would return: false 17 | EOS 18 | ) do |arguments| 19 | 20 | raise(Puppet::ParseError, "hasrole(): Wrong number of arguments " + 21 | "given (#{arguments.size} for 2)") if arguments.size < 2 22 | 23 | array = eval(arguments[0]) 24 | 25 | unless array.is_a?(Array) 26 | raise(Puppet::ParseError, 'hasrole(): Requires array to work with') 27 | end 28 | 29 | item = arguments[1] 30 | 31 | raise(Puppet::ParseError, 'hasrole(): You must provide item ' + 32 | 'to search for within array given') if item.empty? 
33 | 34 | return array.include?(item) 35 | end 36 | 37 | newfunction(:islastslave, :type => :rvalue, :doc => <<-EOS 38 | This function determines if a node is the last slave. The first parameter 39 | is the node map and the second is the hostname. 40 | EOS 41 | ) do |arguments| 42 | 43 | raise(Puppet::ParseError, "islastslave(): Wrong number of arguments " + 44 | "given (#{arguments.size} for 2)") if arguments.size < 2 45 | 46 | nodes = eval(arguments[0]) 47 | 48 | unless nodes.is_a?(Array) 49 | raise(Puppet::ParseError, 'islastslave(): Requires array to work with') 50 | end 51 | 52 | host = arguments[1] 53 | 54 | raise(Puppet::ParseError, 'islastslave(): You must provide item ' + 55 | 'to search for within array given') if host.empty? 56 | 57 | slaves = nodes.select {|node| node[:roles].include? 'slave'}. 58 | map{|node| node[:hostname]} 59 | return slaves.last == host 60 | end 61 | end 62 | --------------------------------------------------------------------------------