├── .gitignore ├── README.md ├── components ├── Dockerfile-ubuntu-jdk11 ├── README.md ├── docker-compose.yml ├── flink │ ├── .env │ ├── README.md │ ├── docker-compose.yml │ ├── etc │ │ ├── flink │ │ │ └── flink-conf.yaml │ │ └── hadoop-cli │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ └── script │ │ └── start.sh ├── hadoop │ ├── .env │ ├── README.md │ ├── docker-compose.yml │ ├── etc │ │ ├── hadoop-cli │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ ├── hadoop-default │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ ├── hadoop-federation-nm │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── 
httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ ├── hadoop-federation-rm │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ ├── hadoop-for-nm │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ ├── hadoop-ha │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ ├── hadoop-router │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl 
│ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ ├── hadoop-security-cli │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── keystore.jks │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── krb5.conf │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml │ │ │ ├── ssl-server.xml.example │ │ │ ├── truststore.jks │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ └── hadoop-security │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── keystore.jks │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── krb5.conf │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ └── example.sh │ │ │ ├── ssl-client.xml │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml │ │ │ ├── ssl-server.xml.example │ │ │ ├── truststore.jks │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ └── script │ │ ├── dn.sh │ │ └── nn.sh ├── hbase │ ├── .env │ ├── docker-compose.yml │ ├── etc │ │ └── hbase │ │ │ ├── hadoop-metrics2-hbase.properties │ │ │ ├── hbase-env.cmd │ │ │ ├── hbase-env.sh │ │ │ ├── hbase-policy.xml │ │ │ ├── hbase-site.xml │ │ │ ├── log4j-hbtop.properties │ │ │ ├── log4j.properties │ │ │ └── regionservers │ └── script │ │ └── start.sh ├── hive │ ├── .env │ ├── docker-compose.yml │ ├── etc │ │ ├── hadoop-cli │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── 
container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ └── hive-default │ │ │ ├── beeline-log4j.properties.template │ │ │ ├── hive-default.xml.template │ │ │ ├── hive-env.sh │ │ │ ├── hive-env.sh.template │ │ │ ├── hive-exec-log4j.properties.template │ │ │ ├── hive-log4j2.properties │ │ │ ├── hive-site.xml │ │ │ └── ivysettings.xml │ └── script │ │ └── start.sh ├── kafka │ ├── .env │ ├── README.md │ ├── docker-compose.yml │ ├── etc │ │ └── kafka │ │ │ ├── config.yml │ │ │ └── server.properties │ └── script │ │ └── start.sh ├── spark │ ├── .env │ ├── README.md │ ├── docker-compose.yml │ ├── etc │ │ ├── hadoop-cli │ │ │ ├── capacity-scheduler.xml │ │ │ ├── configuration.xsl │ │ │ ├── container-executor.cfg │ │ │ ├── core-site.xml │ │ │ ├── hadoop-env.cmd │ │ │ ├── hadoop-env.sh │ │ │ ├── hadoop-metrics2.properties │ │ │ ├── hadoop-policy.xml │ │ │ ├── hadoop-user-functions.sh.example │ │ │ ├── hdfs-site.xml │ │ │ ├── httpfs-env.sh │ │ │ ├── httpfs-log4j.properties │ │ │ ├── httpfs-signature.secret │ │ │ ├── httpfs-site.xml │ │ │ ├── kms-acls.xml │ │ │ ├── kms-env.sh │ │ │ ├── kms-log4j.properties │ │ │ ├── kms-site.xml │ │ │ ├── log4j.properties │ │ │ ├── mapred-env.cmd │ │ │ ├── mapred-env.sh │ │ │ ├── mapred-queues.xml.template │ │ │ ├── mapred-site.xml │ │ │ ├── shellprofile.d │ │ │ │ └── example.sh │ │ │ ├── ssl-client.xml.example │ │ │ ├── ssl-server.xml.example │ │ │ ├── user_ec_policies.xml.template │ │ │ ├── workers │ │ │ ├── yarn-env.cmd │ │ │ ├── yarn-env.sh │ │ │ ├── yarn-site.xml │ │ │ └── yarnservice-log4j.properties │ │ └── spark-default │ │ │ ├── fairscheduler.xml.template │ │ │ ├── log4j2.properties.template │ │ │ ├── metrics.properties.template │ │ │ ├── spark-defaults.conf.template │ │ │ ├── spark-env.sh.template │ │ │ └── workers.template │ └── script │ │ └── start.sh └── zookeeper │ ├── .env │ ├── README.md │ ├── docker-compose.yml │ ├── etc │ └── default │ │ └── zoo.cfg │ └── script │ └── start.sh ├── doc ├── Alluxio.md ├── HBase.md ├── Hadoop.md ├── Hive.md ├── Kafka.md ├── Kerberos.md ├── LLAP.md └── local-env.md ├── etc-example ├── alluxio-cli │ ├── alluxio-env.sh │ ├── alluxio-site.properties │ ├── alluxio-site.properties.template │ ├── core-site.xml.template │ ├── jaas.conf │ ├── log4j.properties │ ├── masters │ ├── metrics.properties.template │ └── workers ├── alluxio-security │ ├── alluxio-env.sh │ ├── alluxio-env.sh.template │ ├── alluxio-site.properties │ ├── alluxio-site.properties.template │ ├── core-site.xml.template │ ├── log4j.properties │ ├── masters │ ├── metrics.properties.template │ └── workers ├── hbase │ ├── hadoop-metrics2-hbase.properties │ ├── hbase-env.cmd │ ├── hbase-env.sh │ ├── hbase-policy.xml │ ├── hbase-site.xml │ ├── log4j-hbtop.properties │ 
├── log4j.properties │ └── regionservers ├── hive-default │ ├── beeline-log4j.properties.template │ ├── hive-default.xml.template │ ├── hive-env.sh │ ├── hive-env.sh.template │ ├── hive-exec-log4j.properties.template │ ├── hive-log4j2.properties │ ├── hive-site.xml │ └── ivysettings.xml ├── hive-sec │ ├── beeline-log4j.properties.template │ ├── hive-default.xml.template │ ├── hive-env.sh │ ├── hive-env.sh.template │ ├── hive-exec-log4j.properties.template │ ├── hive-log4j2.properties │ ├── hive-site.xml │ └── ivysettings.xml ├── kafka │ └── server.properties ├── kerberos │ └── krb5.conf ├── tez │ └── tez-site.xml ├── zeppelin │ └── conf │ │ ├── configuration.xsl │ │ ├── interpreter-list │ │ ├── interpreter.json │ │ ├── log4j.properties │ │ ├── log4j_yarn_cluster.properties │ │ ├── notebook-authorization.json │ │ ├── shiro.ini.template │ │ ├── zeppelin-env.cmd.template │ │ ├── zeppelin-env.sh.template │ │ ├── zeppelin-site.xml │ │ └── zeppelin-site.xml.template └── zookeeper │ └── zoo.cfg └── install ├── flink ├── hadoop ├── hbase ├── hive ├── kafka ├── spark └── zookeeper /.gitignore: -------------------------------------------------------------------------------- 1 | *.tar.gz 2 | .DS_Store 3 | .idea 4 | *.jar 5 | build-env/app/* 6 | etc/hadoop 7 | etc/hive 8 | etc/alluxio 9 | ./install 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Easy Install Components of Bigdata 2 | 3 | **link the installed package locally in advance** 4 | `ln -s ` 5 | 6 | 7 | ## [Components](./components/README.md) 8 | Init the Docker environment 9 | ### [Hadoop](./components/hadoop/README.md) 10 | Setup a hadoop cluster 11 | ### [Hive](./components/hive/README.md) 12 | Setup hive service 13 | ### [Hbase](./components/hbase/README.md) 14 | Setup Hbase cluster 15 | 16 | ### [Zookeeper](./components/zookeeper/README.md) 17 | Setup ZK cluster 18 | ### [Kafka](./components/kafka/README.md) 19 | Setup Kafka 20 | ### [Spark](./components/spark/README.md) 21 | Setup Spark 22 | 23 | ## Local mode refer to the branch : [local](https://github.com/iceqiw/EasyBigdata/tree/local) 24 | -------------------------------------------------------------------------------- /components/README.md: -------------------------------------------------------------------------------- 1 | # Init docker 2 | 3 | ## create network 4 | 5 | ``` 6 | docker network create --driver bridge cluster_net 7 | ``` 8 | 9 | ## build images 10 | ``` 11 | docker compose build 12 | ``` 13 | 14 | ## Set install package path 15 | 16 | `export INSTALL=` 17 | 18 | example: 19 | ``` 20 | export INSTALL=~/workspace/EasyBigdata/install 21 | ``` 22 | 23 | # How to setup components 24 | 25 | goto the folder of components 26 | 27 | Run command as : `docker compose up` -------------------------------------------------------------------------------- /components/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _hadoop: 3 | image: base-data:1.0 4 | build: 5 | context: . 
6 | dockerfile: Dockerfile-ubuntu-jdk11 7 | -------------------------------------------------------------------------------- /components/flink/.env: -------------------------------------------------------------------------------- 1 | BIGDATA_HOME=/opt/bigdata 2 | BIGDATA_CONF=$BIGDATA_HOME/etc 3 | BIGDATA_LOG=$BIGDATA_HOME/logs 4 | HADOOP_HOME=$BIGDATA_HOME/hadoop 5 | 6 | HADOOP_CONF_DIR=$BIGDATA_CONF/hadoop 7 | YARN_CONF_DIR=$BIGDATA_CONF/hadoop 8 | 9 | FLINK_HOME=$BIGDATA_HOME/flink 10 | FLINK_CONF_DIR=$BIGDATA_CONF/flink 11 | PATH=$HADOOP_HOME/bin:$FLINK_HOME/bin:$PATH 12 | -------------------------------------------------------------------------------- /components/flink/README.md: -------------------------------------------------------------------------------- 1 | # How to apply job 2 | ``` 3 | . 4 | ├── docker-compose.yml 5 | ├── etc 6 | ├── README.md 7 | └── script 8 | ``` 9 | ## export HADOOP_CLASSPATH 10 | ``` shell 11 | export HADOOP_CLASSPATH=`hadoop classpath` 12 | ``` 13 | 14 | ## apply job 15 | ``` shell 16 | flink run-application -t yarn-application ./flink/examples/streaming/TopSpeedWindowing.jar 17 | ``` 18 | 19 | 20 | ## Start YARN Session 21 | ``` shell 22 | yarn-session.sh --detached 23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /components/flink/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _base: 3 | image: base-data:1.0 4 | volumes: 5 | - ${INSTALL}/hadoop:/opt/bigdata/hadoop 6 | - ${INSTALL}/flink:/opt/bigdata/flink 7 | - ./etc/hadoop-cli:/opt/bigdata/etc/hadoop 8 | - ./etc/flink:/opt/bigdata/etc/flink 9 | - ./script:/opt/bigdata/script 10 | flink01: 11 | extends: 12 | service: _base 13 | working_dir: /opt/bigdata 14 | hostname: flink 15 | networks: 16 | - cluster_net 17 | env_file: 18 | - .env 19 | networks: 20 | cluster_net: 21 | external: true 22 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
[configuration.xsl: XSLT markup stripped during extraction; only the rendered table header survives: name | value | description]
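The XML tags of the Hadoop config files in this dump were stripped during extraction, so only bare name/value text remains. As a reading aid, here is the flink hadoop-cli core-site.xml that follows, rebuilt in the standard Hadoop `<configuration>`/`<property>` layout; the wrapper markup is assumed, while the property names and values are copied verbatim from the flattened text:

```xml
<?xml version="1.0"?>
<!-- Reconstruction of components/flink/etc/hadoop-cli/core-site.xml.
     Tag layout is the standard Hadoop config format (assumed);
     names and values are taken from the flattened dump. -->
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master01:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/bigdata/data/hadoop/tmp</value>
  </property>
  <property>
    <!-- Allow user "ice" to impersonate other users from any host. -->
    <name>hadoop.proxyuser.ice.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.ice.groups</name>
    <value>*</value>
  </property>
</configuration>
```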
41 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | fs.defaultFS 4 | hdfs://master01:9000 5 | 6 | 7 | hadoop.tmp.dir 8 | /opt/bigdata/data/hadoop/tmp 9 | 10 | 11 | hadoop.proxyuser.ice.hosts 12 | * 13 | 14 | 15 | hadoop.proxyuser.ice.groups 16 | * 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | dfs.name.dir 4 | /opt/bigdata/data/hadoop/hdfs/name 5 | 6 | 7 | 8 | dfs.data.dir 9 | /opt/bigdata/data/hadoop/hdfs/data 10 | 11 | 12 | 13 | dfs.replication 14 | 1 15 | 16 | 17 | dfs.http.address 18 | 0.0.0.0:50070 19 | 20 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mapreduce.framework.name 4 | yarn 5 | 6 | 7 | yarn.app.mapreduce.am.env 8 | HADOOP_MAPRED_HOME=$HADOOP_HOME 9 | 10 | 11 | mapreduce.map.env 12 | HADOOP_MAPRED_HOME=$HADOOP_HOME 13 | 14 | 15 | mapreduce.reduce.env 16 | HADOOP_MAPRED_HOME=$HADOOP_HOME 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/workers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/flink/etc/hadoop-cli/yarn-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | yarn.resourcemanager.hostname 4 | master01 5 | 6 | 7 | yarn.nodemanager.aux-services 8 | mapreduce_shuffle 9 | 10 | 11 | yarn.nodemanager.resource.memory-mb 12 | 2048 13 | 14 | 15 | yarn.nodemanager.vmem-check-enabled 16 | false 17 | 18 | 19 | yarn.nodemanager.resource.cpu-vcores 20 | 1 21 | 22 | 23 | yarn.resourcemanager.scheduler.address 24 | master01:8099 25 | 26 | 27 | -------------------------------------------------------------------------------- /components/flink/script/start.sh: -------------------------------------------------------------------------------- 1 | export HADOOP_CLASSPATH=`hadoop classpath` -------------------------------------------------------------------------------- /components/hadoop/.env: -------------------------------------------------------------------------------- 1 | BIGDATA_HOME=/opt/bigdata 2 | BIGDATA_CONF=$BIGDATA_HOME/etc 3 | BIGDATA_LOG=$BIGDATA_HOME/logs 4 | HADOOP_HOME=$BIGDATA_HOME/hadoop 5 | 6 | PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin 7 | 8 | HADOOP_CONF_DIR=$BIGDATA_CONF/hadoop 9 | HADOOP_LOG_DIR=$BIGDATA_LOG/hadoop 10 | YARN_CONF_DIR=$BIGDATA_CONF/hadoop 11 | YARN_LOG_DIR=$BIGDATA_LOG/hadoop 12 | -------------------------------------------------------------------------------- /components/hadoop/README.md: -------------------------------------------------------------------------------- 1 | # How to setup hadoop env with docker? 2 | 3 | ``` 4 | . 
5 | └── hadoop 6 | ├── .env 7 | ├── Dockerfile-ubuntu-jdk11 8 | ├── README.md 9 | ├── docker-compose.yml 10 | ├── etc 11 | └── install -> hadoop-3.3.4 12 | 13 | ``` 14 | 15 | ## prepare files 16 | 17 | - hadoop tar [link](https://dlcdn.apache.org/hadoop/common/hadoop-3.3.6/hadoop-3.3.6.tar.gz) 18 | 19 | ### setup hadoop cluster 20 | refer to [hadoop](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html) 21 | 22 | 23 | ## start cluster 24 | run command 🚀 `docker compose up ` 25 | -------------------------------------------------------------------------------- /components/hadoop/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _base: 3 | image: base-data:1.0 4 | volumes: 5 | - ${INSTALL}/hadoop:/opt/bigdata/hadoop 6 | - ./etc/hadoop-default:/opt/bigdata/etc/hadoop 7 | - ./script:/opt/bigdata/script 8 | networks: 9 | - cluster_net 10 | env_file: 11 | - .env 12 | master01: 13 | extends: 14 | service: _base 15 | hostname: master01 16 | working_dir: /opt/bigdata 17 | ports: 18 | - "50070:50070" 19 | - "8088:8088" 20 | - "18088:18088" 21 | command: ["bash","./script/nn.sh"] 22 | slave01: 23 | working_dir: /opt/bigdata 24 | extends: 25 | service: _base 26 | tty: true 27 | command: ["bash","./script/dn.sh"] 28 | networks: 29 | cluster_net: 30 | external: true 31 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
[configuration.xsl: XSLT markup stripped during extraction; only the rendered table header survives: name | value | description]
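The same stripping applies to the hadoop-cli configs that follow. As one more example, the hdfs-site.xml reads back as below (wrapper tags assumed, values verbatim from the dump):

```xml
<?xml version="1.0"?>
<!-- Reconstruction of components/hadoop/etc/hadoop-cli/hdfs-site.xml;
     tag layout assumed, values from the flattened dump. -->
<configuration>
  <property>
    <name>dfs.name.dir</name>
    <value>/opt/bigdata/data/hadoop/hdfs/name</value>
  </property>
  <property>
    <name>dfs.data.dir</name>
    <value>/opt/bigdata/data/hadoop/hdfs/data</value>
  </property>
  <property>
    <!-- Single replica: reasonable for a local docker cluster, not for production. -->
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.http.address</name>
    <value>0.0.0.0:50070</value>
  </property>
</configuration>
```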
41 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | fs.defaultFS 4 | hdfs://bd-master-1:9000 5 | 6 | 7 | hadoop.tmp.dir 8 | /opt/bigdata/data/hadoop/tmp 9 | 10 | 11 | hadoop.proxyuser.root.hosts 12 | * 13 | 14 | 15 | hadoop.proxyuser.root.groups 16 | * 17 | 18 | 19 | hadoop.proxyuser.qiwei.hosts 20 | * 21 | 22 | 23 | hadoop.proxyuser.qiwei.groups 24 | * 25 | 26 | 27 | hadoop.proxyuser.tw.hosts 28 | * 29 | 30 | 31 | hadoop.proxyuser.tw.groups 32 | * 33 | 34 | 35 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | dfs.name.dir 4 | /opt/bigdata/data/hadoop/hdfs/name 5 | 6 | 7 | 8 | dfs.data.dir 9 | /opt/bigdata/data/hadoop/hdfs/data 10 | 11 | 12 | 13 | dfs.replication 14 | 1 15 | 16 | 17 | dfs.http.address 18 | 0.0.0.0:50070 19 | 20 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/workers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-cli/yarn-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | yarn.resourcemanager.address 4 | bd-master-1:8050 5 | 6 | 7 | yarn.resourcemanager.scheduler.address 8 | localhost:8049 9 | 10 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
[configuration.xsl: XSLT markup stripped during extraction; only the rendered table header survives: name | value | description]
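Likewise for the hadoop-default configs in this block. The mapred-site.xml that appears further below, rebuilt under the same assumption, shows how the cluster points MapReduce at YARN and propagates HADOOP_MAPRED_HOME into the AM, map, and reduce environments:

```xml
<?xml version="1.0"?>
<!-- Reconstruction of components/hadoop/etc/hadoop-default/mapred-site.xml;
     tag layout assumed, values from the flattened dump. -->
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value>
  </property>
  <property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value>
  </property>
  <property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value>
  </property>
</configuration>
```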
41 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | fs.defaultFS 4 | hdfs://master01:9000 5 | 6 | 7 | hadoop.tmp.dir 8 | /opt/bigdata/data/hadoop/tmp 9 | 10 | 11 | hadoop.proxyuser.ice.hosts 12 | * 13 | 14 | 15 | hadoop.proxyuser.ice.groups 16 | * 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | dfs.name.dir 4 | /opt/bigdata/data/hadoop/hdfs/name 5 | 6 | 7 | 8 | dfs.data.dir 9 | /opt/bigdata/data/hadoop/hdfs/data 10 | 11 | 12 | 13 | dfs.replication 14 | 1 15 | 16 | 17 | dfs.http.address 18 | 0.0.0.0:50070 19 | 20 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mapreduce.framework.name 4 | yarn 5 | 6 | 7 | yarn.app.mapreduce.am.env 8 | HADOOP_MAPRED_HOME=$HADOOP_HOME 9 | 10 | 11 | mapreduce.map.env 12 | HADOOP_MAPRED_HOME=$HADOOP_HOME 13 | 14 | 15 | mapreduce.reduce.env 16 | HADOOP_MAPRED_HOME=$HADOOP_HOME 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/workers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-default/yarn-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | yarn.resourcemanager.hostname 4 | master01 5 | 6 | 7 | yarn.nodemanager.aux-services 8 | mapreduce_shuffle 9 | 10 | 11 | yarn.nodemanager.resource.memory-mb 12 | 4096 13 | 14 | 15 | yarn.nodemanager.vmem-check-enabled 16 | false 17 | 18 | 19 | yarn.nodemanager.resource.cpu-vcores 20 | 2 21 | 22 | 23 | yarn.resourcemanager.scheduler.address 24 | master01:8099 25 | 26 | 27 | yarn.web-proxy.address 28 | 0.0.0.0:18088 29 | 30 | 31 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
[configuration.xsl: XSLT markup stripped during extraction; only the rendered table header survives: name | value | description]
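And for the hadoop-federation-nm set that follows, the yarn-site.xml reconstructs to the sketch below (wrapper assumed, values verbatim from the dump); it sizes each NodeManager at 4096 MB and 4 vcores and points both the RM hostname and the scheduler address at bd-master-1:

```xml
<?xml version="1.0"?>
<!-- Reconstruction of components/hadoop/etc/hadoop-federation-nm/yarn-site.xml;
     tag layout assumed, values from the flattened dump. -->
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>bd-master-1</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>4096</value>
  </property>
  <property>
    <!-- Virtual-memory check disabled; containers commonly trip it. -->
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>4</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>bd-master-1:8099</value>
  </property>
</configuration>
```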
41 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | fs.defaultFS 4 | hdfs://bd-master-1:9000 5 | 6 | 7 | hadoop.tmp.dir 8 | /opt/bigdata/data/hadoop/tmp 9 | 10 | 11 | hadoop.proxyuser.root.hosts 12 | * 13 | 14 | 15 | hadoop.proxyuser.root.groups 16 | * 17 | 18 | 19 | hadoop.proxyuser.qiwei.hosts 20 | * 21 | 22 | 23 | hadoop.proxyuser.qiwei.groups 24 | * 25 | 26 | 27 | hadoop.proxyuser.tw.hosts 28 | * 29 | 30 | 31 | hadoop.proxyuser.tw.groups 32 | * 33 | 34 | 35 | hadoop.proxyuser.hdp.hosts 36 | * 37 | 38 | 39 | hadoop.proxyuser.hdp.groups 40 | * 41 | 42 | 43 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | dfs.name.dir 4 | /opt/bigdata/data/hadoop/hdfs/name 5 | 6 | 7 | 8 | dfs.data.dir 9 | /opt/bigdata/data/hadoop/hdfs/data 10 | 11 | 12 | 13 | dfs.replication 14 | 1 15 | 16 | 17 | dfs.http.address 18 | 0.0.0.0:50070 19 | 20 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 
13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-nm/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-nm/mapred-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>mapreduce.framework.name</name> 4 | <value>yarn</value> 5 | </property> 6 | <property> 7 | <name>yarn.app.mapreduce.am.env</name> 8 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 9 | </property> 10 | <property> 11 | <name>mapreduce.map.env</name> 12 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 13 | </property> 14 | <property> 15 | <name>mapreduce.reduce.env</name> 16 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 17 | </property> 18 | </configuration> 19 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-nm/workers:
--------------------------------------------------------------------------------
1 | localhost 2 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-nm/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>yarn.resourcemanager.hostname</name> 4 | <value>bd-master-1</value> 5 | </property> 6 | <property> 7 | <name>yarn.nodemanager.aux-services</name> 8 | <value>mapreduce_shuffle</value> 9 | </property> 10 | <property> 11 | <name>yarn.nodemanager.resource.memory-mb</name> 12 | <value>4096</value> 13 | </property> 14 | <property> 15 | <name>yarn.nodemanager.vmem-check-enabled</name> 16 | <value>false</value> 17 | </property> 18 | <property> 19 | <name>yarn.nodemanager.resource.cpu-vcores</name> 20 | <value>4</value> 21 | </property> 22 | <property> 23 | <name>yarn.resourcemanager.scheduler.address</name> 24 | <value>bd-master-1:8099</value> 25 | </property> 26 | </configuration>
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-rm/configuration.xsl:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?> 2 | <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 --> 18 | <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> 19 | <xsl:output method="html"/> 20 | <xsl:template match="configuration"> 21 | <html> 22 | <body> 23 | <table border="1"> 24 | <tr> 25 | <th>name</th> 26 | <th>value</th> 27 | <th>description</th> 28 | </tr> 29 | <xsl:for-each select="property"> 30 | <tr> 31 | <td><a name="{name}"><xsl:value-of select="name"/></a></td> 32 | <td><xsl:value-of select="value"/></td> 33 | <td><xsl:value-of select="description"/></td> 34 | </tr> 35 | </xsl:for-each> 36 | </table> 37 | </body> 38 | </html> 39 | </xsl:template> 40 | </xsl:stylesheet> 41 |
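A quick smoke test for the mapred-site.xml above: it only switches the framework to YARN and forwards HADOOP_MAPRED_HOME into the AM/map/reduce container environments, so the bundled examples jar should run as-is (the wildcard path below is an assumption; the jar name carries the release version):
$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 10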
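And once a NodeManager boots with the hadoop-federation-nm yarn-site.xml above (4096 MB / 4 vcores, vmem check off, RM at bd-master-1), its registration can be confirmed from any client node with:
$ yarn node -list -all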
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-rm/core-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>fs.defaultFS</name> 4 | <value>hdfs://bd-master-1:9000</value> 5 | </property> 6 | <property> 7 | <name>hadoop.tmp.dir</name> 8 | <value>/opt/bigdata/data/hadoop/tmp</value> 9 | </property> 10 | <property> 11 | <name>hadoop.proxyuser.root.hosts</name> 12 | <value>*</value> 13 | </property> 14 | <property> 15 | <name>hadoop.proxyuser.root.groups</name> 16 | <value>*</value> 17 | </property> 18 | <property> 19 | <name>hadoop.proxyuser.qiwei.hosts</name> 20 | <value>*</value> 21 | </property> 22 | <property> 23 | <name>hadoop.proxyuser.qiwei.groups</name> 24 | <value>*</value> 25 | </property> 26 | <property> 27 | <name>hadoop.proxyuser.tw.hosts</name> 28 | <value>*</value> 29 | </property> 30 | <property> 31 | <name>hadoop.proxyuser.tw.groups</name> 32 | <value>*</value> 33 | </property> 34 | <property> 35 | <name>hadoop.proxyuser.hdp.hosts</name> 36 | <value>*</value> 37 | </property> 38 | <property> 39 | <name>hadoop.proxyuser.hdp.groups</name> 40 | <value>*</value> 41 | </property> 42 | </configuration> 43 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-rm/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>dfs.name.dir</name> 4 | <value>/opt/bigdata/data/hadoop/hdfs/name</value> 5 | </property> 6 | 7 | <property> 8 | <name>dfs.data.dir</name> 9 | <value>/opt/bigdata/data/hadoop/hdfs/data</value> 10 | </property> 11 | 12 | <property> 13 | <name>dfs.replication</name> 14 | <value>1</value> 15 | </property> 16 | <property> 17 | <name>dfs.http.address</name> 18 | <value>0.0.0.0:50070</value> 19 | </property> 20 | </configuration>
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-rm/httpfs-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file.
19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-rm/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 
13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-rm/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-rm/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-rm/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-rm/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-federation-rm/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-rm/mapred-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>mapreduce.framework.name</name> 4 | <value>yarn</value> 5 | </property> 6 | <property> 7 | <name>yarn.app.mapreduce.am.env</name> 8 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 9 | </property> 10 | <property> 11 | <name>mapreduce.map.env</name> 12 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 13 | </property> 14 | <property> 15 | <name>mapreduce.reduce.env</name> 16 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 17 | </property> 18 | </configuration> 19 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-rm/workers:
--------------------------------------------------------------------------------
1 | localhost 2 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-federation-rm/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>yarn.resourcemanager.hostname</name> 4 | <value>bd-master-1</value> 5 | </property> 6 | <property> 7 | <name>yarn.nodemanager.aux-services</name> 8 | <value>mapreduce_shuffle</value> 9 | </property> 10 | <property> 11 | <name>yarn.nodemanager.resource.memory-mb</name> 12 | <value>4096</value> 13 | </property> 14 | <property> 15 | <name>yarn.nodemanager.vmem-check-enabled</name> 16 | <value>false</value> 17 | </property> 18 | <property> 19 | <name>yarn.nodemanager.resource.cpu-vcores</name> 20 | <value>4</value> 21 | </property> 22 | <property> 23 | <name>yarn.resourcemanager.scheduler.address</name> 24 | <value>bd-master-1:8099</value> 25 | </property> 26 | 27 | <property> 28 | <name>yarn.federation.enabled</name> 29 | <value>true</value> 30 | </property> 31 | <property> 32 | <name>yarn.federation.state-store.class</name> 33 | <value>org.apache.hadoop.yarn.server.federation.store.impl.ZookeeperFederationStateStore</value> 34 | </property> 35 | <property> 36 | <name>yarn.resourcemanager.cluster-id</name> 37 | <value>test1</value> 38 | </property> 39 | <property> 40 | <name>hadoop.zk.address</name> 41 | <value>bd-master-1:2181</value> 42 | </property> 43 | <property> 44 | <name>yarn.resourcemanager.epoch</name> 45 | <value>11</value> 46 | </property> 47 | </configuration> 48 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/configuration.xsl:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?> 2 | <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 --> 18 | <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> 19 | <xsl:output method="html"/> 20 | <xsl:template match="configuration"> 21 | <html> 22 | <body> 23 | <table border="1"> 24 | <tr> 25 | <th>name</th> 26 | <th>value</th> 27 | <th>description</th> 28 | </tr> 29 | <xsl:for-each select="property"> 30 | <tr> 31 | <td><a name="{name}"><xsl:value-of select="name"/></a></td> 32 | <td><xsl:value-of select="value"/></td> 33 | <td><xsl:value-of select="description"/></td> 34 | </tr> 35 | </xsl:for-each> 36 | </table> 37 | </body> 38 | </html> 39 | </xsl:template> 40 | </xsl:stylesheet> 41 |
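With yarn.federation.enabled plus ZookeeperFederationStateStore in the hadoop-federation-rm yarn-site.xml above, each RM (cluster-id test1 here) registers its membership in ZooKeeper at bd-master-1:2181. Assuming the default parent znode /federationstore (yarn.federation.state-store.zk.parent-path is not overridden here), the store can be inspected with the ZooKeeper CLI:
$ zkCli.sh -server bd-master-1:2181 ls /federationstore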
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/core-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>fs.defaultFS</name> 4 | <value>hdfs://bd-master-1:9000</value> 5 | </property> 6 | <property> 7 | <name>hadoop.tmp.dir</name> 8 | <value>/opt/bigdata/data/hadoop/tmp</value> 9 | </property> 10 | <property> 11 | <name>hadoop.proxyuser.root.hosts</name> 12 | <value>*</value> 13 | </property> 14 | <property> 15 | <name>hadoop.proxyuser.root.groups</name> 16 | <value>*</value> 17 | </property> 18 | <property> 19 | <name>hadoop.proxyuser.qiwei.hosts</name> 20 | <value>*</value> 21 | </property> 22 | <property> 23 | <name>hadoop.proxyuser.qiwei.groups</name> 24 | <value>*</value> 25 | </property> 26 | <property> 27 | <name>hadoop.proxyuser.tw.hosts</name> 28 | <value>*</value> 29 | </property> 30 | <property> 31 | <name>hadoop.proxyuser.tw.groups</name> 32 | <value>*</value> 33 | </property> 34 | </configuration> 35 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>dfs.name.dir</name> 4 | <value>/opt/bigdata/data/hadoop/hdfs/name</value> 5 | </property> 6 | 7 | <property> 8 | <name>dfs.data.dir</name> 9 | <value>/opt/bigdata/data/hadoop/hdfs/data</value> 10 | </property> 11 | 12 | <property> 13 | <name>dfs.replication</name> 14 | <value>1</value> 15 | </property> 16 | <property> 17 | <name>dfs.http.address</name> 18 | <value>0.0.0.0:50070</value> 19 | </property> 20 | </configuration>
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/httpfs-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/httpfs-log4j.properties:
--------------------------------------------------------------------------------
1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-for-nm/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-for-nm/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-for-nm/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/kms-site.xml:
--------------------------------------------------------------------------------
1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/mapred-env.cmd:
--------------------------------------------------------------------------------
1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/workers:
--------------------------------------------------------------------------------
1 | localhost 2 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-for-nm/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>yarn.nodemanager.aux-services</name> 4 | <value>mapreduce_shuffle</value> 5 | </property> 6 | <property> 7 | <name>yarn.nodemanager.resource.memory-mb</name> 8 | <value>4096</value> 9 | </property> 10 | <property> 11 | <name>yarn.nodemanager.vmem-check-enabled</name> 12 | <value>false</value> 13 | </property> 14 | <property> 15 | <name>yarn.nodemanager.resource.cpu-vcores</name> 16 | <value>4</value> 17 | </property> 18 | <property> 19 | <name>yarn.resourcemanager.scheduler.address</name> 20 | <value>bd-master-1:8099</value> 21 | </property> 22 | 23 | <property> 24 | <name>yarn.nodemanager.amrmproxy.enabled</name> 25 | <value>true</value> 26 | </property> 27 | </configuration> 28 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-ha/configuration.xsl:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?> 2 | <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 --> 18 | <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> 19 | <xsl:output method="html"/> 20 | <xsl:template match="configuration"> 21 | <html> 22 | <body> 23 | <table border="1"> 24 | <tr> 25 | <th>name</th> 26 | <th>value</th> 27 | <th>description</th> 28 | </tr> 29 | <xsl:for-each select="property"> 30 | <tr> 31 | <td><a name="{name}"><xsl:value-of select="name"/></a></td> 32 | <td><xsl:value-of select="value"/></td> 33 | <td><xsl:value-of select="description"/></td> 34 | </tr> 35 | </xsl:for-each> 36 | </table> 37 | </body> 38 | </html> 39 | </xsl:template> 40 | </xsl:stylesheet> 41 |
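The hadoop-for-nm profile above turns on yarn.nodemanager.amrmproxy.enabled, so AM-to-RM traffic is intercepted on the NodeManager itself. A cheap liveness check, assuming the stock NM webapp port 8042, is the NM REST API:
$ curl http://localhost:8042/ws/v1/node/info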
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-ha/core-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>fs.defaultFS</name> 4 | <value>hdfs://bd-master-1:9000</value> 5 | </property> 6 | <property> 7 | <name>hadoop.tmp.dir</name> 8 | <value>/opt/bigdata/data/hadoop/tmp</value> 9 | </property> 10 | <property> 11 | <name>hadoop.proxyuser.root.hosts</name> 12 | <value>*</value> 13 | </property> 14 | <property> 15 | <name>hadoop.proxyuser.root.groups</name> 16 | <value>*</value> 17 | </property> 18 | <property> 19 | <name>hadoop.proxyuser.qiwei.hosts</name> 20 | <value>*</value> 21 | </property> 22 | <property> 23 | <name>hadoop.proxyuser.qiwei.groups</name> 24 | <value>*</value> 25 | </property> 26 | <property> 27 | <name>hadoop.proxyuser.tw.hosts</name> 28 | <value>*</value> 29 | </property> 30 | <property> 31 | <name>hadoop.proxyuser.tw.groups</name> 32 | <value>*</value> 33 | </property> 34 | </configuration> 35 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-ha/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>dfs.name.dir</name> 4 | <value>/opt/bigdata/data/hadoop/hdfs/name</value> 5 | </property> 6 | 7 | <property> 8 | <name>dfs.data.dir</name> 9 | <value>/opt/bigdata/data/hadoop/hdfs/data</value> 10 | </property> 11 | 12 | <property> 13 | <name>dfs.replication</name> 14 | <value>1</value> 15 | </property> 16 | <property> 17 | <name>dfs.http.address</name> 18 | <value>0.0.0.0:50070</value> 19 | </property> 20 | </configuration>
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-ha/httpfs-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-ha/httpfs-log4j.properties:
--------------------------------------------------------------------------------
1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-ha/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-ha/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-ha/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-ha/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-ha/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-ha/workers:
--------------------------------------------------------------------------------
1 | localhost 2 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-ha/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>yarn.nodemanager.aux-services</name> 4 | <value>mapreduce_shuffle</value> 5 | </property> 6 | <property> 7 | <name>yarn.nodemanager.resource.memory-mb</name> 8 | <value>8192</value> 9 | </property> 10 | <property> 11 | <name>yarn.nodemanager.vmem-check-enabled</name> 12 | <value>false</value> 13 | </property> 14 | <property> 15 | <name>yarn.nodemanager.resource.cpu-vcores</name> 16 | <value>4</value> 17 | </property> 18 | 19 | <property> 20 | <name>yarn.resourcemanager.ha.enabled</name> 21 | <value>true</value> 22 | </property> 23 | <property> 24 | <name>yarn.resourcemanager.cluster-id</name> 25 | <value>cluster1</value> 26 | </property> 27 | <property> 28 | <name>yarn.resourcemanager.ha.rm-ids</name> 29 | <value>rm1,rm2</value> 30 | </property> 31 | <property> 32 | <name>yarn.resourcemanager.hostname.rm1</name> 33 | <value>bd-master-1</value> 34 | </property> 35 | <property> 36 | <name>yarn.resourcemanager.hostname.rm2</name> 37 | <value>hadoop-master2</value> 38 | </property> 39 | <property> 40 | <name>yarn.resourcemanager.webapp.address.rm1</name> 41 | <value>bd-master-1:8088</value> 42 | </property> 43 | <property> 44 | <name>yarn.resourcemanager.webapp.address.rm2</name> 45 | <value>hadoop-master2:8088</value> 46 | </property> 47 | <property> 48 | <name>hadoop.zk.address</name> 49 | <value>bd-master-1:2181</value> 50 | </property> 51 | </configuration> 52 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/configuration.xsl:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?> 2 | <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 --> 18 | <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> 19 | <xsl:output method="html"/> 20 | <xsl:template match="configuration"> 21 | <html> 22 | <body> 23 | <table border="1"> 24 | <tr> 25 | <th>name</th> 26 | <th>value</th> 27 | <th>description</th> 28 | </tr> 29 | <xsl:for-each select="property"> 30 | <tr> 31 | <td><a name="{name}"><xsl:value-of select="name"/></a></td> 32 | <td><xsl:value-of select="value"/></td> 33 | <td><xsl:value-of select="description"/></td> 34 | </tr> 35 | </xsl:for-each> 36 | </table> 37 | </body> 38 | </html> 39 | </xsl:template> 40 | </xsl:stylesheet> 41 |
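For the hadoop-ha yarn-site.xml above (rm1 on bd-master-1, rm2 on hadoop-master2, leader election via ZooKeeper at bd-master-1:2181), yarn rmadmin reports which ResourceManager currently holds the active role:
$ yarn rmadmin -getServiceState rm1
$ yarn rmadmin -getServiceState rm2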
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/core-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>fs.defaultFS</name> 4 | <value>hdfs://bd-master-1:9000</value> 5 | </property> 6 | <property> 7 | <name>hadoop.tmp.dir</name> 8 | <value>/opt/bigdata/data/hadoop/tmp</value> 9 | </property> 10 | <property> 11 | <name>hadoop.proxyuser.root.hosts</name> 12 | <value>*</value> 13 | </property> 14 | <property> 15 | <name>hadoop.proxyuser.root.groups</name> 16 | <value>*</value> 17 | </property> 18 | <property> 19 | <name>hadoop.proxyuser.qiwei.hosts</name> 20 | <value>*</value> 21 | </property> 22 | <property> 23 | <name>hadoop.proxyuser.qiwei.groups</name> 24 | <value>*</value> 25 | </property> 26 | <property> 27 | <name>hadoop.proxyuser.tw.hosts</name> 28 | <value>*</value> 29 | </property> 30 | <property> 31 | <name>hadoop.proxyuser.tw.groups</name> 32 | <value>*</value> 33 | </property> 34 | <property> 35 | <name>hadoop.proxyuser.hdp.hosts</name> 36 | <value>*</value> 37 | </property> 38 | <property> 39 | <name>hadoop.proxyuser.hdp.groups</name> 40 | <value>*</value> 41 | </property> 42 | </configuration> 43 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>dfs.name.dir</name> 4 | <value>/opt/bigdata/data/hadoop/hdfs/name</value> 5 | </property> 6 | 7 | <property> 8 | <name>dfs.data.dir</name> 9 | <value>/opt/bigdata/data/hadoop/hdfs/data</value> 10 | </property> 11 | 12 | <property> 13 | <name>dfs.replication</name> 14 | <value>1</value> 15 | </property> 16 | <property> 17 | <name>dfs.http.address</name> 18 | <value>0.0.0.0:50070</value> 19 | </property> 20 | </configuration>
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/httpfs-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/httpfs-log4j.properties:
--------------------------------------------------------------------------------
1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-router/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-router/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-router/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/kms-site.xml:
--------------------------------------------------------------------------------
1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/mapred-env.cmd:
--------------------------------------------------------------------------------
1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/mapred-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>mapreduce.framework.name</name> 4 | <value>yarn</value> 5 | </property> 6 | <property> 7 | <name>yarn.app.mapreduce.am.env</name> 8 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 9 | </property> 10 | <property> 11 | <name>mapreduce.map.env</name> 12 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 13 | </property> 14 | <property> 15 | <name>mapreduce.reduce.env</name> 16 | <value>HADOOP_MAPRED_HOME=$HADOOP_HOME</value> 17 | </property> 18 | </configuration> 19 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-router/workers:
--------------------------------------------------------------------------------
1 | localhost 2 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-security-cli/configuration.xsl:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?> 2 | <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 --> 18 | <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> 19 | <xsl:output method="html"/> 20 | <xsl:template match="configuration"> 21 | <html> 22 | <body> 23 | <table border="1"> 24 | <tr> 25 | <th>name</th> 26 | <th>value</th> 27 | <th>description</th> 28 | </tr> 29 | <xsl:for-each select="property"> 30 | <tr> 31 | <td><a name="{name}"><xsl:value-of select="name"/></a></td> 32 | <td><xsl:value-of select="value"/></td> 33 | <td><xsl:value-of select="description"/></td> 34 | </tr> 35 | </xsl:for-each> 36 | </table> 37 | </body> 38 | </html> 39 | </xsl:template> 40 | </xsl:stylesheet> 41 |
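The kms-env.sh dumps above keep the stock defaults (KMS on port 9600, SSL off). On Hadoop 3 the daemon is started with hadoop --daemon start kms, after which the key shell makes a reasonable smoke test (the provider URI below assumes those defaults and bd-master-1 as the KMS host):
$ hadoop --daemon start kms
$ hadoop key list -provider kms://http@bd-master-1:9600/kms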
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-security-cli/core-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>fs.defaultFS</name> 4 | <value>hdfs://bd-master-1:9000</value> 5 | </property> 6 | <property> 7 | <name>hadoop.tmp.dir</name> 8 | <value>/opt/bigdata/data/hadoop/tmp</value> 9 | </property> 10 | <property> 11 | <name>hadoop.proxyuser.root.hosts</name> 12 | <value>*</value> 13 | </property> 14 | <property> 15 | <name>hadoop.proxyuser.root.groups</name> 16 | <value>*</value> 17 | </property> 18 | <property> 19 | <name>hadoop.proxyuser.qiwei.hosts</name> 20 | <value>*</value> 21 | </property> 22 | <property> 23 | <name>hadoop.proxyuser.qiwei.groups</name> 24 | <value>*</value> 25 | </property> 26 | <property> 27 | <name>hadoop.proxyuser.tw.hosts</name> 28 | <value>*</value> 29 | </property> 30 | <property> 31 | <name>hadoop.proxyuser.tw.groups</name> 32 | <value>*</value> 33 | </property> 34 | 35 | <property> 36 | <name>hadoop.security.authentication</name> 37 | <value>kerberos</value> 38 | </property> 39 | <property> 40 | <name>hadoop.security.authorization</name> 41 | <value>true</value> 42 | </property> 43 | 44 | </configuration> 45 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-security-cli/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <configuration> 2 | <property> 3 | <name>dfs.name.dir</name> 4 | <value>/opt/bigdata/data/hadoop/hdfs/name</value> 5 | </property> 6 | <property> 7 | <name>dfs.data.dir</name> 8 | <value>/opt/bigdata/data/hadoop/hdfs/data</value> 9 | </property> 10 | <property> 11 | <name>dfs.replication</name> 12 | <value>1</value> 13 | </property> 14 | <property> 15 | <name>dfs.http.address</name> 16 | <value>0.0.0.0:50070</value> 17 | </property> 18 | 19 | </configuration> 20 |
--------------------------------------------------------------------------------
/components/hadoop/etc/hadoop-security-cli/httpfs-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file.
19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 
13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/components/hadoop/etc/hadoop-security-cli/keystore.jks -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/krb5.conf: -------------------------------------------------------------------------------- 1 | # Configuration snippets may be placed in this directory as well 2 | 3 | [logging] 4 | default = FILE:/var/log/krb5libs.log 5 | kdc = FILE:/var/log/krb5kdc.log 6 | admin_server = FILE:/var/log/kadmind.log 7 | 8 | [libdefaults] 9 | dns_lookup_kdc = false 10 | dns_lookup_realm = false 11 | ticket_lifetime = 24h 12 | renew_lifetime = 7d 13 | forwardable = true 14 | default_realm = HADOOP.COM 15 | udp_preference_limit = 1 16 | [realms] 17 | HADOOP.COM = { 18 | kdc = kdc:188 19 | admin_server = kdc:188 20 | } 21 | 22 | [domain_realm] 23 | .hadoop.com = HADOOP.COM 24 | hadoop.com = HADOOP.COM 25 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/components/hadoop/etc/hadoop-security-cli/truststore.jks -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security-cli/workers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
name | value | description
37 | 38 | 39 |
40 |
41 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 
13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/components/hadoop/etc/hadoop-security/keystore.jks -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/krb5.conf: -------------------------------------------------------------------------------- 1 | # Configuration snippets may be placed in this directory as well 2 | 3 | [logging] 4 | default = FILE:/var/log/krb5libs.log 5 | kdc = FILE:/var/log/krb5kdc.log 6 | admin_server = FILE:/var/log/kadmind.log 7 | 8 | [libdefaults] 9 | dns_lookup_kdc = false 10 | dns_lookup_realm = false 11 | ticket_lifetime = 24h 12 | renew_lifetime = 7d 13 | forwardable = true 14 | default_realm = HADOOP.COM 15 | udp_preference_limit = 1 16 | [realms] 17 | HADOOP.COM = { 18 | kdc = kdc:188 19 | admin_server = kdc:188 20 | } 21 | 22 | [domain_realm] 23 | .hadoop.com = HADOOP.COM 24 | hadoop.com = HADOOP.COM 25 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mapreduce.framework.name 4 | yarn 5 | 6 | 7 | yarn.app.mapreduce.am.env 8 | HADOOP_MAPRED_HOME=$HADOOP_HOME 9 | 10 | 11 | mapreduce.map.env 12 | HADOOP_MAPRED_HOME=$HADOOP_HOME 13 | 14 | 15 | mapreduce.reduce.env 16 | HADOOP_MAPRED_HOME=$HADOOP_HOME 17 | 18 | 19 | mapreduce.jobhistory.keytab 20 | /opt/bigdata/hd.keytab 21 | 22 | 23 | mapreduce.jobhistory.principal 24 | hd/hadoop@HADOOP.COM 25 | 26 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/components/hadoop/etc/hadoop-security/truststore.jks -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/workers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/hadoop/etc/hadoop-security/yarn-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | yarn.resourcemanager.hostname 4 | bd-master-1 5 | 6 | 7 | yarn.nodemanager.aux-services 8 | mapreduce_shuffle 9 | 10 | 11 | yarn.nodemanager.resource.memory-mb 12 | 4096 13 | 14 | 15 | yarn.nodemanager.vmem-check-enabled 16 | false 17 | 18 | 19 | yarn.nodemanager.resource.cpu-vcores 20 | 2 21 | 22 | 23 | 24 | yarn.resourcemanager.keytab 25 | /opt/bigdata/hd.keytab 26 | 27 | 28 | yarn.resourcemanager.principal 29 | hd/hadoop@HADOOP.COM 30 | 31 | 32 | yarn.nodemanager.keytab 33 | /opt/bigdata/hd.keytab 34 | 35 | 36 | yarn.nodemanager.principal 37 | hd/hadoop@HADOOP.COM 38 | 39 | -------------------------------------------------------------------------------- /components/hadoop/script/dn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function start(){ 3 | echo "start dn" 4 | hdfs --daemon start datanode 5 | echo "start nm" 6 | yarn --daemon start nodemanager 7 | } 8 | 9 | start 10 | sleep infinity 11 | -------------------------------------------------------------------------------- /components/hadoop/script/nn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | function start(){ 3 | echo "format" 4 | hdfs namenode -format 5 | echo "start nn" 6 | hdfs --daemon start namenode 7 | echo "start rm" 8 | yarn --daemon start resourcemanager 9 | echo "start proxy" 10 | yarn --daemon start proxyserver 11 | } 12 | 13 | 14 | start 15 | sleep infinity 16 | -------------------------------------------------------------------------------- /components/hbase/.env: -------------------------------------------------------------------------------- 1 | BIGDATA_HOME=/opt/bigdata 2 | BIGDATA_CONF=$BIGDATA_HOME/etc 3 | BIGDATA_LOG=$BIGDATA_HOME/logs 4 | HBASE_HOME=$BIGDATA_HOME/hbase 5 | HBASE_CONF_DIR=$BIGDATA_CONF/hbase 6 | HBASE_LOG_DIR=$BIGDATA_LOG/hbase 7 | 8 | PATH=$PATH:$HBASE_HOME/bin 9 | #HBASE_MANAGES_ZK=true 10 | 
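A note on components/hadoop/script/nn.sh above: it runs `hdfs namenode -format` unconditionally, so every container restart re-formats the NameNode and discards any existing HDFS metadata. A minimal guard — a sketch, assuming the dfs.name.dir of /opt/bigdata/data/hadoop/hdfs/name that the hdfs-site.xml files in this repo set — would format only on first start:

``` shell
#!/bin/bash
# Sketch: format the NameNode only when no metadata exists yet.
# NAME_DIR must match the dfs.name.dir actually set in hdfs-site.xml.
NAME_DIR=/opt/bigdata/data/hadoop/hdfs/name
if [ ! -d "$NAME_DIR/current" ]; then
  echo "format"
  hdfs namenode -format -nonInteractive
fi
echo "start nn"
hdfs --daemon start namenode
echo "start rm"
yarn --daemon start resourcemanager
echo "start proxy"
yarn --daemon start proxyserver

sleep infinity
```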
-------------------------------------------------------------------------------- /components/hbase/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _base: 3 | image: base-data:1.0 4 | volumes: 5 | - ${INSTALL}/hbase:/opt/bigdata/hbase 6 | - ./etc/hbase:/opt/bigdata/etc/hbase 7 | - ./script:/opt/bigdata/script 8 | hmaster: 9 | extends: 10 | service: _base 11 | working_dir: /opt/bigdata 12 | hostname: hmaster 13 | networks: 14 | - cluster_net 15 | env_file: 16 | - .env 17 | networks: 18 | cluster_net: 19 | external: true 20 | -------------------------------------------------------------------------------- /components/hbase/etc/hbase/hbase-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 23 | 24 | 25 | hbase.rootdir 26 | hdfs://master01:9000/hbase 27 | 28 | 29 | hbase.cluster.distributed 30 | true 31 | 32 | 33 | hbase.zookeeper.quorum 34 | zk01:2181 35 | 36 | 37 | hbase.unsafe.stream.capability.enforce 38 | false 39 | 40 | 41 | -------------------------------------------------------------------------------- /components/hbase/etc/hbase/log4j-hbtop.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | log4j.rootLogger=WARN,console 18 | log4j.threshold=WARN 19 | 20 | # console 21 | log4j.appender.console=org.apache.log4j.ConsoleAppender 22 | log4j.appender.console.target=System.err 23 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 24 | log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n 25 | 26 | # ZooKeeper will still put stuff at WARN 27 | log4j.logger.org.apache.zookeeper=ERROR 28 | -------------------------------------------------------------------------------- /components/hbase/etc/hbase/regionservers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/hbase/script/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | hbase-daemon.sh start master 3 | hbase-daemon.sh start regionserver 4 | -------------------------------------------------------------------------------- /components/hive/.env: -------------------------------------------------------------------------------- 1 | BIGDATA_HOME=/opt/bigdata 2 | BIGDATA_CONF=$BIGDATA_HOME/etc 3 | BIGDATA_LOG=$BIGDATA_HOME/logs 4 | 5 | HADOOP_HOME=$BIGDATA_HOME/hadoop 6 | HADOOP_CONF_DIR=$BIGDATA_CONF/hadoop 7 | HADOOP_LOG_DIR=$BIGDATA_LOG/hadoop 8 | YARN_CONF_DIR=$BIGDATA_CONF/hadoop 9 | YARN_LOG_DIR=$BIGDATA_LOG/hadoop 10 | 11 | HIVE_HOME=$BIGDATA_HOME/hive 12 | HIVE_CONF_DIR=$BIGDATA_CONF/hive 13 | 14 | PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$HIVE_HOME/hcatalog/sbin:$HIVE_HOME/hcatalog/bin 15 | -------------------------------------------------------------------------------- /components/hive/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _base: 3 | image: base-data:1.0 4 | volumes: 5 | - ${INSTALL}/hive:/opt/bigdata/hive 6 | - ${INSTALL}/hadoop:/opt/bigdata/hadoop 7 | - ./etc/hive-default:/opt/bigdata/etc/hive 8 | - ./etc/hadoop-cli:/opt/bigdata/etc/hadoop 9 | - ./script:/opt/bigdata/script 10 | hive01: 11 | extends: 12 | service: _base 13 | working_dir: /opt/bigdata 14 | hostname: hive01 15 | networks: 16 | - cluster_net 17 | env_file: 18 | - .env 19 | networks: 20 | cluster_net: 21 | external: true 22 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
name | value | description
37 | 38 | 39 |
40 |
41 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | fs.defaultFS 4 | hdfs://master01:9000 5 | 6 | 7 | hadoop.tmp.dir 8 | /opt/bigdata/data/hadoop/tmp 9 | 10 | 11 | hadoop.proxyuser.ice.hosts 12 | * 13 | 14 | 15 | hadoop.proxyuser.ice.groups 16 | * 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | dfs.name.dir 4 | /opt/bigdata/data/hadoop/hdfs/name 5 | 6 | 7 | 8 | dfs.data.dir 9 | /opt/bigdata/data/hadoop/hdfs/data 10 | 11 | 12 | 13 | dfs.replication 14 | 1 15 | 16 | 17 | dfs.http.address 18 | 0.0.0.0:50070 19 | 20 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mapreduce.framework.name 4 | yarn 5 | 6 | 7 | yarn.app.mapreduce.am.env 8 | HADOOP_MAPRED_HOME=$HADOOP_HOME 9 | 10 | 11 | mapreduce.map.env 12 | HADOOP_MAPRED_HOME=$HADOOP_HOME 13 | 14 | 15 | mapreduce.reduce.env 16 | HADOOP_MAPRED_HOME=$HADOOP_HOME 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/workers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/hive/etc/hadoop-cli/yarn-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | yarn.resourcemanager.hostname 4 | master01 5 | 6 | 7 | yarn.nodemanager.aux-services 8 | mapreduce_shuffle 9 | 10 | 11 | yarn.nodemanager.resource.memory-mb 12 | 2048 13 | 14 | 15 | yarn.nodemanager.vmem-check-enabled 16 | false 17 | 18 | 19 | yarn.nodemanager.resource.cpu-vcores 20 | 1 21 | 22 | 23 | yarn.resourcemanager.scheduler.address 24 | master01:8099 25 | 26 | 27 | -------------------------------------------------------------------------------- /components/hive/etc/hive-default/beeline-log4j.properties.template: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | log4j.rootLogger=WARN, console 18 | 19 | ######## console appender ######## 20 | log4j.appender.console=org.apache.log4j.ConsoleAppender 21 | log4j.appender.console.target=System.err 22 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n 24 | log4j.appender.console.encoding=UTF-8 25 | -------------------------------------------------------------------------------- /components/hive/etc/hive-default/hive-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | hive.metastore.uris 6 | thrift://hive01:9083 7 | 8 | 9 | hive.metastore.warehouse.dir 10 | /user/hive/warehouse 11 | 12 | 13 | -------------------------------------------------------------------------------- /components/hive/etc/hive-default/ivysettings.xml: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /components/hive/script/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | hadoop fs -mkdir /tmp 3 | hadoop fs -mkdir /user/hive/warehouse 4 | hadoop fs -chmod g+w /tmp 5 | hadoop fs -chmod g+w /user/hive/warehouse 6 | 7 | schematool -dbType derby -initSchema 8 | 9 | hiveserver2 & 10 | 11 | hcat_server.sh & 12 | -------------------------------------------------------------------------------- /components/kafka/.env: -------------------------------------------------------------------------------- 1 | BIGDATA_HOME=/opt/bigdata 2 | BIGDATA_CONF=$BIGDATA_HOME/etc 3 | BIGDATA_LOG=$BIGDATA_HOME/logs 4 | 5 | KAFKA_HOME=$BIGDATA_HOME/kafka 6 | LOG_DIR=$BIGDATA_LOG/kafka 7 | PATH=$KAFKA_HOME/bin:$PATH 8 | -------------------------------------------------------------------------------- /components/kafka/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/components/kafka/README.md -------------------------------------------------------------------------------- /components/kafka/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _base: 3 | image: base-data:1.0 4 | volumes: 5 | - ${INSTALL}/kafka:/opt/bigdata/kafka 6 | - ./etc/kafka:/opt/bigdata/etc/kafka 7 | - ./script:/opt/bigdata/script 8 | broker01: 9 | extends: 10 | service: _base 11 | working_dir: /opt/bigdata 12 | hostname: broker01 13 | networks: 14 | - cluster_net 15 | env_file: 16 | - .env 17 | ports: 18 | - "9092:9092" 19 | ui: 20 | image: provectuslabs/kafka-ui:latest 21 | ports: 22 | - 8080:8080 23 | environment: 24 | DYNAMIC_CONFIG_ENABLED: 'true' 25 | networks: 26 | - cluster_net 27 | networks: 28 | cluster_net: 29 | external: true 30 | -------------------------------------------------------------------------------- /components/kafka/etc/kafka/config.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/components/kafka/etc/kafka/config.yml -------------------------------------------------------------------------------- /components/kafka/etc/kafka/server.properties: 
-------------------------------------------------------------------------------- 1 | broker.id=1 2 | log.dirs=/opt/bigdata/logs/kafka 3 | zookeeper.connect=zk01:2181 4 | offsets.topic.replication.factor=1 5 | -------------------------------------------------------------------------------- /components/kafka/script/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #mkdir -p $BIGDATA_LOG/kafka 3 | kafka-server-start.sh $BIGDATA_CONF/kafka/server.properties 4 | -------------------------------------------------------------------------------- /components/spark/.env: -------------------------------------------------------------------------------- 1 | BIGDATA_HOME=/opt/bigdata 2 | BIGDATA_CONF=$BIGDATA_HOME/etc 3 | BIGDATA_LOG=$BIGDATA_HOME/logs 4 | HADOOP_HOME=$BIGDATA_HOME/hadoop 5 | 6 | HADOOP_CONF_DIR=$BIGDATA_CONF/hadoop 7 | YARN_CONF_DIR=$BIGDATA_CONF/hadoop 8 | 9 | SPARK_HOME=$BIGDATA_HOME/spark 10 | SPARK_CONF_DIR=$BIGDATA_CONF/spark 11 | PATH=$HADOOP_HOME/bin:$SPARK_HOME/bin:$PATH 12 | -------------------------------------------------------------------------------- /components/spark/README.md: -------------------------------------------------------------------------------- 1 | # How to submit a job 2 | ``` 3 | . 4 | ├── docker-compose.yml 5 | ├── etc 6 | ├── README.md 7 | └── script 8 | ``` 9 | ## export HADOOP_CLASSPATH 10 | ``` shell 11 | export HADOOP_CLASSPATH=`hadoop classpath` 12 | ``` 13 | 14 | ## submit job 15 | ``` shell 16 | spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode cluster $SPARK_HOME/examples/jars/spark-examples_*.jar 10 17 | ``` 18 | 19 | 20 | ## Start Spark shell on YARN 21 | ``` shell 22 | spark-shell --master yarn 23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /components/spark/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _base: 3 | image: base-data:1.0 4 | volumes: 5 | - ${INSTALL}/hadoop:/opt/bigdata/hadoop 6 | - ${INSTALL}/spark:/opt/bigdata/spark 7 | - ./etc/hadoop-cli:/opt/bigdata/etc/hadoop 8 | - ./etc/spark-default:/opt/bigdata/etc/spark 9 | - ./script:/opt/bigdata/script 10 | spark01: 11 | extends: 12 | service: _base 13 | working_dir: /opt/bigdata 14 | hostname: spark01 15 | networks: 16 | - cluster_net 17 | env_file: 18 | - .env 19 | networks: 20 | cluster_net: 21 | external: true 22 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
name | value | description
37 | 38 | 39 |
40 |
41 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | fs.defaultFS 4 | hdfs://master01:9000 5 | 6 | 7 | hadoop.tmp.dir 8 | /opt/bigdata/data/hadoop/tmp 9 | 10 | 11 | hadoop.proxyuser.ice.hosts 12 | * 13 | 14 | 15 | hadoop.proxyuser.ice.groups 16 | * 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | dfs.name.dir 4 | /opt/bigdata/data/hadoop/hdfs/name 5 | 6 | 7 | 8 | dfs.data.dir 9 | /opt/bigdata/data/hadoop/hdfs/data 10 | 11 | 12 | 13 | dfs.replication 14 | 1 15 | 16 | 17 | dfs.http.address 18 | 0.0.0.0:50070 19 | 20 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 19 | # 20 | 21 | # HTTPFS config directory 22 | # 23 | # export HTTPFS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # HTTPFS log directory 26 | # 27 | # export HTTPFS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # HTTPFS temporary directory 30 | # 31 | # export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp 32 | 33 | # The HTTP port used by HTTPFS 34 | # 35 | # export HTTPFS_HTTP_PORT=14000 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export HTTPFS_MAX_THREADS=1000 40 | 41 | # The hostname HttpFS server runs on 42 | # 43 | # export HTTPFS_HTTP_HOSTNAME=$(hostname -f) 44 | 45 | # The maximum size of HTTP header 46 | # 47 | # export HTTPFS_MAX_HTTP_HEADER_SIZE=65536 48 | 49 | # Whether SSL is enabled 50 | # 51 | # export HTTPFS_SSL_ENABLED=false 52 | 53 | # The location of the SSL keystore if using SSL 54 | # 55 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 56 | 57 | # The password of the SSL keystore if using SSL 58 | # 59 | # export HTTPFS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/httpfs-log4j.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. See accompanying LICENSE file. 13 | # 14 | 15 | # If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time 16 | # Setup sets its value to '${httpfs.home}/logs' 17 | 18 | log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender 19 | log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd 20 | log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log 21 | log4j.appender.httpfs.Append=true 22 | log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 24 | 25 | log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender 26 | log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd 27 | log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log 28 | log4j.appender.httpfsaudit.Append=true 29 | log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout 30 | log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n 31 | 32 | log4j.logger.httpfsaudit=INFO, httpfsaudit 33 | 34 | log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs 35 | log4j.logger.org.apache.hadoop.lib=INFO, httpfs 36 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 17 | # 18 | # hadoop-env.sh is read prior to this file. 
19 | # 20 | 21 | # KMS config directory 22 | # 23 | # export KMS_CONFIG=${HADOOP_CONF_DIR} 24 | 25 | # KMS log directory 26 | # 27 | # export KMS_LOG=${HADOOP_LOG_DIR} 28 | 29 | # KMS temporary directory 30 | # 31 | # export KMS_TEMP=${HADOOP_HOME}/temp 32 | 33 | # The HTTP port used by KMS 34 | # 35 | # export KMS_HTTP_PORT=9600 36 | 37 | # The maximum number of HTTP handler threads 38 | # 39 | # export KMS_MAX_THREADS=1000 40 | 41 | # The maximum size of HTTP header 42 | # 43 | # export KMS_MAX_HTTP_HEADER_SIZE=65536 44 | 45 | # Whether SSL is enabled 46 | # 47 | # export KMS_SSL_ENABLED=false 48 | 49 | # The location of the SSL keystore if using SSL 50 | # 51 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 52 | 53 | # The password of the SSL keystore if using SSL 54 | # 55 | # export KMS_SSL_KEYSTORE_PASS=password -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/kms-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA 20 | 21 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mapreduce.framework.name 4 | yarn 5 | 6 | 7 | yarn.app.mapreduce.am.env 8 | HADOOP_MAPRED_HOME=$HADOOP_HOME 9 | 10 | 11 | mapreduce.map.env 12 | HADOOP_MAPRED_HOME=$HADOOP_HOME 13 | 14 | 15 | mapreduce.reduce.env 16 | HADOOP_MAPRED_HOME=$HADOOP_HOME 17 | 18 | 19 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/workers: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /components/spark/etc/hadoop-cli/yarn-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | yarn.resourcemanager.hostname 4 | master01 5 | 6 | 7 | yarn.nodemanager.aux-services 8 | mapreduce_shuffle 9 | 10 | 11 | yarn.nodemanager.resource.memory-mb 12 | 2048 13 | 14 | 15 | yarn.nodemanager.vmem-check-enabled 16 | false 17 | 18 | 19 | yarn.nodemanager.resource.cpu-vcores 20 | 1 21 | 22 | 23 | yarn.resourcemanager.scheduler.address 24 | master01:8099 25 | 26 | 27 | -------------------------------------------------------------------------------- /components/spark/etc/spark-default/fairscheduler.xml.template: -------------------------------------------------------------------------------- 1 | 2 | 3 | 19 | 20 | 21 | 22 | FAIR 23 | 1 24 | 2 25 | 26 | 27 | FIFO 28 | 2 29 | 3 30 | 31 | 32 | -------------------------------------------------------------------------------- /components/spark/etc/spark-default/spark-defaults.conf.template: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # Default system properties included when running spark-submit. 19 | # This is useful for setting default environmental settings. 
20 | 21 | # Example: 22 | # spark.master spark://master:7077 23 | # spark.eventLog.enabled true 24 | # spark.eventLog.dir hdfs://namenode:8021/directory 25 | # spark.serializer org.apache.spark.serializer.KryoSerializer 26 | # spark.driver.memory 5g 27 | # spark.executor.extraJavaOptions -XX:+PrintGCDetails -Dkey=value -Dnumbers="one two three" 28 | -------------------------------------------------------------------------------- /components/spark/etc/spark-default/workers.template: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # A Spark Worker will be started on each of the machines listed below. 19 | localhost -------------------------------------------------------------------------------- /components/spark/script/start.sh: -------------------------------------------------------------------------------- 1 | export HADOOP_CLASSPATH=`hadoop classpath` -------------------------------------------------------------------------------- /components/zookeeper/.env: -------------------------------------------------------------------------------- 1 | BIGDATA_HOME=/opt/bigdata 2 | BIGDATA_CONF=$BIGDATA_HOME/etc 3 | BIGDATA_LOG=$BIGDATA_HOME/logs 4 | ZK_HOME=$BIGDATA_HOME/zookeeper 5 | ZOOCFGDIR=$BIGDATA_CONF/zookeeper 6 | ZOO_LOG_DIR=$BIGDATA_LOG/zookeeper 7 | PATH=$ZK_HOME/bin:$PATH 8 | -------------------------------------------------------------------------------- /components/zookeeper/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/components/zookeeper/README.md -------------------------------------------------------------------------------- /components/zookeeper/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | _base: 3 | image: base-data:1.0 4 | volumes: 5 | - ${INSTALL}/zookeeper:/opt/bigdata/zookeeper 6 | - ./etc/default:/opt/bigdata/etc/zookeeper 7 | - ./script:/opt/bigdata/script 8 | zk01: 9 | extends: 10 | service: _base 11 | working_dir: /opt/bigdata 12 | hostname: zk01 13 | networks: 14 | - cluster_net 15 | ports: 16 | - "2181:2181" 17 | env_file: 18 | - .env 19 | command: ["bash","./script/start.sh"] 20 | networks: 21 | cluster_net: 22 | external: true 23 | -------------------------------------------------------------------------------- /components/zookeeper/etc/default/zoo.cfg: -------------------------------------------------------------------------------- 1 | tickTime=2000 2 | clientPort=2181 3 | dataDir=/opt/bigdata/data/zookeeper 4 | initLimit=10 5 | syncLimit=5 6 |
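The zoo.cfg above configures a single standalone node. For the three-node ensemble that doc/Kafka.md below describes (zk1/zk2/zk3 with myid 1-3), each server would additionally need `server.N` entries — a sketch, assuming the bd-master-1 / hadoop-slave1 / hadoop-slave2 hostnames from that doc and the default ZooKeeper peer ports:

```
# Ensemble variant of zoo.cfg (sketch; hostnames taken from doc/Kafka.md,
# 2888/3888 are the ZooKeeper default quorum/election ports). Each host must
# also write its own id into /opt/bigdata/data/zookeeper/myid, e.g.
# `echo 1 > /opt/bigdata/data/zookeeper/myid` on server.1.
tickTime=2000
clientPort=2181
dataDir=/opt/bigdata/data/zookeeper
initLimit=10
syncLimit=5
server.1=bd-master-1:2888:3888
server.2=hadoop-slave1:2888:3888
server.3=hadoop-slave2:2888:3888
```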
-------------------------------------------------------------------------------- /components/zookeeper/script/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | zkServer.sh start 3 | sleep infinity 4 | -------------------------------------------------------------------------------- /doc/HBase.md: -------------------------------------------------------------------------------- 1 | ## Deployment 2 | 3 | See the Hadoop and Kafka deployment docs; start hadoop and zk first. 4 | 5 | | bd-master-1 | hadoop-slave1 | hadoop-slave2 | 6 | | :-------------------------: | :-----------: | :-----------: | 7 | | namenode,datanode | datanode | datanode | 8 | | resourcemanager,nodemanager | nodemanager | nodemanager | 9 | | HMaster,regionserver | regionserver | regionserver | 10 | | zk | zk | zk | 11 | 12 | ## Initialization 13 | 14 | ``` 15 | none 16 | ``` 17 | 18 | ## Start 19 | 20 | ``` 21 | #1 start hmaster (master node) 22 | hbase-daemon.sh start master 23 | 24 | #2 start regionserver (all nodes) 25 | hbase-daemon.sh start regionserver 26 | 27 | hbase-daemon.sh start zookeeper 28 | ``` 29 | 30 | -------------------------------------------------------------------------------- /doc/Kafka.md: -------------------------------------------------------------------------------- 1 | ## Deployment 2 | 3 | | bd-master-1 | hadoop-slave1 | hadoop-slave2 | 4 | | :-----------: | :-----------: | :-----------: | 5 | | zk1(myid=>1) | zk2(myid=>2) | zk3(myid=>3) | 6 | | kafka broker1 | kafka broker2 | kafka broker3 | 7 | 8 | ## Initialization 9 | 10 | ``` 11 | # 1. Create the data directory (all nodes) 12 | mkdir -p /opt/bigdata/data/zookeeper 13 | # 2. Set the zk myid (one value per node) 14 | echo 1 > /opt/bigdata/data/zookeeper/myid 15 | echo 2 > /opt/bigdata/data/zookeeper/myid 16 | echo 3 > /opt/bigdata/data/zookeeper/myid 17 | ``` 18 | 19 | ## Start 20 | 21 | ``` 22 | # 1 start zk (all nodes) 23 | zkServer.sh start 24 | 25 | # 2 start kafka 26 | kafka-server-start.sh -daemon /opt/bigdata/etc/kafka/server-1.properties 27 | kafka-server-start.sh -daemon /opt/bigdata/etc/kafka/server-2.properties 28 | kafka-server-start.sh -daemon /opt/bigdata/etc/kafka/server-3.properties 29 | 30 | #3 start kafka manager 31 | nohup kafka-manager & 32 | ``` 33 | 34 | ## Notes 35 | 36 | kafka manager has no separate configuration file; it uses the one under its installation directory. 37 | -------------------------------------------------------------------------------- /etc-example/alluxio-cli/alluxio-site.properties: -------------------------------------------------------------------------------- 1 | # 2 | # The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 3 | # (the "License"). You may not use this work except in compliance with the License, which is 4 | # available at www.apache.org/licenses/LICENSE-2.0 5 | # 6 | # This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 7 | # either express or implied, as more fully set forth in the License. 8 | # 9 | # See the NOTICE file distributed with this work for information regarding copyright ownership.
10 | # 11 | 12 | # Site specific configuration properties for Alluxio 13 | # Details about all configuration properties https://docs.alluxio.io/os/user/stable/en/reference/Properties-List.html 14 | 15 | alluxio.master.hostname=10.205.20.245 16 | #alluxio.master.mount.table.root.ufs=/opt/bigdata/alluxio/data 17 | 18 | #alluxio.worker.tieredstore.level0.dirs.path=/opt/bigdata/alluxio/dat 19 | alluxio.security.authentication.type=KERBEROS 20 | alluxio.security.authorization.permission.enabled=true 21 | -------------------------------------------------------------------------------- /etc-example/alluxio-cli/alluxio-site.properties.template: -------------------------------------------------------------------------------- 1 | # 2 | # The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 3 | # (the "License"). You may not use this work except in compliance with the License, which is 4 | # available at www.apache.org/licenses/LICENSE-2.0 5 | # 6 | # This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 7 | # either express or implied, as more fully set forth in the License. 8 | # 9 | # See the NOTICE file distributed with this work for information regarding copyright ownership. 10 | # 11 | 12 | # Site specific configuration properties for Alluxio 13 | # Details about all configuration properties https://docs.alluxio.io/os/user/stable/en/reference/Properties-List.html 14 | 15 | # Common properties 16 | # alluxio.master.hostname=localhost 17 | # alluxio.master.mount.table.root.ufs=${alluxio.work.dir}/underFSStorage 18 | 19 | # Security properties 20 | # alluxio.security.authorization.permission.enabled=true 21 | # alluxio.security.authentication.type=SIMPLE 22 | 23 | # Worker properties 24 | # alluxio.worker.memory.size=1GB 25 | # alluxio.worker.tieredstore.levels=1 26 | # alluxio.worker.tieredstore.level0.alias=MEM 27 | # alluxio.worker.tieredstore.level0.dirs.path=/mnt/ramdisk 28 | 29 | # User properties 30 | # alluxio.user.file.readtype.default=CACHE_PROMOTE 31 | # alluxio.user.file.writetype.default=MUST_CACHE 32 | -------------------------------------------------------------------------------- /etc-example/alluxio-cli/jaas.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iceqiw/EasyBigdata/fa822718a321d56d4f3000153ee2d793f7c6ba85/etc-example/alluxio-cli/jaas.conf -------------------------------------------------------------------------------- /etc-example/alluxio-cli/masters: -------------------------------------------------------------------------------- 1 | # 2 | # The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 3 | # (the "License"). You may not use this work except in compliance with the License, which is 4 | # available at www.apache.org/licenses/LICENSE-2.0 5 | # 6 | # This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 7 | # either express or implied, as more fully set forth in the License. 8 | # 9 | # See the NOTICE file distributed with this work for information regarding copyright ownership. 10 | # 11 | 12 | # An Alluxio master will be started on each of the machines listed below. 13 | # 14 | # In HA mode, Alluxio will use internal leader election or Zookeeper leader election 15 | # to decide which of the masters should act as the primary. 
--------------------------------------------------------------------------------
/etc-example/alluxio-security/alluxio-site.properties:
--------------------------------------------------------------------------------
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#

# Site specific configuration properties for Alluxio
# Details about all configuration properties https://docs.alluxio.io/os/user/stable/en/reference/Properties-List.html

# Common properties
alluxio.master.hostname=bd-master-1
alluxio.master.mount.table.root.ufs=/opt/bigdata/alluxioStore
alluxio.master.journal.folder=/opt/bigdata/alluxioJournal

# Security properties
alluxio.security.authentication.type=KERBEROS
alluxio.security.authorization.permission.enabled=true
alluxio.security.kerberos.unified.instance.name=bd-master-1
alluxio.security.kerberos.server.principal=alluxio/bd-master-1@HADOOP.COM
alluxio.security.kerberos.server.keytab.file=/opt/bigdata/alluxio.keytab
alluxio.security.authorization.capability.enabled=true
alluxio.user.conf.cluster.default.enabled=false

# Worker properties
# alluxio.worker.memory.size=1GB
# alluxio.worker.tieredstore.levels=1
# alluxio.worker.tieredstore.level0.alias=MEM
alluxio.worker.tieredstore.level0.dirs.path=/opt/bigdata/alluxioStore

# User properties
# alluxio.user.file.readtype.default=CACHE_PROMOTE
alluxio.user.file.writetype.default=MUST_CACHE
--------------------------------------------------------------------------------
/etc-example/alluxio-security/alluxio-site.properties.template:
--------------------------------------------------------------------------------
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#

# Site specific configuration properties for Alluxio
# Details about all configuration properties https://docs.alluxio.io/os/user/stable/en/reference/Properties-List.html

# Common properties
# alluxio.master.hostname=localhost
# alluxio.master.mount.table.root.ufs=${alluxio.work.dir}/underFSStorage

# Security properties
# alluxio.security.authorization.permission.enabled=true
# alluxio.security.authentication.type=SIMPLE

# Worker properties
# alluxio.worker.memory.size=1GB
# alluxio.worker.tieredstore.levels=1
# alluxio.worker.tieredstore.level0.alias=MEM
# alluxio.worker.tieredstore.level0.dirs.path=/mnt/ramdisk

# User properties
# alluxio.user.file.readtype.default=CACHE_PROMOTE
# alluxio.user.file.writetype.default=MUST_CACHE
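
The secured site properties above name the service principal alluxio/bd-master-1@HADOOP.COM and a
keytab at /opt/bigdata/alluxio.keytab. A sketch of provisioning both on the KDC container with the
standard kadmin.local tool:

    kadmin.local -q "addprinc -randkey alluxio/bd-master-1@HADOOP.COM"
    kadmin.local -q "ktadd -k /opt/bigdata/alluxio.keytab alluxio/bd-master-1@HADOOP.COM"
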
--------------------------------------------------------------------------------
/etc-example/alluxio-security/masters:
--------------------------------------------------------------------------------
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#

# An Alluxio master will be started on each of the machines listed below.
#
# In HA mode, Alluxio will use internal leader election or Zookeeper leader election
# to decide which of the masters should act as the primary. In non-HA mode, the master started
# on the host identified by the first entry in this file will act as the primary; the remaining
# hosts will be used to start secondary masters, which are responsible for journal compaction.
#
# The multi-master Zookeeper HA mode requires that all the masters can access
# the same journal through a shared medium (e.g. HDFS or NFS).
bd-master-1
--------------------------------------------------------------------------------
/etc-example/alluxio-security/workers:
--------------------------------------------------------------------------------
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#

# An Alluxio Worker will be started on each of the machines listed below.
bd-master-1
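
With masters and workers both listing bd-master-1, this is a single-node secure cluster; a sketch
of bringing it up from that host (assumes ALLUXIO_HOME is set and ssh to bd-master-1 works):

    $ALLUXIO_HOME/bin/alluxio format                  # initializes the journal under /opt/bigdata/alluxioJournal
    $ALLUXIO_HOME/bin/alluxio-start.sh all SudoMount  # starts master and worker, mounting storage with sudo
    $ALLUXIO_HOME/bin/alluxio fsadmin report          # expect one live master and one registered worker
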
--------------------------------------------------------------------------------
/etc-example/hbase/hbase-site.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements.  See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership.  The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://bd-master-1:9000/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>bd-master-1:2181</value>
  </property>
  <property>
    <name>hbase.unsafe.stream.capability.enforce</name>
    <value>false</value>
  </property>
</configuration>
--------------------------------------------------------------------------------
/etc-example/hbase/log4j-hbtop.properties:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=WARN,console
log4j.threshold=WARN

# console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n

# ZooKeeper will still put stuff at WARN
log4j.logger.org.apache.zookeeper=ERROR
--------------------------------------------------------------------------------
/etc-example/hbase/regionservers:
--------------------------------------------------------------------------------
localhost
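
With HDFS (hdfs://bd-master-1:9000) and ZooKeeper (bd-master-1:2181) running, this HBase layout can
be smoke-tested as follows (a sketch; HBASE_HOME is assumed to point at the hbase-2.4.18 install
recorded at the end of this repo):

    $HBASE_HOME/bin/start-hbase.sh                  # starts HMaster plus the regionservers listed above
    echo "status" | $HBASE_HOME/bin/hbase shell -n  # non-interactive status check
    $HBASE_HOME/bin/stop-hbase.sh
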
--------------------------------------------------------------------------------
/etc-example/hive-default/beeline-log4j.properties.template:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=WARN, console

######## console appender ########
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
log4j.appender.console.encoding=UTF-8
--------------------------------------------------------------------------------
/etc-example/hive-default/hive-site.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://bd-master-1:9083</value>
  </property>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive/warehouse</value>
  </property>
</configuration>
--------------------------------------------------------------------------------
/etc-example/hive-default/ivysettings.xml:
--------------------------------------------------------------------------------
<!-- (element markup not recoverable from this dump: the original is Hive's stock
     ivysettings.xml, an ASF license header plus Ivy resolver definitions) -->
--------------------------------------------------------------------------------
/etc-example/hive-sec/beeline-log4j.properties.template:
--------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=WARN, console

######## console appender ########
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
log4j.appender.console.encoding=UTF-8
--------------------------------------------------------------------------------
/etc-example/hive-sec/ivysettings.xml:
--------------------------------------------------------------------------------
<!-- (element markup not recoverable from this dump: the original is Hive's stock
     ivysettings.xml, an ASF license header plus Ivy resolver definitions) -->
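
The hive-site.xml above only wires clients to the metastore at thrift://bd-master-1:9083. A sketch
of bringing the services up and connecting (assumes HIVE_HOME is set, HDFS is running, and the
metastore schema was already initialized with schematool; 10000 is HiveServer2's default port):

    $HIVE_HOME/bin/hive --service metastore &      # serves thrift://bd-master-1:9083
    $HIVE_HOME/bin/hive --service hiveserver2 &    # JDBC endpoint for beeline
    $HIVE_HOME/bin/beeline -u jdbc:hive2://bd-master-1:10000 -e 'show databases;'
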
--------------------------------------------------------------------------------
/etc-example/kafka/server.properties:
--------------------------------------------------------------------------------
broker.id=1
log.dirs=/opt/bigdata/logs/kafka
zookeeper.connect=bd-master-1:2181
offsets.topic.replication.factor=1
--------------------------------------------------------------------------------
/etc-example/kerberos/krb5.conf:
--------------------------------------------------------------------------------
# Configuration snippets may be placed in this directory as well

[logging]
 default = FILE:/var/log/krb5libs.log
 kdc = FILE:/var/log/krb5kdc.log
 admin_server = FILE:/var/log/kadmind.log

[libdefaults]
 dns_lookup_kdc = false
 dns_lookup_realm = false
 ticket_lifetime = 24h
 renew_lifetime = 7d
 forwardable = true
 default_realm = HADOOP.COM
 udp_preference_limit = 1

[realms]
 HADOOP.COM = {
  kdc = kdc
  admin_server = kdc
 }

[domain_realm]
 .hadoop.com = HADOOP.COM
 hadoop.com = HADOOP.COM
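
This client profile resolves the realm HADOOP.COM against the container named "kdc". A quick check
from any node that mounts it (reusing the principal and keytab from the Alluxio security example):

    kinit -kt /opt/bigdata/alluxio.keytab alluxio/bd-master-1@HADOOP.COM
    klist    # should show a TGT for krbtgt/HADOOP.COM@HADOOP.COM, valid for ticket_lifetime (24h)
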
--------------------------------------------------------------------------------
/etc-example/tez/tez-site.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>tez.lib.uris</name>
    <value>${fs.defaultFS}/app/tez/tez-0.9.2-minimal.tar.gz</value>
  </property>
  <property>
    <name>tez.use.cluster.hadoop-libs</name>
    <value>true</value>
  </property>
</configuration>
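
tez.lib.uris above expects the minimal Tez tarball under /app/tez on the default filesystem, so it
has to be uploaded once before running Tez jobs (a sketch; assumes the hdfs CLI and a local copy of
the tarball):

    hdfs dfs -mkdir -p /app/tez
    hdfs dfs -put tez-0.9.2-minimal.tar.gz /app/tez/
    hdfs dfs -ls /app/tez    # ${fs.defaultFS}/app/tez/tez-0.9.2-minimal.tar.gz should now resolve
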
--------------------------------------------------------------------------------
/etc-example/zeppelin/conf/configuration.xsl:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="html"/>
<xsl:template match="configuration">
<html>
<body>
<table border="1">
<tr>
 <td>name</td>
 <td>value</td>
 <td>description</td>
</tr>
<xsl:for-each select="property">
<tr>
  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
  <td><xsl:value-of select="value"/></td>
  <td><xsl:value-of select="description"/></td>
</tr>
</xsl:for-each>
</table>
</body>
</html>
</xsl:template>
</xsl:stylesheet>
--------------------------------------------------------------------------------
/etc-example/zeppelin/conf/log4j.properties:
--------------------------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

log4j.rootLogger = INFO, dailyfile

log4j.appender.stdout = org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n

log4j.appender.dailyfile.DatePattern=.yyyy-MM-dd
log4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender
log4j.appender.dailyfile.File = ${zeppelin.log.file}
log4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout
log4j.appender.dailyfile.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n
--------------------------------------------------------------------------------
/etc-example/zeppelin/conf/log4j_yarn_cluster.properties:
--------------------------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

log4j.rootLogger = INFO, stdout

log4j.appender.stdout = org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n
--------------------------------------------------------------------------------
/etc-example/zeppelin/conf/notebook-authorization.json:
--------------------------------------------------------------------------------
{"authInfo":{}}
--------------------------------------------------------------------------------
/etc-example/zeppelin/conf/zeppelin-site.xml:
--------------------------------------------------------------------------------
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->
<configuration>
  <property>
    <name>zeppelin.server.addr</name>
    <value>0.0.0.0</value>
    <description>Server binding address</description>
  </property>

  <property>
    <name>zeppelin.server.port</name>
    <value>18080</value>
    <description>Server port.</description>
  </property>
</configuration>
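
zeppelin-site.xml binds the server to 0.0.0.0:18080, so once the daemon is up the UI should answer
on any interface (a sketch; ZEPPELIN_HOME is an assumed install path):

    $ZEPPELIN_HOME/bin/zeppelin-daemon.sh start
    curl -s -o /dev/null -w '%{http_code}\n' http://localhost:18080/    # expect 200
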
--------------------------------------------------------------------------------
/etc-example/zookeeper/zoo.cfg:
--------------------------------------------------------------------------------
tickTime=2000
clientPort=2181
dataDir=/opt/bigdata/data/zookeeper
dataLogDir=/opt/bigdata/logs/zookeeper
initLimit=10
syncLimit=5
--------------------------------------------------------------------------------
/install/flink:
--------------------------------------------------------------------------------
/home/wei/Tools/install/flink-1.19.0
--------------------------------------------------------------------------------
/install/hadoop:
--------------------------------------------------------------------------------
/home/wei/Tools/install/hadoop-3.3.6
--------------------------------------------------------------------------------
/install/hbase:
--------------------------------------------------------------------------------
/home/wei/Tools/install/hbase-2.4.18
--------------------------------------------------------------------------------
/install/hive:
--------------------------------------------------------------------------------
/home/wei/Tools/install/apache-hive-4.0.0-bin
--------------------------------------------------------------------------------
/install/kafka:
--------------------------------------------------------------------------------
/home/wei/Tools/install/kafka_2.12-3.7.0
--------------------------------------------------------------------------------
/install/spark:
--------------------------------------------------------------------------------
/home/wei/Tools/install/spark-3.4.3-bin-hadoop3
--------------------------------------------------------------------------------
/install/zookeeper:
--------------------------------------------------------------------------------
/home/wei/Tools/install/apache-zookeeper-3.8.4-bin
--------------------------------------------------------------------------------
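
The install/ entries above are symlinks into a local download directory; on a fresh checkout they
can be recreated the same way (a sketch, using the exact targets recorded in the repo):

    cd install
    ln -sfn /home/wei/Tools/install/flink-1.19.0 flink
    ln -sfn /home/wei/Tools/install/hadoop-3.3.6 hadoop
    ln -sfn /home/wei/Tools/install/hbase-2.4.18 hbase
    ln -sfn /home/wei/Tools/install/apache-hive-4.0.0-bin hive
    ln -sfn /home/wei/Tools/install/kafka_2.12-3.7.0 kafka
    ln -sfn /home/wei/Tools/install/spark-3.4.3-bin-hadoop3 spark
    ln -sfn /home/wei/Tools/install/apache-zookeeper-3.8.4-bin zookeeper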