├── .dockerignore ├── .gitignore ├── .travis.yml ├── CHANGES.md ├── Dockerfile ├── LICENSE ├── README.md ├── checkstyle.xml ├── docker_build.sh ├── hadoop-mini-clusters-activemq ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── ActivemqLocalBroker.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── ActivemqLocalBrokerIntegrationTest.java │ └── ActivemqLocalBrokerTest.java ├── hadoop-mini-clusters-common ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ ├── MiniCluster.java │ │ │ ├── auth │ │ │ └── Jaas.java │ │ │ ├── config │ │ │ ├── ConfigVars.java │ │ │ └── package-info.java │ │ │ ├── http │ │ │ └── HttpUtils.java │ │ │ └── util │ │ │ ├── FileUtils.java │ │ │ └── WindowsLibsUtils.java │ └── resources │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ ├── http │ └── HttpUtilsTest.java │ └── util │ └── FileUtilsTest.java ├── hadoop-mini-clusters-hbase ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ ├── HbaseLocalCluster.java │ │ │ └── HbaseRestLocalCluster.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── HbaseLocalClusterIntegrationTest.java │ └── HbaseLocalClusterTest.java ├── hadoop-mini-clusters-hdfs ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── HdfsLocalCluster.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── HdfsLocalClusterIntegrationTest.java │ └── HdfsLocalClusterTest.java ├── hadoop-mini-clusters-hivemetastore ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── HiveLocalMetaStore.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── HiveLocalMetaStoreIntegrationTest.java │ └── HiveLocalMetaStoreTest.java ├── hadoop-mini-clusters-hiveserver2 ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── HiveLocalServer2.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── HiveLocalServer2IntegrationTest.java │ └── HiveLocalServer2Test.java ├── hadoop-mini-clusters-hyperscaledb ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── HsqldbLocalServer.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── HsqldbLocalServerIntegrationTest.java │ └── HsqldbLocalServerTest.java ├── hadoop-mini-clusters-kafka ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ ├── impl │ │ │ └── KafkaLocalBroker.java │ │ │ └── systemtime │ │ │ └── 
LocalSystemTime.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ ├── datatime │ └── GenerateRandomDay.java │ ├── impl │ ├── KafkaLocalBrokerIntegrationTest.java │ └── KafkaLocalBrokerTest.java │ └── kafka │ ├── consumer │ └── KafkaTestConsumer.java │ └── producer │ └── KafkaSimpleTestProducer.java ├── hadoop-mini-clusters-kdc ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── KdcLocalCluster.java │ └── resources │ │ └── default.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── KdcLocalClusterHBaseIntegrationTest.java │ ├── KdcLocalClusterHdfsIntegrationTest.java │ ├── KdcLocalClusterTest.java │ └── KdcLocalClusterZookeeperIntegrationTest.java ├── hadoop-mini-clusters-knox ├── pom.xml └── src │ ├── main │ ├── java │ │ ├── com │ │ │ └── github │ │ │ │ └── sakserv │ │ │ │ └── minicluster │ │ │ │ └── impl │ │ │ │ ├── KnoxLocalCluster.java │ │ │ │ └── LocalGatewayConfig.java │ │ └── org │ │ │ └── apache │ │ │ └── hadoop │ │ │ └── gateway │ │ │ ├── GatewayServer.java │ │ │ └── GatewayServlet.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── KnoxLocalClusterIntegrationTest.java │ └── KnoxLocalClusterTest.java ├── hadoop-mini-clusters-mapreduce ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── MRLocalCluster.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ ├── java │ └── com │ │ └── github │ │ └── sakserv │ │ └── minicluster │ │ ├── impl │ │ ├── MRLocalClusterIntegrationTest.java │ │ └── MRLocalClusterTest.java │ │ └── mapreduce │ │ ├── Driver.java │ │ ├── SumReducer.java │ │ └── WordMapper.java │ └── resources │ └── mr_input.txt ├── hadoop-mini-clusters-mongodb ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── MongodbLocalServer.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── MongodbLocalServerIntegrationTest.java │ └── MongodbLocalServerTest.java ├── hadoop-mini-clusters-oozie ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ ├── impl │ │ │ └── OozieLocalServer.java │ │ │ └── oozie │ │ │ ├── sharelib │ │ │ ├── Framework.java │ │ │ └── util │ │ │ │ └── OozieShareLibUtil.java │ │ │ └── util │ │ │ └── OozieConfigUtil.java │ └── resources │ │ ├── default.properties │ │ ├── localoozie-log4j.properties │ │ ├── log4j.properties │ │ ├── sharelib.properties │ │ └── test_input.txt │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── OozieLocalServerIntegrationTest.java │ └── OozieLocalServerTest.java ├── hadoop-mini-clusters-storm ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── StormLocalCluster.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ ├── impl │ ├── StormLocalClusterIntegrationTest.java │ └── StormLocalClusterTest.java │ └── 
storm │ ├── bolt │ └── PrinterBolt.java │ └── spout │ └── RandomSentenceSpout.java ├── hadoop-mini-clusters-yarn ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ ├── impl │ │ │ └── YarnLocalCluster.java │ │ │ └── yarn │ │ │ ├── InJvmContainerExecutor.java │ │ │ ├── SystemExitException.java │ │ │ └── util │ │ │ ├── EnvironmentUtils.java │ │ │ ├── ExecJavaCliParser.java │ │ │ ├── ExecShellCliParser.java │ │ │ └── ReflectionUtils.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ ├── java │ └── com │ │ └── github │ │ └── sakserv │ │ └── minicluster │ │ ├── impl │ │ ├── YarnLocalClusterInJvmContainerExecutorTest.java │ │ ├── YarnLocalClusterIntegrationTest.java │ │ └── YarnLocalClusterTest.java │ │ └── yarn │ │ ├── InJvmContainerExecutorTest.java │ │ ├── SystemExitExceptionTest.java │ │ ├── simpleyarnapp │ │ └── Client.java │ │ └── util │ │ ├── EnvironmentUtilsTest.java │ │ ├── ExecJavaCliParserTest.java │ │ ├── ExecShellCliParserTest.java │ │ └── ReflectionUtilsTest.java │ └── resources │ └── simple-yarn-app-1.1.0.jar ├── hadoop-mini-clusters-zookeeper ├── pom.xml └── src │ ├── main │ ├── java │ │ └── com │ │ │ └── github │ │ │ └── sakserv │ │ │ └── minicluster │ │ │ └── impl │ │ │ └── ZookeeperLocalCluster.java │ └── resources │ │ ├── default.properties │ │ └── log4j.properties │ └── test │ └── java │ └── com │ └── github │ └── sakserv │ └── minicluster │ └── impl │ ├── ZookeeperLocalClusterIntegrationTest.java │ └── ZookeeperLocalClusterTest.java ├── pom.xml └── windows_libs ├── 2.3.0.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.3.2.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.3.4.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.4.0.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.4.2.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.5.0.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.5.3.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.6.0.3 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib ├── 2.6.1.0 ├── bin │ └── winutils.exe └── lib │ ├── hadoop.dll │ ├── hdfs.dll │ └── libwinutils.lib └── 2.6.2.0 ├── bin └── winutils.exe └── lib ├── hadoop.dll ├── hdfs.dll └── libwinutils.lib /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | 3 | .DS_Store 4 | 5 | # Mobile Tools for Java (J2ME) 6 | .mtj.tmp/ 7 | 8 | # Package Files # 9 | #*.jar 10 | *.war 11 | *.ear 12 | 13 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 14 | hs_err_pid* 15 | 16 | # Build 17 | target/ 18 | build/ 19 | 20 | # Idea 21 | .idea 22 | */*.iml 23 | *.iml 24 | 25 | # Coverage report 26 | coverage-error.log 27 | 28 | # Oozie sharelib cache 29 | share_lib_cache 30 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | 3 | 
jdk: 4 | - oraclejdk8 5 | 6 | cache: 7 | directories: 8 | - $HOME/.m2 9 | 10 | script: travis_wait 60 mvn -B clean test 11 | 12 | after_success: travis_wait 60 mvn -B clean cobertura:cobertura coveralls:report 13 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | Change Log 2 | ========== 3 | ### 0.1.16 - 1/18/2019 4 | * Add support for HDP 2.6.5.0 5 | 6 | ### 0.1.15 - 9/14/2018 7 | * Add support for HDP 2.6.3.0 8 | * Fix kdc NPE 9 | * Fix HADOOP_HOME on Windows 10 | * Fix missing libx on Windows 11 | 12 | ### 0.1.14 - 9/25/2017 13 | * Add mini kdc (Thanks @treydone) 14 | * Improve oozie documentation 15 | * Add support for HDP 2.6.2.0 16 | 17 | ### 0.1.13 - 7/11/2017 18 | * Add proxy support for sharelib downloads (Thanks @rajesh-kumar) 19 | * Work around for Knox dependency conflicts (Thanks @jetoile) 20 | 21 | ### 0.1.12 - 6/29/2017 22 | * Fix YARN RM address and hostname for issue #27 23 | * Remove Intellij assets 24 | * Fix directory clean up; add cleanup for Knox and Storm 25 | * Oozie share lib fixes for "real" workflows 26 | * HBase InfoServer is properly shutdown 27 | * Allow NimbusClient to work with StormLocalCluster 28 | * Fixed ports for MapReduce Job History Server 29 | * Add support for HDP 2.6.0 and 2.6.1 30 | 31 | ### 0.1.11 - 1/20/2017 32 | * Make RestBuilder public (Thanks @jetoile) 33 | 34 | ### 0.1.10 - 1/16/2017 35 | * HBase REST support (Thanks @jetoile) 36 | * Knox support (Thanks Treydone) 37 | 38 | ### 0.1.9 - 12/26/2016 39 | * Make JDK 8 the default 40 | * Add support for HDP 2.5.3 41 | * Add Kafka 2.10.1 (Thanks @timvw) 42 | * Fix LocalOozie logging for issue #21 43 | 44 | ### 0.1.8 - 10/03/2016 45 | * Add support for HDP 2.5.0 46 | * Eliminate fat jar for hbase (Thanks @isendel) 47 | * Coordinator support for oozie (Thanks @RomanKovalik) 48 | 49 | ### 0.1.7 - 05/12/2016 50 | * Add support for HDP 2.4.2 51 | 52 | ### 0.1.6 - 05/09/2016 53 | * Add support for Curator InstanceSpec to improve ZK flexibility 54 | 55 | ### 0.1.5 - 04/06/2016 56 | * Add support for HDP 2.4.0 57 | * Handle the evolving Kafka apis via reflection 58 | * Fix LMAX disruptor conflict in storm-core + hbase-testing-utils 59 | * Added script to allow running tests in a docker container 60 | 61 | ### 0.1.4 - 02/10/2016 62 | * Add support for HDP 2.3.4 63 | 64 | ### 0.1.3 - 12/21/2015 65 | * Add support for WebHDFS via setHdfsNamenodeHttpPort 66 | 67 | ### 0.1.2 - 12/01/2015 68 | * Add more flexibility to windows libs location via HADOOP_HOME 69 | 70 | ### 0.1.1 - 11/02/2015 71 | * Fix maven build vars as a workaround for the maven release plugin 72 | 73 | ### 0.1.0 - 11/01/2015 74 | * Major changes in this release 75 | * Moved each mini cluster to a seperate module to reduce deps - fixes #3 76 | * Removed the shade plugin 77 | * Added maven profile support to allow for supporting multiple versions of HDP 78 | * Added Oozie Share Lib support - fixes #2 79 | * Added Windows support - fixes #1 80 | * Avoid needlessly creating Strings for log messages 81 | * Cleaned up imports 82 | 83 | ### 0.0.15 - 08/24/2015 84 | * Upgraded dependencies to Hadoop 2.7.1 (HDP 2.3) 85 | 86 | ### 0.0.14 - 07/28/2015 87 | * Added Oozie Support 88 | * Added optional argument to HDFS to enable the current user as a proxy user 89 | 90 | ### 0.0.13 - 07/04/2015 91 | * Added YARN Support 92 | * Added MRv2 Support 93 | * Added HBase Support 94 | * Added support for the InJvmContainerExecutor 95 | 
* Updated dependencies to apache releases due to HWX repo issues 96 | * Added additional details to the README 97 | * 98% code coverage for all classes (less InJvmContainerExecutor) 98 | 99 | ### 0.0.12 - 02/08/2015 100 | * Added HyperSQL support 101 | 102 | ### 0.0.11 - 02/02/2015 103 | * Fixed shade plugin to resolve corrupt jar issues 104 | * Added usage examples to README 105 | 106 | ### 0.0.10 - 02/02/2015 - DO NOT USE 107 | * RELEASE NUKED DUE TO SHADE PLUGIN PRODUCING A BAD JAR 108 | * Breaking Change: Moved all mini clusters to the builder pattern 109 | * Moved configuration to properties file 110 | * Split unit and integration tests 111 | * Refactored the pom 112 | 113 | ### 0.0.9 - 01/19/2015 - DO NOT USE 114 | * RELEASE NUKED DUE TO SHADE PLUGIN PRODUCING A BAD JAR 115 | * Moved to log4j 116 | * Added proper assertions 117 | * Option to wait on topology kill for StormLocalCluster 118 | * Added ASL headers 119 | * Added a proper README 120 | 121 | ### 0.0.8 - 01/08/2015 122 | * Added embedded MongodbLocalServer 123 | 124 | ### 0.0.7 - 01/07/2015 125 | * Added missing calls to cleanUp() 126 | 127 | ### 0.0.6 - 01/06/2015 128 | * First Release 129 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:latest 2 | 3 | RUN yum install maven wget unzip -y 4 | RUN cd /tmp && wget https://github.com/sakserv/hadoop-mini-clusters/archive/master.zip && unzip master.zip && cd hadoop-mini-clusters-master && mvn clean test 5 | -------------------------------------------------------------------------------- /docker_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_NAME=$(basename $0) 4 | SCRIPT_PATH=$(cd `dirname $0` && pwd) 5 | 6 | docker build --no-cache -t hadoop-mini-clusters $SCRIPT_PATH 7 | docker run -m 6g -d sakserv/hadoop-mini-clusters 8 | #docker rm $(docker ps -a --filter ancestor=hadoop-mini-clusters) 9 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-activemq/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-activemq 11 | 12 | 13 | 14 | 15 | 16 | org.apache.activemq 17 | activemq-all 18 | ${activemq.version} 19 | 20 | 21 | 22 | 23 | com.github.sakserv 24 | hadoop-mini-clusters-common 25 | ${project.version} 26 | 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-activemq/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # ActiveMQ 2 | activemq.hostname=localhost 3 | activemq.port=61616 4 | activemq.queue=defaultQueue 5 | activemq.store.dir=activemq-data 6 | activemq.uri.prefix=vm:// 7 | activemq.uri.postfix=?create=false -------------------------------------------------------------------------------- /hadoop-mini-clusters-activemq/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | 
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-activemq/src/test/java/com/github/sakserv/minicluster/impl/ActivemqLocalBrokerIntegrationTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.impl; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | 5 | import java.io.IOException; 6 | 7 | import javax.jms.JMSException; 8 | 9 | import org.junit.AfterClass; 10 | import org.junit.BeforeClass; 11 | import org.junit.Test; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import com.github.sakserv.minicluster.config.ConfigVars; 16 | import com.github.sakserv.propertyparser.PropertyParser; 17 | 18 | public class ActivemqLocalBrokerIntegrationTest { 19 | 20 | // Logger 21 | private static final Logger LOG = LoggerFactory.getLogger(ActivemqLocalBrokerIntegrationTest.class); 22 | 23 | // Setup the property parser 24 | private static PropertyParser propertyParser; 25 | static { 26 | try { 27 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 28 | propertyParser.parsePropsFile(); 29 | } catch(IOException e) { 30 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 31 | } 32 | } 33 | 34 | // Setup the activemq broker before running tests 35 | private static ActivemqLocalBroker amq; 36 | 37 | @BeforeClass 38 | public static void setUp() throws Exception { 39 | amq = new ActivemqLocalBroker.Builder() 40 | .setHostName(propertyParser.getProperty(ConfigVars.ACTIVEMQ_HOSTNAME_KEY)) 41 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ACTIVEMQ_PORT_KEY))) 42 | .setQueueName(propertyParser.getProperty(ConfigVars.ACTIVEMQ_QUEUE_NAME_KEY)) 43 | .setStoreDir(propertyParser.getProperty(ConfigVars.ACTIVEMQ_STORE_DIR_KEY)) 44 | .setUriPrefix(propertyParser.getProperty(ConfigVars.ACTIVEMQ_URI_PREFIX_KEY)) 45 | .setUriPostfix(propertyParser.getProperty(ConfigVars.ACTIVEMQ_URI_POSTFIX_KEY)) 46 | .build(); 47 | 48 | amq.start(); 49 | } 50 | 51 | 52 | // Stop and cleanup when tests are finished 53 | @AfterClass 54 | public static void tearDown() throws Exception { 55 | amq.stop(); 56 | } 57 | 58 | @Test 59 | /* 60 | sends lots of short messages and one long one 61 | */ 62 | public void testMessageProcessing() throws JMSException { 63 | int n = 10000; 64 | String msg; 65 | 66 | LOG.info("ACTIVEMQ: Sending {} messages", n); 67 | 68 | //send a lot of messages 69 | for (int i = 0; i < n; i++) { 70 | msg = "hello from active mq. 
" + n; 71 | amq.sendTextMessage(msg); 72 | assertEquals(msg,amq.getTextMessage()); 73 | } 74 | 75 | //send a really long message 76 | StringBuilder sb = new StringBuilder(n); 77 | for (int i = 0; i < n; i++) { 78 | sb.append(n).append(" "); 79 | } 80 | msg = sb.toString(); 81 | amq.sendTextMessage(msg); 82 | assertEquals(msg,amq.getTextMessage()); 83 | 84 | } 85 | 86 | } 87 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-common 11 | 12 | 13 | 14 | 15 | org.apache.hadoop 16 | hadoop-client 17 | ${hadoop.version} 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/MiniCluster.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster; 16 | 17 | public interface MiniCluster { 18 | 19 | public void start() throws Exception; 20 | 21 | public void stop() throws Exception; 22 | 23 | public void stop(boolean cleanUp) throws Exception; 24 | 25 | public void configure() throws Exception; 26 | 27 | public void cleanUp() throws Exception; 28 | 29 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/auth/Jaas.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.auth; 2 | 3 | import javax.security.auth.login.AppConfigurationEntry; 4 | import javax.security.auth.login.Configuration; 5 | import java.util.HashMap; 6 | import java.util.Map; 7 | 8 | public class Jaas extends Configuration { 9 | 10 | private static final String krb5LoginModuleName; 11 | public static final String NL = "\n"; 12 | 13 | static { 14 | if (System.getProperty("java.vendor").contains("IBM")) { 15 | krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule"; 16 | } else { 17 | krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule"; 18 | } 19 | } 20 | 21 | private Map entries = new HashMap(); 22 | 23 | public Jaas addServiceEntry(String name, String principal, String keytab, String serviceName) { 24 | Map options = common(principal, keytab); 25 | options.put("serviceName", serviceName); 26 | entries.put(name, new AppConfigurationEntry(krb5LoginModuleName, AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options)); 27 | return this; 28 | } 29 | 30 | public Jaas addEntry(String name, String principal, String keytab) { 31 | Map options = common(principal, keytab); 32 | entries.put(name, new AppConfigurationEntry(krb5LoginModuleName, 
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options)); 33 | return this; 34 | } 35 | 36 | protected static Map common(String principal, String keytab) { 37 | Map options = new HashMap<>(); 38 | options.put("keyTab", keytab); 39 | options.put("principal", principal); 40 | options.put("useKeyTab", "true"); 41 | options.put("storeKey", "true"); 42 | options.put("useTicketCache", "false"); 43 | options.put("debug", "true"); 44 | return options; 45 | } 46 | 47 | public void removeEntry(String name) { 48 | entries.remove(name); 49 | } 50 | 51 | public void clear() { 52 | entries.clear(); 53 | } 54 | 55 | public Map getEntries() { 56 | return entries; 57 | } 58 | 59 | public AppConfigurationEntry[] getAppConfigurationEntry(String name) { 60 | return new AppConfigurationEntry[]{entries.get(name)}; 61 | } 62 | 63 | public String toFile() { 64 | StringBuilder builder = new StringBuilder(); 65 | entries.forEach((e, v) -> { 66 | builder 67 | .append(e).append(" {").append(NL) 68 | .append("\t").append(krb5LoginModuleName).append(" requiered").append(NL); 69 | v.getOptions().forEach((o, p) -> 70 | builder.append("\t").append(o).append(" = ").append("\"" + p + "\"").append(NL)); 71 | builder.append("}"); 72 | }); 73 | return builder.toString(); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/config/package-info.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | /** 16 | * Provides the classes that contain the list of supported configuration variables. 17 | * 18 | * @since 0.1.0 19 | * @author Shane Kumpf 20 | */ 21 | package com.github.sakserv.minicluster.config; -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/http/HttpUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.github.sakserv.minicluster.http; 15 | 16 | import java.io.BufferedInputStream; 17 | import java.io.BufferedOutputStream; 18 | import java.io.File; 19 | import java.io.FileOutputStream; 20 | import java.io.IOException; 21 | import java.net.*; 22 | 23 | import org.slf4j.Logger; 24 | import org.slf4j.LoggerFactory; 25 | 26 | public class HttpUtils { 27 | 28 | // Logger 29 | private static final Logger LOG = LoggerFactory.getLogger(HttpUtils.class); 30 | 31 | // Proxy properties 32 | private static final String PROXY_PROPERTY_NAME = "HTTP_PROXY"; 33 | private static final String ALL_PROXY_PROPERTY_NAME = "ALL_PROXY"; 34 | 35 | public static void downloadFileWithProgress(String fileUrl, String outputFilePath) throws IOException { 36 | String fileName = fileUrl.substring(fileUrl.lastIndexOf('/') + 1); 37 | URL url = new URL(fileUrl); 38 | HttpURLConnection httpURLConnection; 39 | 40 | //Check if system proxy is set 41 | Proxy proxySettings = returnProxyIfEnabled(); 42 | if (proxySettings != null) { 43 | httpURLConnection = (HttpURLConnection) (url.openConnection(proxySettings)); 44 | } else { 45 | httpURLConnection = (HttpURLConnection) (url.openConnection()); 46 | } 47 | long fileSize = httpURLConnection.getContentLength(); 48 | 49 | // Create the parent output directory if it doesn't exis 50 | if (!new File(outputFilePath).getParentFile().isDirectory()) { 51 | new File(outputFilePath).getParentFile().mkdirs(); 52 | } 53 | 54 | BufferedInputStream bufferedInputStream = new BufferedInputStream(httpURLConnection.getInputStream()); 55 | FileOutputStream fileOutputStream = new FileOutputStream(outputFilePath); 56 | BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(fileOutputStream, 1024); 57 | 58 | byte[] data = new byte[1024]; 59 | long downloadedFileSize = 0; 60 | 61 | Integer previousProgress = 0; 62 | int x = 0; 63 | while ((x = bufferedInputStream.read(data, 0, 1024)) >= 0) { 64 | downloadedFileSize += x; 65 | 66 | final int currentProgress = (int) (((double) downloadedFileSize / (double) fileSize) * 100d); 67 | if (!previousProgress.equals(currentProgress)) { 68 | LOG.info("HTTP: Download Status: Filename {} - {}% ({}/{})", fileName, currentProgress, 69 | downloadedFileSize, fileSize); 70 | previousProgress = currentProgress; 71 | } 72 | 73 | bufferedOutputStream.write(data, 0, x); 74 | } 75 | bufferedOutputStream.close(); 76 | bufferedInputStream.close(); 77 | } 78 | 79 | public static Proxy returnProxyIfEnabled() { 80 | LOG.debug("returnProxyIfEnabled() start!!"); 81 | String proxyStarturl = "http://"; 82 | 83 | String proxyURLString = System.getProperty(PROXY_PROPERTY_NAME) != null ? System.getProperty(PROXY_PROPERTY_NAME) 84 | : System.getProperty(PROXY_PROPERTY_NAME.toLowerCase()); 85 | String allproxyURLString = System.getProperty(ALL_PROXY_PROPERTY_NAME) != null 86 | ? System.getProperty(ALL_PROXY_PROPERTY_NAME) : System.getProperty(ALL_PROXY_PROPERTY_NAME.toLowerCase()); 87 | //Pick PROXY URL from two widely used system properties 88 | String finalProxyString = proxyURLString != null ? proxyURLString : allproxyURLString; 89 | URL proxyURL = null; 90 | 91 | try { 92 | //If Proxy URL starts with HTTP then use HTTP PROXY settings 93 | if (finalProxyString != null && finalProxyString.toLowerCase().startsWith(proxyStarturl)) { 94 | // Basic method to validate proxy URL is correct or not. 
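// (Clarifying note, not in the original source.) The branches below accept three
// proxy forms: "http://host:port" is used as an HTTP proxy, a bare "host:port"
// with no scheme is assumed to be HTTP, and "socks*://host:port" is rewritten
// and used as a SOCKS proxy; anything else (or a malformed URL) yields null.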
95 | proxyURL = returnParsedURL(finalProxyString); 96 | LOG.debug("protocol of proxy used is: " + proxyURL.getProtocol()); 97 | return new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyURL.getHost(), proxyURL.getPort())); 98 | //If Proxy URL starts with no protocol then assume it is HTTP 99 | } else if (finalProxyString != null && !finalProxyString.contains("://") 100 | && finalProxyString.split(":").length == 2) { 101 | 102 | LOG.debug("protocol of proxy used is: http default"); 103 | proxyURL = returnParsedURL(proxyStarturl.concat(finalProxyString)); 104 | return proxyURL != null ? new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyURL.getHost(), proxyURL.getPort())) : null; 105 | //If Proxy URL starts with SOCKS4 or SOCKS5 protocol then go for SOCKS settings 106 | } else if (finalProxyString != null && finalProxyString.toLowerCase().startsWith("sock") 107 | && finalProxyString.split("://").length == 2) { 108 | LOG.debug("protocol of proxy used is: Socks"); 109 | proxyURL = returnParsedURL(proxyStarturl.concat(finalProxyString.split("://")[1])); 110 | return proxyURL != null ? new Proxy(Proxy.Type.SOCKS, new InetSocketAddress(proxyURL.getHost(), proxyURL.getPort())) : null; 111 | } 112 | } catch (MalformedURLException | URISyntaxException mUE) { 113 | LOG.error("Can not configure Proxy because URL {} is incorrect: " + mUE, finalProxyString); 114 | } 115 | 116 | return null; 117 | } 118 | 119 | private static URL returnParsedURL(String urlString) throws MalformedURLException, URISyntaxException { 120 | if (urlString != null) { 121 | URL url = new URL(urlString); 122 | url.toURI(); 123 | LOG.info("System has been set to use proxy. Hence, configuring proxy URL: {}", urlString); 124 | return url; 125 | } 126 | return null; 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/util/FileUtils.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.util; 2 | 3 | import org.slf4j.LoggerFactory; 4 | 5 | import java.io.IOException; 6 | import java.nio.file.*; 7 | import java.nio.file.attribute.BasicFileAttributes; 8 | 9 | public final class FileUtils { 10 | 11 | // Logger 12 | private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(FileUtils.class); 13 | 14 | public static void deleteFolder(String directory, boolean quietly) { 15 | try { 16 | Path directoryPath = Paths.get(directory).toAbsolutePath(); 17 | if (!quietly) { 18 | LOG.info("FILEUTILS: Deleting contents of directory: {}", 19 | directoryPath.toAbsolutePath().toString()); 20 | } 21 | Files.walkFileTree(directoryPath, new SimpleFileVisitor() { 22 | @Override 23 | public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) 24 | throws IOException { 25 | Files.delete(file); 26 | if (!quietly) { 27 | LOG.info("Removing file: {}", file.toAbsolutePath().toString()); 28 | } 29 | return FileVisitResult.CONTINUE; 30 | } 31 | 32 | @Override 33 | public FileVisitResult postVisitDirectory(Path dir, IOException exc) 34 | throws IOException { 35 | Files.delete(dir); 36 | if (!quietly) { 37 | LOG.info("Removing directory: {}", dir.toAbsolutePath().toString()); 38 | } 39 | return FileVisitResult.CONTINUE; 40 | } 41 | }); 42 | } catch (IOException e) { 43 | LOG.error("FILEUTILS: Unable to remove {}", directory); 44 | } 45 | } 46 | 47 | public static void deleteFolder(String directory) { 48 | deleteFolder(directory, false); 49 | } 
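    // Usage sketch (illustrative comment, not in the original source), exercising the
    // two overloads defined above; the "embedded_hdfs" path mirrors the temp dirs
    // used elsewhere in this project:
    //   FileUtils.deleteFolder("embedded_hdfs");        // logs each deleted path
    //   FileUtils.deleteFolder("embedded_hdfs", true);  // deletes quietly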
50 | 51 | @Override 52 | public String toString() { 53 | return "FileUtils"; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/util/WindowsLibsUtils.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.util; 2 | 3 | import java.io.File; 4 | 5 | import org.apache.hadoop.fs.Path; 6 | import org.slf4j.Logger; 7 | import org.slf4j.LoggerFactory; 8 | 9 | public class WindowsLibsUtils { 10 | 11 | // Logger 12 | private static final Logger LOG = LoggerFactory.getLogger(WindowsLibsUtils.class); 13 | 14 | public static void setHadoopHome() { 15 | 16 | // Set hadoop.home.dir to point to the windows lib dir 17 | if (System.getProperty("os.name").startsWith("Windows")) { 18 | 19 | String windowsLibDir = getHadoopHome(); 20 | 21 | LOG.info("WINDOWS: Setting hadoop.home.dir: {}", windowsLibDir); 22 | System.setProperty("hadoop.home.dir", windowsLibDir); 23 | System.load(new File(windowsLibDir + Path.SEPARATOR + "lib" + Path.SEPARATOR + "hadoop.dll").getAbsolutePath()); 24 | System.load(new File(windowsLibDir + Path.SEPARATOR + "lib" + Path.SEPARATOR + "hdfs.dll").getAbsolutePath()); 25 | 26 | } 27 | } 28 | 29 | public static String getHadoopHome() { 30 | 31 | if(System.getProperty("HADOOP_HOME") != null) { 32 | LOG.info("HADOOP_HOME: " + System.getProperty("HADOOP_HOME")); 33 | return System.getProperty("HADOOP_HOME"); 34 | } else if (System.getenv("HADOOP_HOME") != null) { //takes the hadoop home from system environment variable 35 | LOG.info("HADOOP_HOME: " + System.getenv("HADOOP_HOME")); 36 | return System.getenv("HADOOP_HOME"); 37 | } else { 38 | 39 | File windowsLibDir = new File("." + Path.SEPARATOR + "windows_libs" + 40 | Path.SEPARATOR + System.getProperty("hdp.release.version")); 41 | 42 | if (!windowsLibDir.exists()) { 43 | windowsLibDir = new File(".." + Path.SEPARATOR + windowsLibDir); 44 | if (!windowsLibDir.exists()) { 45 | LOG.error("WINDOWS: ERROR: Could not find windows native libs"); 46 | } 47 | } 48 | return windowsLibDir.getAbsolutePath(); 49 | } 50 | 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/test/java/com/github/sakserv/minicluster/http/HttpUtilsTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. 
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package com.github.sakserv.minicluster.http; 18 | 19 | import static org.junit.Assert.assertEquals; 20 | import static org.junit.Assert.assertNotNull; 21 | import static org.junit.Assert.assertNull; 22 | 23 | import java.net.Proxy; 24 | 25 | import org.junit.Test; 26 | 27 | public class HttpUtilsTest { 28 | 29 | @Test 30 | public void testReturnProxyIfProxyPropsAreSetToNull() { 31 | System.clearProperty("HTTP_PROXY"); 32 | System.clearProperty("ALL_PROXY"); 33 | assertNull(HttpUtils.returnProxyIfEnabled()); 34 | } 35 | 36 | @Test 37 | public void testReturnProxyIfHTTPProxyIsSet() { 38 | System.setProperty("HTTP_PROXY", "http://104.207.145.113:3128"); 39 | System.clearProperty("ALL_PROXY"); 40 | assertNotNull(HttpUtils.returnProxyIfEnabled()); 41 | assertEquals("/104.207.145.113:3128", HttpUtils.returnProxyIfEnabled().address().toString()); 42 | assertEquals(Proxy.Type.HTTP, HttpUtils.returnProxyIfEnabled().type()); 43 | } 44 | 45 | @Test 46 | public void testReturnProxyIfSOCKProxyIsSet() { 47 | System.setProperty("HTTP_PROXY", "sock5://207.98.253.161:10200"); 48 | System.clearProperty("ALL_PROXY"); 49 | assertNotNull(HttpUtils.returnProxyIfEnabled()); 50 | assertEquals("/207.98.253.161:10200", HttpUtils.returnProxyIfEnabled().address().toString()); 51 | assertEquals(Proxy.Type.SOCKS, HttpUtils.returnProxyIfEnabled().type()); 52 | } 53 | 54 | @Test 55 | public void testReturnProxyIfSOCKProxyIsSetGnomeClient() { 56 | System.clearProperty("HTTP_PROXY"); 57 | System.setProperty("ALL_PROXY", "sock5://207.98.253.161:10200"); 58 | assertNotNull(HttpUtils.returnProxyIfEnabled()); 59 | assertEquals("/207.98.253.161:10200", HttpUtils.returnProxyIfEnabled().address().toString()); 60 | assertEquals(Proxy.Type.SOCKS, HttpUtils.returnProxyIfEnabled().type()); 61 | } 62 | 63 | @Test 64 | public void testReturnProxyIfHTTPProxyIsSetGnomeClient() { 65 | System.clearProperty("HTTP_PROXY"); 66 | System.setProperty("ALL_PROXY", "104.207.145.113:3128"); 67 | assertNotNull(HttpUtils.returnProxyIfEnabled()); 68 | assertEquals("/104.207.145.113:3128", HttpUtils.returnProxyIfEnabled().address().toString()); 69 | assertEquals(Proxy.Type.HTTP, HttpUtils.returnProxyIfEnabled().type()); 70 | } 71 | 72 | @Test 73 | public void testReturnProxyIfProxyHasInvalidUrl() { 74 | System.setProperty("HTTP_PROXY", "104.207.145.113"); 75 | System.clearProperty("ALL_PROXY"); 76 | assertNull(HttpUtils.returnProxyIfEnabled()); 77 | } 78 | 79 | @Test 80 | public void testReturnProxyIfProxyHasInvalidUrlWithoutPort() { 81 | System.setProperty("HTTP_PROXY", "http104.207.145.113"); 82 | System.clearProperty("ALL_PROXY"); 83 | assertNull(HttpUtils.returnProxyIfEnabled()); 84 | } 85 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-common/src/test/java/com/github/sakserv/minicluster/util/FileUtilsTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.util; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | 5 | import org.junit.Test; 6 | 7 | /* 8 | * 
Licensed under the Apache License, Version 2.0 (the "License"); 9 | * you may not use this file except in compliance with the License. 10 | * You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */public class FileUtilsTest { 20 | 21 | @Test 22 | public void testToString() throws Exception { 23 | FileUtils fileUtils = new FileUtils(); 24 | assertEquals("FileUtils", fileUtils.toString()); 25 | 26 | } 27 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-hbase/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-hbase 11 | 12 | 13 | 14 | 15 | 16 | org.apache.hbase 17 | hbase-rest 18 | ${hbase.version} 19 | 20 | 21 | 22 | 23 | org.apache.hbase 24 | hbase-client 25 | ${hbase.version} 26 | 27 | 28 | 29 | 30 | org.apache.hbase 31 | hbase-testing-util 32 | ${hbase.version} 33 | 34 | 35 | 36 | 37 | com.github.sakserv 38 | hadoop-mini-clusters-zookeeper 39 | ${project.version} 40 | test 41 | 42 | 43 | 44 | 45 | com.github.sakserv 46 | hadoop-mini-clusters-common 47 | ${project.version} 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | org.apache.maven.plugins 56 | maven-shade-plugin 57 | 2.4.3 58 | 59 | 60 | package 61 | 62 | shade 63 | 64 | 65 | true 66 | 67 | 68 | com.lmax 69 | shaded.com.lmax 70 | 71 | 72 | com.lmax 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-hbase/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # Zookeeper 2 | zookeeper.temp.dir=embedded_zk 3 | zookeeper.host=127.0.0.1 4 | zookeeper.port=22010 5 | zookeeper.connection.string=127.0.0.1:22010 6 | 7 | # HBase 8 | hbase.master.port=25111 9 | hbase.master.info.port=-1 10 | hbase.num.region.servers=1 11 | hbase.root.dir=embedded_hbase 12 | hbase.znode.parent=/hbase-unsecure 13 | hbase.wal.replication.enabled=false 14 | 15 | # HBase REST 16 | hbase.rest.port=28000 17 | hbase.rest.readonly=false 18 | hbase.rest.info.port=28080 19 | hbase.rest.host=0.0.0.0 20 | hbase.rest.threads.max=100 21 | hbase.rest.threads.min=2 22 | 23 | # HBase Test 24 | hbase.test.table.name=hbase_test_table 25 | hbase.test.col.family.name=cf1 26 | hbase.test.col.qualifier.name=cq1 27 | hbase.test.num.rows.to.put=50 -------------------------------------------------------------------------------- /hadoop-mini-clusters-hbase/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- 
/hadoop-mini-clusters-hdfs/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-hdfs 11 | 12 | 13 | 14 | 15 | 16 | org.apache.hadoop 17 | hadoop-client 18 | ${hadoop.version} 19 | 20 | 21 | 22 | 23 | org.apache.hadoop 24 | hadoop-minicluster 25 | ${hadoop.version} 26 | 27 | 28 | 29 | 30 | com.github.sakserv 31 | hadoop-mini-clusters-common 32 | ${project.version} 33 | 34 | 35 | 37 | 38 | org.apache.httpcomponents 39 | httpclient 40 | ${httpclient.version} 41 | test 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-hdfs/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # HDFS 2 | hdfs.namenode.port=20112 3 | hdfs.namenode.http.port=50070 4 | hdfs.temp.dir=embedded_hdfs 5 | hdfs.num.datanodes=1 6 | hdfs.enable.permissions=false 7 | hdfs.format=true 8 | hdfs.enable.running.user.as.proxy.user=true 9 | 10 | # HDFS Test 11 | hdfs.test.file=/tmp/testing 12 | hdfs.test.string=TESTING -------------------------------------------------------------------------------- /hadoop-mini-clusters-hdfs/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-hdfs/src/test/java/com/github/sakserv/minicluster/impl/HdfsLocalClusterIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import org.apache.http.client.HttpClient; 18 | import org.apache.http.impl.client.HttpClients; 19 | import static org.junit.Assert.assertEquals; 20 | 21 | import java.io.BufferedReader; 22 | import java.io.IOException; 23 | import java.io.InputStreamReader; 24 | import java.net.URL; 25 | import java.net.URLConnection; 26 | 27 | import org.apache.hadoop.conf.Configuration; 28 | import org.apache.hadoop.fs.FSDataInputStream; 29 | import org.apache.hadoop.fs.FSDataOutputStream; 30 | import org.apache.hadoop.fs.FileSystem; 31 | import org.apache.hadoop.fs.Path; 32 | import org.junit.AfterClass; 33 | import org.junit.BeforeClass; 34 | import org.junit.Test; 35 | import org.slf4j.Logger; 36 | import org.slf4j.LoggerFactory; 37 | 38 | import com.github.sakserv.minicluster.config.ConfigVars; 39 | import com.github.sakserv.propertyparser.PropertyParser; 40 | 41 | public class HdfsLocalClusterIntegrationTest { 42 | 43 | // Logger 44 | private static final Logger LOG = LoggerFactory.getLogger(HdfsLocalClusterIntegrationTest.class); 45 | 46 | // Setup the property parser 47 | private static PropertyParser propertyParser; 48 | static { 49 | try { 50 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 51 | propertyParser.parsePropsFile(); 52 | } catch(IOException e) { 53 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 54 | } 55 | } 56 | 57 | private static HdfsLocalCluster dfsCluster; 58 | 59 | @BeforeClass 60 | public static void setUp() throws Exception { 61 | dfsCluster = new HdfsLocalCluster.Builder() 62 | .setHdfsNamenodePort(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_PORT_KEY))) 63 | .setHdfsNamenodeHttpPort( Integer.parseInt( propertyParser.getProperty( ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY ) ) ) 64 | .setHdfsTempDir(propertyParser.getProperty(ConfigVars.HDFS_TEMP_DIR_KEY)) 65 | .setHdfsNumDatanodes(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NUM_DATANODES_KEY))) 66 | .setHdfsEnablePermissions( 67 | Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_ENABLE_PERMISSIONS_KEY))) 68 | .setHdfsFormat(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_FORMAT_KEY))) 69 | .setHdfsEnableRunningUserAsProxyUser(Boolean.parseBoolean( 70 | propertyParser.getProperty(ConfigVars.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER))) 71 | .setHdfsConfig(new Configuration()) 72 | .build(); 73 | dfsCluster.start(); 74 | } 75 | 76 | @AfterClass 77 | public static void tearDown() throws Exception { 78 | dfsCluster.stop(); 79 | } 80 | 81 | @Test 82 | public void testDfsClusterStart() throws Exception { 83 | 84 | // Write a file to HDFS containing the test string 85 | FileSystem hdfsFsHandle = dfsCluster.getHdfsFileSystemHandle(); 86 | FSDataOutputStream writer = hdfsFsHandle.create( 87 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY))); 88 | writer.writeUTF(propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY)); 89 | writer.close(); 90 | 91 | // Read the file and compare to test string 92 | FSDataInputStream reader = hdfsFsHandle.open( 93 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY))); 94 | assertEquals(reader.readUTF(), propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY)); 95 | reader.close(); 96 | hdfsFsHandle.close(); 97 | 98 | URL url = new URL( 99 | String.format( "http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest", 100 | 
propertyParser.getProperty( ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY ) ) ); 101 | URLConnection connection = url.openConnection(); 102 | connection.setRequestProperty( "Accept-Charset", "UTF-8" ); 103 | BufferedReader response = new BufferedReader( new InputStreamReader( connection.getInputStream() ) ); 104 | String line = response.readLine(); 105 | response.close(); 106 | assertEquals( "{\"Path\":\"/user/guest\"}", line ); 107 | 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-hivemetastore/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-hivemetastore 11 | 12 | 13 | 14 | 15 | 16 | 17 | org.apache.hadoop 18 | hadoop-client 19 | ${hadoop.version} 20 | 21 | 22 | 23 | 24 | org.apache.hive 25 | hive-exec 26 | ${hive.version} 27 | 28 | 29 | 30 | 31 | com.github.sakserv 32 | hadoop-mini-clusters-common 33 | ${project.version} 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-hivemetastore/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # Hive 2 | hive.scratch.dir=hive_scratch_dir 3 | hive.warehouse.dir=warehouse_dir 4 | 5 | # Hive Metastore 6 | hive.metastore.hostname=localhost 7 | hive.metastore.port=20102 8 | hive.metastore.derby.db.dir=metastore_db 9 | 10 | # Hive Test 11 | hive.test.database.name=default 12 | hive.test.table.name=test_table -------------------------------------------------------------------------------- /hadoop-mini-clusters-hivemetastore/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-hiveserver2/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-hiveserver2 11 | 12 | 13 | 14 | 15 | 16 | 17 | org.apache.hadoop 18 | hadoop-client 19 | ${hadoop.version} 20 | 21 | 22 | 23 | 24 | org.apache.hive 25 | hive-service 26 | ${hive.version} 27 | 28 | 29 | 30 | 31 | org.apache.hive 32 | hive-jdbc 33 | ${hive.version} 34 | test 35 | 36 | 37 | 38 | 39 | com.github.sakserv 40 | hadoop-mini-clusters-zookeeper 41 | ${project.version} 42 | test 43 | 44 | 45 | 46 | 47 | com.github.sakserv 48 | hadoop-mini-clusters-hivemetastore 49 | ${project.version} 50 | test 51 | 52 | 53 | 54 | 55 | com.github.sakserv 56 | hadoop-mini-clusters-common 57 | ${project.version} 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-hiveserver2/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # Zookeeper 2 | zookeeper.temp.dir=embedded_zk 3 | zookeeper.host=127.0.0.1 4 | 
zookeeper.port=22010 5 | zookeeper.connection.string=127.0.0.1:22010 6 | 7 | # Hive 8 | hive.scratch.dir=hive_scratch_dir 9 | hive.warehouse.dir=warehouse_dir 10 | 11 | # Hive Metastore 12 | hive.metastore.hostname=localhost 13 | hive.metastore.port=20202 14 | hive.metastore.derby.db.dir=metastore_db 15 | 16 | # Hive Server2 17 | hive.server2.hostname=localhost 18 | hive.server2.port=20203 19 | 20 | # Hive Test 21 | hive.test.database.name=default 22 | hive.test.table.name=test_table -------------------------------------------------------------------------------- /hadoop-mini-clusters-hiveserver2/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-hyperscaledb/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-hyperscaledb 11 | 12 | 13 | 14 | 15 | 16 | org.hsqldb 17 | hsqldb 18 | ${hsqldb.version} 19 | 20 | 21 | 22 | 23 | mysql 24 | mysql-connector-java 25 | ${mysql-connector-java.version} 26 | test 27 | 28 | 29 | 30 | 31 | com.github.sakserv 32 | hadoop-mini-clusters-common 33 | ${project.version} 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-hyperscaledb/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # HSQLDB 2 | hsqldb.hostname=127.0.0.1 3 | hsqldb.port=44111 4 | hsqldb.temp.dir=embedded_hsqldb 5 | hsqldb.database.name=testdb 6 | hsqldb.compatibility.mode=mysql 7 | hsqldb.jdbc.driver=org.hsqldb.jdbc.JDBCDriver 8 | hsqldb.jdbc.connection.string.prefix=jdbc:hsqldb:hsql:// -------------------------------------------------------------------------------- /hadoop-mini-clusters-hyperscaledb/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-hyperscaledb/src/test/java/com/github/sakserv/minicluster/impl/HsqldbLocalServerIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 
4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | package com.github.sakserv.minicluster.impl; 15 | 16 | import static org.hamcrest.CoreMatchers.containsString; 17 | import static org.junit.Assert.assertThat; 18 | import static org.junit.Assert.assertTrue; 19 | 20 | import java.io.IOException; 21 | import java.sql.Connection; 22 | import java.sql.DriverManager; 23 | import java.sql.ResultSet; 24 | import java.sql.SQLException; 25 | import java.sql.Statement; 26 | 27 | import org.junit.AfterClass; 28 | import org.junit.BeforeClass; 29 | import org.junit.Test; 30 | import org.slf4j.Logger; 31 | import org.slf4j.LoggerFactory; 32 | 33 | import com.github.sakserv.minicluster.config.ConfigVars; 34 | import com.github.sakserv.propertyparser.PropertyParser; 35 | 36 | public class HsqldbLocalServerIntegrationTest { 37 | 38 | // Logger 39 | private static final Logger LOG = LoggerFactory.getLogger(HsqldbLocalServerIntegrationTest.class); 40 | 41 | // Setup the property parser 42 | private static PropertyParser propertyParser; 43 | static { 44 | try { 45 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 46 | propertyParser.parsePropsFile(); 47 | } catch(IOException e) { 48 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 49 | } 50 | } 51 | 52 | private static HsqldbLocalServer hsqldbLocalServer; 53 | 54 | @BeforeClass 55 | public static void setUp() throws Exception { 56 | hsqldbLocalServer = new HsqldbLocalServer.Builder() 57 | .setHsqldbHostName(propertyParser.getProperty(ConfigVars.HSQLDB_HOSTNAME_KEY)) 58 | .setHsqldbPort(propertyParser.getProperty(ConfigVars.HSQLDB_PORT_KEY)) 59 | .setHsqldbTempDir(propertyParser.getProperty(ConfigVars.HSQLDB_TEMP_DIR_KEY)) 60 | .setHsqldbDatabaseName(propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY)) 61 | .setHsqldbCompatibilityMode(propertyParser.getProperty(ConfigVars.HSQLDB_COMPATIBILITY_MODE_KEY)) 62 | .setHsqldbJdbcDriver(propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_DRIVER_KEY)) 63 | .setHsqldbJdbcConnectionStringPrefix(propertyParser.getProperty( 64 | ConfigVars.HSQLDB_JDBC_CONNECTION_STRING_PREFIX_KEY)) 65 | .build(); 66 | hsqldbLocalServer.start(); 67 | } 68 | 69 | @AfterClass 70 | public static void tearDown() throws Exception { 71 | hsqldbLocalServer.stop(); 72 | } 73 | 74 | @Test 75 | public void testHsqldbLocalServer() throws ClassNotFoundException, SQLException { 76 | 77 | LOG.info("HSQLDB: Running User: {}", System.getProperty("user.name")); 78 | 79 | LOG.info("HSQLDB: Loading the JDBC Driver: {}", propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_DRIVER_KEY)); 80 | Class.forName(propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_DRIVER_KEY)); 81 | 82 | // Get the connection 83 | Connection connection = DriverManager.getConnection( 84 | propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_CONNECTION_STRING_PREFIX_KEY) + 85 | propertyParser.getProperty(ConfigVars.HSQLDB_HOSTNAME_KEY) + ":" + 86 | propertyParser.getProperty(ConfigVars.HSQLDB_PORT_KEY) + "/" + 87 | propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY), 88 | "SA", ""); 89 | 
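// Editorial note: with the defaults from this module's default.properties,
// the URL assembled above expands to jdbc:hsqldb:hsql://127.0.0.1:44111/testdb.
// "SA" with an empty password is HSQLDB's built-in system administrator
// account, so no user provisioning is needed against the embedded server.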
assertThat(connection.getMetaData().getURL(), 90 | containsString(propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY))); 91 | } 92 | 93 | @Test 94 | public void testHsqldbMysqlCompatibilityMode() throws SQLException { 95 | Connection connection = DriverManager.getConnection( 96 | propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_CONNECTION_STRING_PREFIX_KEY) + 97 | propertyParser.getProperty(ConfigVars.HSQLDB_HOSTNAME_KEY) + ":" + 98 | propertyParser.getProperty(ConfigVars.HSQLDB_PORT_KEY) + "/" + 99 | propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY), 100 | "SA", ""); 101 | Statement statement = connection.createStatement(); 102 | statement.executeQuery(hsqldbLocalServer.getHsqldbCompatibilityModeStatement()); 103 | 104 | statement = connection.createStatement(); 105 | ResultSet resultSet = statement.executeQuery("SELECT PROPERTY_VALUE FROM INFORMATION_SCHEMA.SYSTEM_PROPERTIES WHERE PROPERTY_NAME = 'sql.syntax_mys'"); 106 | while(resultSet.next()) { 107 | assertTrue(Boolean.parseBoolean(resultSet.getString(1))); 108 | } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-kafka/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-kafka 11 | 12 | 13 | 1.3.7 14 | 15 | 16 | 17 | 18 | 19 | 20 | org.apache.kafka 21 | ${kafka.artifactid.version} 22 | ${kafka.version} 23 | 24 | 25 | 26 | 27 | com.github.sakserv 28 | hadoop-mini-clusters-zookeeper 29 | ${project.version} 30 | test 31 | 32 | 33 | 34 | 35 | org.codehaus.jettison 36 | jettison 37 | ${jettison.version} 38 | test 39 | 40 | 41 | 42 | 43 | com.github.sakserv 44 | hadoop-mini-clusters-common 45 | ${project.version} 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-kafka/src/main/java/com/github/sakserv/minicluster/systemtime/LocalSystemTime.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.github.sakserv.minicluster.systemtime; 15 | 16 | import org.apache.kafka.common.utils.Time; 17 | 18 | public class LocalSystemTime implements Time { 19 | 20 | @Override 21 | public long milliseconds() { 22 | return System.currentTimeMillis(); 23 | } 24 | 25 | @Override 26 | public long nanoseconds() { 27 | return System.nanoTime(); 28 | } 29 | 30 | @Override 31 | public void sleep(long ms) { 32 | try { 33 | Thread.sleep(ms); 34 | } catch (InterruptedException e) { 35 | // no stress 36 | } 37 | } 38 | 39 | @Override 40 | public long hiResClockMs() { 41 | return System.currentTimeMillis(); 42 | } 43 | 44 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-kafka/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # Zookeeper 2 | zookeeper.temp.dir=embedded_zk 3 | zookeeper.host=127.0.0.1 4 | zookeeper.port=22010 5 | zookeeper.connection.string=127.0.0.1:22010 6 | 7 | # Kafka 8 | kafka.hostname=localhost 9 | kafka.port=20111 10 | 11 | # Kafka Test 12 | kafka.test.topic=testtopic 13 | kafka.test.message.count=10 14 | kafka.test.broker.id=0 15 | kafka.test.temp.dir=embedded_kafka -------------------------------------------------------------------------------- /hadoop-mini-clusters-kafka/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-kafka/src/test/java/com/github/sakserv/minicluster/datatime/GenerateRandomDay.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | 15 | package com.github.sakserv.minicluster.datatime; 16 | 17 | import java.util.GregorianCalendar; 18 | 19 | public class GenerateRandomDay { 20 | 21 | public static String genRandomDay() { 22 | 23 | GregorianCalendar gc = new GregorianCalendar(); 24 | 25 | int year = randBetween(2013, 2014); 26 | 27 | gc.set(GregorianCalendar.YEAR, year); 28 | 29 | int dayOfYear = randBetween(1, gc.getActualMaximum(GregorianCalendar.DAY_OF_YEAR)); 30 | 31 | gc.set(GregorianCalendar.DAY_OF_YEAR, dayOfYear); 32 | // Calendar.MONTH is zero-based, so add 1 to render a real calendar month 33 | return String.format("%04d-%02d-%02d", gc.get(GregorianCalendar.YEAR), gc.get(GregorianCalendar.MONTH) + 1, gc.get(GregorianCalendar.DAY_OF_MONTH)); 34 | 35 | } 36 | 37 | public static int randBetween(int start, int end) { 38 | return start + (int)Math.round(Math.random() * (end - start)); 39 | } 40 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-kafka/src/test/java/com/github/sakserv/minicluster/impl/KafkaLocalBrokerIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import java.io.IOException; 18 | import java.util.ArrayList; 19 | import java.util.List; 20 | import java.util.Properties; 21 | 22 | import org.junit.AfterClass; 23 | import org.junit.Assert; 24 | import org.junit.BeforeClass; 25 | import org.junit.Test; 26 | import org.slf4j.Logger; 27 | import org.slf4j.LoggerFactory; 28 | 29 | import com.github.sakserv.minicluster.config.ConfigVars; 30 | import com.github.sakserv.minicluster.kafka.consumer.KafkaTestConsumer; 31 | import com.github.sakserv.minicluster.kafka.producer.KafkaSimpleTestProducer; 32 | import com.github.sakserv.propertyparser.PropertyParser; 33 | 34 | public class KafkaLocalBrokerIntegrationTest { 35 | 36 | // Logger 37 | private static final Logger LOG = LoggerFactory.getLogger(KafkaLocalBrokerIntegrationTest.class); 38 | 39 | // Setup the property parser 40 | private static PropertyParser propertyParser; 41 | static { 42 | try { 43 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 44 | propertyParser.parsePropsFile(); 45 | } catch(IOException e) { 46 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 47 | } 48 | } 49 | 50 | private static ZookeeperLocalCluster zookeeperLocalCluster; 51 | private static KafkaLocalBroker kafkaLocalBroker; 52 | 53 | @BeforeClass 54 | public static void setUp() throws Exception { 55 | zookeeperLocalCluster = new ZookeeperLocalCluster.Builder() 56 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 57 | .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY)) 58 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY)) 59 | .build(); 60 | zookeeperLocalCluster.start(); 61 | 62 | kafkaLocalBroker = new KafkaLocalBroker.Builder() 63 |
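// Editorial note: the broker is pointed at the ZooKeeper instance started
// above via the shared connection string; a classic Kafka broker registers
// itself in ZooKeeper on startup, which is why setUp() boots ZooKeeper first.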
.setKafkaHostname(propertyParser.getProperty(ConfigVars.KAFKA_HOSTNAME_KEY)) 64 | .setKafkaPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_PORT_KEY))) 65 | .setKafkaBrokerId(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_BROKER_ID_KEY))) 66 | .setKafkaProperties(new Properties()) 67 | .setKafkaTempDir(propertyParser.getProperty(ConfigVars.KAFKA_TEST_TEMP_DIR_KEY)) 68 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY)) 69 | .build(); 70 | kafkaLocalBroker.start(); 71 | 72 | } 73 | 74 | @AfterClass 75 | public static void tearDown() throws Exception { 76 | 77 | kafkaLocalBroker.stop(); 78 | zookeeperLocalCluster.stop(); 79 | } 80 | 81 | @Test 82 | public void testKafkaLocalBroker() throws Exception { 83 | 84 | // Producer 85 | KafkaSimpleTestProducer kafkaTestProducer = new KafkaSimpleTestProducer.Builder() 86 | .setKafkaHostname(propertyParser.getProperty(ConfigVars.KAFKA_HOSTNAME_KEY)) 87 | .setKafkaPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_PORT_KEY))) 88 | .setTopic(propertyParser.getProperty(ConfigVars.KAFKA_TEST_TOPIC_KEY)) 89 | .setMessageCount(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY))) 90 | .build(); 91 | kafkaTestProducer.produceMessages(); 92 | 93 | // Consumer 94 | List seeds = new ArrayList(); 95 | seeds.add(kafkaLocalBroker.getKafkaHostname()); 96 | KafkaTestConsumer kafkaTestConsumer = new KafkaTestConsumer(); 97 | kafkaTestConsumer.consumeMessages( 98 | Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY)), 99 | propertyParser.getProperty(ConfigVars.KAFKA_TEST_TOPIC_KEY), 100 | 0, 101 | seeds, 102 | kafkaLocalBroker.getKafkaPort()); 103 | 104 | 105 | 106 | // Assert num of messages produced = num of message consumed 107 | Assert.assertEquals(Long.parseLong(propertyParser.getProperty(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY)), 108 | kafkaTestConsumer.getNumRead()); 109 | 110 | } 111 | 112 | } 113 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-kafka/src/test/java/com/github/sakserv/minicluster/kafka/producer/KafkaSimpleTestProducer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.github.sakserv.minicluster.kafka.producer; 15 | 16 | import java.util.HashMap; 17 | import java.util.Map; 18 | 19 | import org.apache.kafka.clients.producer.KafkaProducer; 20 | import org.apache.kafka.clients.producer.ProducerConfig; 21 | import org.apache.kafka.clients.producer.ProducerRecord; 22 | import org.codehaus.jettison.json.JSONException; 23 | import org.codehaus.jettison.json.JSONObject; 24 | import org.slf4j.Logger; 25 | import org.slf4j.LoggerFactory; 26 | 27 | import com.github.sakserv.minicluster.datatime.GenerateRandomDay; 28 | 29 | public class KafkaSimpleTestProducer { 30 | 31 | // Logger 32 | private static final Logger LOG = LoggerFactory.getLogger(KafkaSimpleTestProducer.class); 33 | 34 | private String kafkaHostname; 35 | private Integer kafkaPort; 36 | private String topic; 37 | private Integer messageCount; 38 | 39 | private KafkaSimpleTestProducer(Builder builder) { 40 | this.kafkaHostname = builder.kafkaHostname; 41 | this.kafkaPort = builder.kafkaPort; 42 | this.topic = builder.topic; 43 | this.messageCount = builder.messageCount; 44 | } 45 | 46 | public String getKafkaHostname() { 47 | return kafkaHostname; 48 | } 49 | 50 | public Integer getKafkaPort() { 51 | return kafkaPort; 52 | } 53 | 54 | public String getTopic() { 55 | return topic; 56 | } 57 | 58 | public Integer getMessageCount() { 59 | return messageCount; 60 | } 61 | 62 | public static class Builder { 63 | private String kafkaHostname; 64 | private Integer kafkaPort; 65 | private String topic; 66 | private Integer messageCount; 67 | 68 | public Builder setKafkaHostname(String kafkaHostname) { 69 | this.kafkaHostname = kafkaHostname; 70 | return this; 71 | } 72 | 73 | public Builder setKafkaPort(Integer kafkaPort) { 74 | this.kafkaPort = kafkaPort; 75 | return this; 76 | } 77 | 78 | public Builder setTopic(String topic) { 79 | this.topic = topic; 80 | return this; 81 | } 82 | 83 | public Builder setMessageCount(Integer messageCount) { 84 | this.messageCount = messageCount; 85 | return this; 86 | } 87 | 88 | public KafkaSimpleTestProducer build() { 89 | KafkaSimpleTestProducer kafkaSimpleTestProducer = new KafkaSimpleTestProducer(this); 90 | return kafkaSimpleTestProducer; 91 | } 92 | 93 | } 94 | 95 | public Map<String, Object> createConfig() { 96 | Map<String, Object> config = new HashMap<String, Object>(); 97 | config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaHostname() + ":" + getKafkaPort()); 98 | config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 99 | config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 100 | return config; 101 | } 102 | 103 | public void produceMessages() { 104 | 105 | KafkaProducer<String, String> producer = new KafkaProducer<String, String>(createConfig()); 106 | 107 | int count = 0; 108 | while(count < getMessageCount()) { 109 | 110 | // Create the JSON object 111 | JSONObject obj = new JSONObject(); 112 | try { 113 | obj.put("id", String.valueOf(count)); 114 | obj.put("msg", "test-message" + count); 115 | obj.put("dt", GenerateRandomDay.genRandomDay()); 116 | } catch(JSONException e) { 117 | e.printStackTrace(); 118 | } 119 | String payload = obj.toString(); 120 | 121 | producer.send(new ProducerRecord<String, String>(getTopic(), payload)); 122 | LOG.info("Sent message: {}", payload); 123 | count++; 124 | } 125 | // Flush any buffered records and release the producer's I/O thread 126 | producer.close(); 127 | } 128 | 129 | } 130 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-kdc/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |
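Editorial aside: a minimal sketch of driving the producer class above outside of JUnit, substituting the literal values from this module's default.properties for the PropertyParser lookups; it assumes a broker is already listening on localhost:20111.

```java
KafkaSimpleTestProducer producer = new KafkaSimpleTestProducer.Builder()
        .setKafkaHostname("localhost")  // kafka.hostname
        .setKafkaPort(20111)            // kafka.port
        .setTopic("testtopic")          // kafka.test.topic
        .setMessageCount(10)            // kafka.test.message.count
        .build();
producer.produceMessages();             // sends 10 small JSON records
```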
hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-kdc 11 | 12 | 13 | 14 | 15 | 16 | org.apache.hadoop 17 | hadoop-minikdc 18 | ${hadoop.version} 19 | 20 | 21 | org.apache.directory.jdbm 22 | apacheds-jdbm1 23 | 24 | 25 | 26 | 27 | org.apache.hadoop 28 | hadoop-common 29 | ${hadoop.version} 30 | tests 31 | 32 | 33 | org.apache.directory.server 34 | apacheds-jdbm 35 | 2.0.0-M5 36 | 37 | 38 | 39 | 40 | com.github.sakserv 41 | hadoop-mini-clusters-common 42 | ${project.version} 43 | 44 | 45 | 46 | 47 | com.github.sakserv 48 | hadoop-mini-clusters-hdfs 49 | ${project.version} 50 | test 51 | 52 | 53 | 54 | 55 | com.github.sakserv 56 | hadoop-mini-clusters-hbase 57 | ${project.version} 58 | test 59 | 60 | 61 | com.github.sakserv 62 | hadoop-mini-clusters-zookeeper 63 | ${project.version} 64 | test 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-kdc/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # KDC 2 | kdc.host=127.0.0.1 3 | kdc.port=34340 4 | kdc.basedir=embedded_kdc 5 | kdc.org.domain=ORG 6 | kdc.org.name=ACME 7 | kdc.principals=hdfs,hbase,yarn,oozie,oozie_user,zookeeper,storm,mapreduce,HTTP 8 | kdc.krbinstance=127.0.0.1 9 | kdc.instance=DefaultKrbServer 10 | kdc.transport=TCP 11 | kdc.max.ticket.lifetime=86400000 12 | kdc.max.renewable.lifetime=604800000 13 | kdc.debug=false 14 | 15 | # HDFS 16 | hdfs.namenode.port=20112 17 | hdfs.namenode.http.port=50070 18 | hdfs.temp.dir=embedded_hdfs 19 | hdfs.num.datanodes=1 20 | hdfs.enable.permissions=false 21 | hdfs.format=true 22 | hdfs.enable.running.user.as.proxy.user=true 23 | 24 | # HDFS Test 25 | hdfs.test.file=/tmp/testing 26 | hdfs.test.string=TESTING 27 | 28 | # Zookeeper 29 | zookeeper.temp.dir=embedded_zk 30 | zookeeper.host=127.0.0.1 31 | zookeeper.port=22010 32 | zookeeper.connection.string=127.0.0.1:22010 33 | 34 | # HBase 35 | hbase.master.port=25111 36 | hbase.master.info.port=-1 37 | hbase.num.region.servers=1 38 | hbase.root.dir=embedded_hbase 39 | hbase.znode.parent=/hbase-secure 40 | hbase.wal.replication.enabled=false -------------------------------------------------------------------------------- /hadoop-mini-clusters-kdc/src/test/java/com/github/sakserv/minicluster/impl/KdcLocalClusterHdfsIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import com.github.sakserv.minicluster.config.ConfigVars; 18 | import com.github.sakserv.propertyparser.PropertyParser; 19 | import org.apache.hadoop.conf.Configuration; 20 | import org.apache.hadoop.fs.FSDataInputStream; 21 | import org.apache.hadoop.fs.FSDataOutputStream; 22 | import org.apache.hadoop.fs.FileSystem; 23 | import org.apache.hadoop.fs.Path; 24 | import org.apache.hadoop.hdfs.HdfsConfiguration; 25 | import org.apache.hadoop.security.AccessControlException; 26 | import org.apache.hadoop.security.UserGroupInformation; 27 | import org.junit.AfterClass; 28 | import org.junit.BeforeClass; 29 | import org.junit.Test; 30 | import org.slf4j.Logger; 31 | import org.slf4j.LoggerFactory; 32 | 33 | import java.io.IOException; 34 | 35 | import static org.junit.Assert.*; 36 | 37 | public class KdcLocalClusterHdfsIntegrationTest { 38 | 39 | // Logger 40 | private static final Logger LOG = LoggerFactory.getLogger(KdcLocalClusterHdfsIntegrationTest.class); 41 | 42 | // Setup the property parser 43 | private static PropertyParser propertyParser; 44 | 45 | static { 46 | try { 47 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 48 | propertyParser.parsePropsFile(); 49 | } catch (IOException e) { 50 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 51 | } 52 | } 53 | 54 | private static KdcLocalCluster kdcLocalCluster; 55 | private static HdfsLocalCluster hdfsLocalCluster; 56 | 57 | @BeforeClass 58 | public static void setUp() throws Exception { 59 | 60 | //System.setProperty("sun.security.krb5.debug", "true"); 61 | 62 | // KDC 63 | kdcLocalCluster = new KdcLocalCluster.Builder() 64 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_PORT_KEY))) 65 | .setHost(propertyParser.getProperty(ConfigVars.KDC_HOST_KEY)) 66 | .setBaseDir(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY)) 67 | .setOrgDomain(propertyParser.getProperty(ConfigVars.KDC_ORG_DOMAIN_KEY)) 68 | .setOrgName(propertyParser.getProperty(ConfigVars.KDC_ORG_NAME_KEY)) 69 | .setPrincipals(propertyParser.getProperty(ConfigVars.KDC_PRINCIPALS_KEY).split(",")) 70 | .setKrbInstance(propertyParser.getProperty(ConfigVars.KDC_KRBINSTANCE_KEY)) 71 | .setInstance(propertyParser.getProperty(ConfigVars.KDC_INSTANCE_KEY)) 72 | .setTransport(propertyParser.getProperty(ConfigVars.KDC_TRANSPORT)) 73 | .setMaxTicketLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_TICKET_LIFETIME_KEY))) 74 | .setMaxRenewableLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_RENEWABLE_LIFETIME))) 75 | .setDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.KDC_DEBUG))) 76 | .build(); 77 | kdcLocalCluster.start(); 78 | 79 | Configuration baseConf = kdcLocalCluster.getBaseConf(); 80 | 81 | //HDFS 82 | Configuration hdfsConfig = new HdfsConfiguration(); 83 | hdfsConfig.addResource(baseConf); 84 | hdfsLocalCluster = new HdfsLocalCluster.Builder() 85 | .setHdfsNamenodePort(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_PORT_KEY))) 86 | .setHdfsNamenodeHttpPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY))) 87 | .setHdfsTempDir(propertyParser.getProperty(ConfigVars.HDFS_TEMP_DIR_KEY)) 88 | .setHdfsNumDatanodes(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NUM_DATANODES_KEY))) 89 | .setHdfsEnablePermissions( 90 | 
Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_ENABLE_PERMISSIONS_KEY))) 91 | .setHdfsFormat(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_FORMAT_KEY))) 92 | .setHdfsEnableRunningUserAsProxyUser(Boolean.parseBoolean( 93 | propertyParser.getProperty(ConfigVars.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER))) 94 | .setHdfsConfig(hdfsConfig) 95 | .build(); 96 | hdfsLocalCluster.start(); 97 | } 98 | 99 | @AfterClass 100 | public static void tearDown() throws Exception { 101 | hdfsLocalCluster.stop(); 102 | kdcLocalCluster.stop(); 103 | } 104 | 105 | @Test 106 | public void testHdfs() throws Exception { 107 | FileSystem hdfsFsHandle = hdfsLocalCluster.getHdfsFileSystemHandle(); 108 | 109 | UserGroupInformation.loginUserFromKeytab(kdcLocalCluster.getKrbPrincipalWithRealm("hdfs"), kdcLocalCluster.getKeytabForPrincipal("hdfs")); 110 | 111 | assertTrue(UserGroupInformation.isSecurityEnabled()); 112 | assertTrue(UserGroupInformation.isLoginKeytabBased()); 113 | 114 | // Write a file to HDFS containing the test string 115 | FSDataOutputStream writer = hdfsFsHandle.create( 116 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY))); 117 | writer.writeUTF(propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY)); 118 | writer.close(); 119 | 120 | // Read the file and compare to test string 121 | FSDataInputStream reader = hdfsFsHandle.open( 122 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY))); 123 | assertEquals(reader.readUTF(), propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY)); 124 | reader.close(); 125 | 126 | // Log out 127 | UserGroupInformation.getLoginUser().logoutUserFromKeytab(); 128 | 129 | UserGroupInformation.reset(); 130 | 131 | try { 132 | Configuration conf = new Configuration(); 133 | UserGroupInformation.setConfiguration(conf); 134 | FileSystem.get(hdfsFsHandle.getUri(), conf).open( 135 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY))); 136 | fail(); 137 | } catch (AccessControlException e) { 138 | LOG.info("Not authenticated!"); 139 | } 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-kdc/src/test/java/com/github/sakserv/minicluster/impl/KdcLocalClusterZookeeperIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import com.github.sakserv.minicluster.auth.Jaas; 18 | import com.github.sakserv.minicluster.config.ConfigVars; 19 | import com.github.sakserv.minicluster.util.FileUtils; 20 | import com.github.sakserv.propertyparser.PropertyParser; 21 | import org.apache.curator.framework.CuratorFramework; 22 | import org.apache.curator.framework.CuratorFrameworkFactory; 23 | import org.apache.curator.retry.ExponentialBackoffRetry; 24 | import org.apache.zookeeper.CreateMode; 25 | import org.apache.zookeeper.KeeperException; 26 | import org.apache.zookeeper.ZooDefs; 27 | import org.apache.zookeeper.data.ACL; 28 | import org.junit.AfterClass; 29 | import org.junit.BeforeClass; 30 | import org.junit.Test; 31 | import org.slf4j.Logger; 32 | import org.slf4j.LoggerFactory; 33 | 34 | import java.io.IOException; 35 | import java.util.ArrayList; 36 | import java.util.HashMap; 37 | import java.util.List; 38 | import java.util.Map; 39 | 40 | import static org.junit.Assert.fail; 41 | 42 | public class KdcLocalClusterZookeeperIntegrationTest { 43 | 44 | // Logger 45 | private static final Logger LOG = LoggerFactory.getLogger(KdcLocalClusterZookeeperIntegrationTest.class); 46 | 47 | // Setup the property parser 48 | private static PropertyParser propertyParser; 49 | 50 | static { 51 | try { 52 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 53 | propertyParser.parsePropsFile(); 54 | } catch (IOException e) { 55 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 56 | } 57 | } 58 | 59 | private static KdcLocalCluster kdcLocalCluster; 60 | private static ZookeeperLocalCluster zookeeperLocalCluster; 61 | 62 | @BeforeClass 63 | public static void setUp() throws Exception { 64 | 65 | //System.setProperty("sun.security.krb5.debug", "true"); 66 | 67 | // Force clean 68 | FileUtils.deleteFolder(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY)); 69 | 70 | // KDC 71 | kdcLocalCluster = new KdcLocalCluster.Builder() 72 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_PORT_KEY))) 73 | .setHost(propertyParser.getProperty(ConfigVars.KDC_HOST_KEY)) 74 | .setBaseDir(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY)) 75 | .setOrgDomain(propertyParser.getProperty(ConfigVars.KDC_ORG_DOMAIN_KEY)) 76 | .setOrgName(propertyParser.getProperty(ConfigVars.KDC_ORG_NAME_KEY)) 77 | .setPrincipals(propertyParser.getProperty(ConfigVars.KDC_PRINCIPALS_KEY).split(",")) 78 | .setKrbInstance(propertyParser.getProperty(ConfigVars.KDC_KRBINSTANCE_KEY)) 79 | .setInstance(propertyParser.getProperty(ConfigVars.KDC_INSTANCE_KEY)) 80 | .setTransport(propertyParser.getProperty(ConfigVars.KDC_TRANSPORT)) 81 | .setMaxTicketLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_TICKET_LIFETIME_KEY))) 82 | .setMaxRenewableLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_RENEWABLE_LIFETIME))) 83 | .setDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.KDC_DEBUG))) 84 | .build(); 85 | kdcLocalCluster.start(); 86 | 87 | // Zookeeper 88 | Jaas jaas = new Jaas() 89 | .addServiceEntry("Server", kdcLocalCluster.getKrbPrincipal("zookeeper"), kdcLocalCluster.getKeytabForPrincipal("zookeeper"), "zookeeper"); 90 | javax.security.auth.login.Configuration.setConfiguration(jaas); 91 | 92 | Map properties = new HashMap<>(); 93 | properties.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider"); 94 | 
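// Editorial note: these custom properties configure the embedded ZooKeeper
// for SASL/Kerberos: the provider registration above, a required client auth
// scheme, the name of the JAAS section to use ("Server", matching the Jaas
// entry built earlier in setUp()), and principal canonicalisation rules.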
properties.put("requireClientAuthScheme", "sasl"); 95 | properties.put("sasl.serverconfig", "Server"); 96 | properties.put("kerberos.removeHostFromPrincipal", "true"); 97 | properties.put("kerberos.removeRealmFromPrincipal", "true"); 98 | 99 | zookeeperLocalCluster = new ZookeeperLocalCluster.Builder() 100 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 101 | .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY)) 102 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY)) 103 | .setCustomProperties(properties) 104 | .build(); 105 | zookeeperLocalCluster.start(); 106 | } 107 | 108 | @AfterClass 109 | public static void tearDown() throws Exception { 110 | zookeeperLocalCluster.stop(); 111 | kdcLocalCluster.stop(); 112 | } 113 | 114 | @Test 115 | public void testZookeeper() throws Exception { 116 | 117 | try (CuratorFramework client = CuratorFrameworkFactory.newClient(zookeeperLocalCluster.getZookeeperConnectionString(), 118 | new ExponentialBackoffRetry(1000, 3))) { 119 | client.start(); 120 | client.getChildren().forPath("/"); 121 | fail(); 122 | } catch (KeeperException.AuthFailedException e) { 123 | LOG.debug("Not authenticated!"); 124 | } 125 | 126 | System.setProperty("zookeeper.sasl.client", "true"); 127 | System.setProperty("zookeeper.sasl.clientconfig", "Client"); 128 | javax.security.auth.login.Configuration.setConfiguration(new Jaas() 129 | .addEntry("Client", kdcLocalCluster.getKrbPrincipalWithRealm("guest"), kdcLocalCluster.getKeytabForPrincipal("guest"))); 130 | 131 | try (CuratorFramework client = CuratorFrameworkFactory.newClient(zookeeperLocalCluster.getZookeeperConnectionString(), 132 | new ExponentialBackoffRetry(1000, 3))) { 133 | client.start(); 134 | client.getChildren().forPath("/").forEach(LOG::debug); 135 | 136 | List perms = new ArrayList<>(); 137 | perms.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.AUTH_IDS)); 138 | perms.add(new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE)); 139 | 140 | client.create().withMode(CreateMode.PERSISTENT).withACL(perms).forPath(propertyParser.getProperty(ConfigVars.HBASE_ZNODE_PARENT_KEY)); 141 | } 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-knox/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-knox 11 | 12 | 13 | 14 | 15 | 16 | org.apache.knox 17 | gateway-server 18 | ${knox.version} 19 | 20 | 21 | org.apache.knox 22 | gateway-provider-rewrite 23 | ${knox.version} 24 | 25 | 26 | org.apache.knox 27 | gateway-provider-rewrite-step-secure-query 28 | ${knox.version} 29 | 30 | 31 | org.apache.knox 32 | gateway-provider-rewrite-step-encrypt-uri 33 | ${knox.version} 34 | 35 | 36 | org.apache.knox 37 | gateway-provider-rewrite-func-hostmap-static 38 | ${knox.version} 39 | 40 | 41 | org.apache.knox 42 | gateway-provider-rewrite-func-service-registry 43 | ${knox.version} 44 | 45 | 46 | org.apache.knox 47 | gateway-service-definitions 48 | ${knox.version} 49 | 50 | 51 | org.apache.knox 52 | gateway-test 53 | ${knox.version} 54 | 55 | 56 | com.mycila.xmltool 57 | xmltool 58 | 3.3 59 | 60 | 61 | 62 | 63 | com.github.sakserv 64 | hadoop-mini-clusters-common 65 | ${project.version} 66 | 67 | 68 | 70 | 71 | org.apache.httpcomponents 72 | httpclient 73 | ${httpclient.version} 74 | test 75 | 76 | 77 
| 78 | 79 | com.github.sakserv 80 | hadoop-mini-clusters-hdfs 81 | ${project.version} 82 | test 83 | 84 | 85 | 86 | 87 | com.github.sakserv 88 | hadoop-mini-clusters-hbase 89 | ${project.version} 90 | test 91 | 92 | 93 | com.github.sakserv 94 | hadoop-mini-clusters-zookeeper 95 | ${project.version} 96 | test 97 | 98 | 99 | 100 | 101 | org.apache.knox 102 | gateway-shell 103 | ${knox.version} 104 | test 105 | 106 | 107 | org.apache.knox 108 | gateway-provider-security-authc-anon 109 | ${knox.version} 110 | test 111 | 112 | 113 | org.apache.knox 114 | gateway-provider-identity-assertion-pseudo 115 | ${knox.version} 116 | test 117 | 118 | 119 | org.apache.knox 120 | gateway-service-webhdfs 121 | ${knox.version} 122 | test 123 | 124 | 125 | org.apache.knox 126 | gateway-service-hbase 127 | ${knox.version} 128 | test 129 | 130 | 131 | 132 | 133 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-knox/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # HDFS 2 | hdfs.namenode.port=20112 3 | hdfs.namenode.http.port=50070 4 | hdfs.temp.dir=embedded_hdfs 5 | hdfs.num.datanodes=1 6 | hdfs.enable.permissions=false 7 | hdfs.format=true 8 | hdfs.enable.running.user.as.proxy.user=true 9 | 10 | # HDFS Test 11 | hdfs.test.file=/tmp/testing 12 | hdfs.test.string=TESTING 13 | 14 | # Zookeeper 15 | zookeeper.temp.dir=embedded_zk 16 | zookeeper.host=127.0.0.1 17 | zookeeper.port=22010 18 | zookeeper.connection.string=127.0.0.1:22010 19 | 20 | # HBase 21 | hbase.master.port=25111 22 | hbase.master.info.port=-1 23 | hbase.num.region.servers=1 24 | hbase.root.dir=embedded_hbase 25 | hbase.znode.parent=/hbase-unsecure 26 | hbase.wal.replication.enabled=false 27 | 28 | # HBase REST 29 | hbase.rest.port=28000 30 | hbase.rest.readonly=false 31 | hbase.rest.info.port=28080 32 | hbase.rest.host=0.0.0.0 33 | hbase.rest.threads.max=100 34 | hbase.rest.threads.min=2 35 | 36 | # HBase Test 37 | hbase.test.table.name=hbase_test_table 38 | hbase.test.col.family.name=cf1 39 | hbase.test.col.qualifier.name=cq1 40 | hbase.test.num.rows.to.put=50 41 | 42 | # KNOX 43 | knox.host=localhost 44 | knox.port=8888 45 | knox.path=gateway 46 | knox.cluster=mycluster 47 | knox.home.dir=embedded_knox -------------------------------------------------------------------------------- /hadoop-mini-clusters-knox/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n 9 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-knox/src/test/java/com/github/sakserv/minicluster/impl/KnoxLocalClusterTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 
4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import com.github.sakserv.minicluster.config.ConfigVars; 18 | import com.github.sakserv.propertyparser.PropertyParser; 19 | import com.mycila.xmltool.XMLDoc; 20 | import org.junit.BeforeClass; 21 | import org.junit.Rule; 22 | import org.junit.Test; 23 | import org.junit.rules.ExpectedException; 24 | import org.slf4j.Logger; 25 | import org.slf4j.LoggerFactory; 26 | 27 | import java.io.IOException; 28 | 29 | import static org.junit.Assert.assertEquals; 30 | 31 | /** 32 | * @author Vincent Devillers 33 | */ 34 | public class KnoxLocalClusterTest { 35 | 36 | // Logger 37 | private static final Logger LOG = LoggerFactory.getLogger(KnoxLocalClusterTest.class); 38 | 39 | // Setup the property parser 40 | private static PropertyParser propertyParser; 41 | 42 | static { 43 | try { 44 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 45 | propertyParser.parsePropsFile(); 46 | } catch (IOException e) { 47 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 48 | } 49 | } 50 | 51 | @Rule 52 | public ExpectedException exception = ExpectedException.none(); 53 | 54 | private static KnoxLocalCluster knoxLocalCluster; 55 | 56 | @BeforeClass 57 | public static void setUp() { 58 | knoxLocalCluster = new KnoxLocalCluster.Builder() 59 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_PORT_KEY))) 60 | .setPath(propertyParser.getProperty(ConfigVars.KNOX_PATH_KEY)) 61 | .setHomeDir(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY)) 62 | .setCluster(propertyParser.getProperty(ConfigVars.KNOX_CLUSTER_KEY)) 63 | .setTopology(XMLDoc.newDocument(true) 64 | .addRoot("topology") 65 | .addTag("service") 66 | .addTag("role").addText("WEBHDFS") 67 | .addTag("url").addText("http://localhost:20112/webhdfs") 68 | .gotoRoot().toString()) 69 | .build(); 70 | } 71 | 72 | @Test 73 | public void testKnoxPort() { 74 | assertEquals(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_PORT_KEY)), 75 | (int) knoxLocalCluster.getPort()); 76 | } 77 | 78 | @Test 79 | public void testMissingKnoxPort() { 80 | exception.expect(IllegalArgumentException.class); 81 | knoxLocalCluster = new KnoxLocalCluster.Builder() 82 | .setHomeDir(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY)) 83 | .build(); 84 | } 85 | 86 | @Test 87 | public void testKnoxPath() { 88 | assertEquals(propertyParser.getProperty(ConfigVars.KNOX_PATH_KEY), 89 | knoxLocalCluster.getPath()); 90 | } 91 | 92 | @Test 93 | public void testMissingKnoxPath() { 94 | exception.expect(IllegalArgumentException.class); 95 | knoxLocalCluster = new KnoxLocalCluster.Builder() 96 | .setHomeDir(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY)) 97 | .build(); 98 | } 99 | 100 | @Test 101 | public void testKnoxTempDir() { 102 | assertEquals(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY), 103 | knoxLocalCluster.getHomeDir()); 104 | } 105 | 106 | @Test 107 | public void testMissingKnoxTempDir() { 108 | exception.expect(IllegalArgumentException.class); 109 
| knoxLocalCluster = new KnoxLocalCluster.Builder() 110 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_PORT_KEY))) 111 | .build(); 112 | } 113 | 114 | @Test 115 | public void testKnoxCluster() { 116 | assertEquals(propertyParser.getProperty(ConfigVars.KNOX_CLUSTER_KEY), 117 | knoxLocalCluster.getCluster()); 118 | } 119 | 120 | @Test 121 | public void testMissingKnoxCluster() { 122 | exception.expect(IllegalArgumentException.class); 123 | knoxLocalCluster = new KnoxLocalCluster.Builder() 124 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_CLUSTER_KEY))) 125 | .build(); 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-mapreduce/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-mapreduce 11 | 12 | 13 | 14 | 15 | 16 | org.apache.hadoop 17 | hadoop-client 18 | ${hadoop.version} 19 | 20 | 21 | 22 | 23 | org.apache.hadoop 24 | hadoop-minicluster 25 | ${hadoop.version} 26 | 27 | 28 | 29 | 30 | com.github.sakserv 31 | hadoop-mini-clusters-hdfs 32 | ${project.version} 33 | test 34 | 35 | 36 | 37 | 38 | com.github.sakserv 39 | hadoop-mini-clusters-common 40 | ${project.version} 41 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-mapreduce/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # HDFS 2 | hdfs.namenode.port=20112 3 | hdfs.temp.dir=embedded_hdfs 4 | hdfs.num.datanodes=1 5 | hdfs.enable.permissions=false 6 | hdfs.format=true 7 | hdfs.enable.running.user.as.proxy.user=true 8 | 9 | # YARN 10 | yarn.num.node.managers=1 11 | yarn.num.local.dirs=1 12 | yarn.num.log.dirs=1 13 | yarn.resource.manager.address=localhost:37001 14 | yarn.resource.manager.hostname=localhost 15 | yarn.resource.manager.scheduler.address=localhost:37002 16 | yarn.resource.manager.resource.tracker.address=localhost:37003 17 | yarn.resource.manager.webapp.address=localhost:37004 18 | yarn.use.in.jvm.container.executor=false 19 | 20 | # MR 21 | mr.job.history.address=localhost:37005 22 | 23 | # MR Test 24 | mr.test.data.filename=mr_input.txt 25 | mr.test.data.hdfs.input.dir=/tmp/mr_input 26 | mr.test.data.hdfs.output.dir=/tmp/mr_output -------------------------------------------------------------------------------- /hadoop-mini-clusters-mapreduce/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-mapreduce/src/test/java/com/github/sakserv/minicluster/mapreduce/Driver.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 
4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | package com.github.sakserv.minicluster.mapreduce; 15 | 16 | import org.apache.hadoop.conf.Configuration; 17 | import org.apache.hadoop.fs.Path; 18 | import org.apache.hadoop.io.IntWritable; 19 | import org.apache.hadoop.io.Text; 20 | import org.apache.hadoop.mapreduce.Job; 21 | import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 22 | import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; 23 | import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 24 | import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; 25 | 26 | public class Driver { 27 | 28 | private static Configuration configuration; 29 | 30 | public Configuration getConfiguration() { 31 | return configuration; 32 | } 33 | 34 | public void setConfiguration(Configuration configuration) { 35 | this.configuration = configuration; 36 | } 37 | 38 | public static void main(String[] args) throws Exception { 39 | 40 | if (args.length != 2) { 41 | System.out.println("usage: [input] [output]"); 42 | System.exit(-1); 43 | } 44 | 45 | if (null == configuration) { 46 | configuration = new Configuration(); 47 | } 48 | 49 | Job job = Job.getInstance(configuration); 50 | job.setOutputKeyClass(Text.class); 51 | job.setOutputValueClass(IntWritable.class); 52 | 53 | job.setMapperClass(WordMapper.class); 54 | job.setReducerClass(SumReducer.class); 55 | 56 | job.setInputFormatClass(TextInputFormat.class); 57 | job.setOutputFormatClass(TextOutputFormat.class); 58 | 59 | FileInputFormat.setInputPaths(job, new Path(args[0])); 60 | FileOutputFormat.setOutputPath(job, new Path(args[1])); 61 | 62 | job.setJarByClass(Driver.class); 63 | 64 | job.waitForCompletion(true); 65 | 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-mapreduce/src/test/java/com/github/sakserv/minicluster/mapreduce/SumReducer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.github.sakserv.minicluster.mapreduce; 15 | 16 | import java.io.IOException; 17 | import java.util.Iterator; 18 | 19 | import org.apache.hadoop.io.IntWritable; 20 | import org.apache.hadoop.io.Text; 21 | import org.apache.hadoop.mapreduce.Reducer; 22 | 23 | 24 | public class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> { 25 | 26 | private IntWritable totalWordCount = new IntWritable(); 27 | 28 | @Override 29 | public void reduce(Text key, Iterable<IntWritable> values, Context context) 30 | throws IOException, InterruptedException { 31 | int wordCount = 0; 32 | Iterator<IntWritable> it = values.iterator(); 33 | while (it.hasNext()) { 34 | wordCount += it.next().get(); 35 | } 36 | totalWordCount.set(wordCount); 37 | context.write(key, totalWordCount); 38 | } 39 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-mapreduce/src/test/java/com/github/sakserv/minicluster/mapreduce/WordMapper.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.mapreduce; 2 | 3 | import java.io.IOException; 4 | import java.util.StringTokenizer; 5 | 6 | import org.apache.hadoop.io.IntWritable; 7 | import org.apache.hadoop.io.Text; 8 | import org.apache.hadoop.mapreduce.Mapper; 9 | 10 | public class WordMapper extends Mapper<Object, Text, Text, IntWritable> { 11 | private Text word = new Text(); 12 | private final static IntWritable one = new IntWritable(1); 13 | 14 | @Override 15 | public void map(Object key, Text value, 16 | Context context) throws IOException, InterruptedException { 17 | // Break line into words for processing 18 | StringTokenizer wordList = new StringTokenizer(value.toString()); 19 | while (wordList.hasMoreTokens()) { 20 | word.set(wordList.nextToken()); 21 | context.write(word, one); 22 | } 23 | } 24 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-mapreduce/src/test/resources/mr_input.txt: -------------------------------------------------------------------------------- 1 | This file has exactly ten words in it. Yep, ten. -------------------------------------------------------------------------------- /hadoop-mini-clusters-mongodb/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | hadoop-mini-clusters 5 | com.github.sakserv 6 | 0.1.17-SNAPSHOT 7 | 8 | 4.0.0 9 | 10 | hadoop-mini-clusters-mongodb 11 | 12 | 13 | 14 | 15 | 16 | de.flapdoodle.embed 17 | de.flapdoodle.embed.mongo 18 | ${embedded-mongo.version} 19 | 20 | 21 | 22 | 23 | org.mongodb 24 | mongo-java-driver 25 | ${mongo-java-driver.version} 26 | test 27 | 28 | 29 | 30 | 31 | com.github.sakserv 32 | hadoop-mini-clusters-common 33 | ${project.version} 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-mongodb/src/main/java/com/github/sakserv/minicluster/impl/MongodbLocalServer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import org.slf4j.Logger; 18 | import org.slf4j.LoggerFactory; 19 | 20 | import com.github.sakserv.minicluster.MiniCluster; 21 | 22 | import de.flapdoodle.embed.mongo.MongodExecutable; 23 | import de.flapdoodle.embed.mongo.MongodProcess; 24 | import de.flapdoodle.embed.mongo.MongodStarter; 25 | import de.flapdoodle.embed.mongo.config.IMongodConfig; 26 | import de.flapdoodle.embed.mongo.config.MongodConfigBuilder; 27 | import de.flapdoodle.embed.mongo.config.Net; 28 | import de.flapdoodle.embed.mongo.distribution.Version; 29 | 30 | public class MongodbLocalServer implements MiniCluster { 31 | 32 | // Logger 33 | private static final Logger LOG = LoggerFactory.getLogger(MongodbLocalServer.class); 34 | 35 | private String ip; 36 | private Integer port; 37 | 38 | private MongodStarter starter; 39 | private MongodExecutable mongodExe; 40 | private MongodProcess mongod; 41 | private IMongodConfig conf; 42 | 43 | private MongodbLocalServer(Builder builder) { 44 | this.ip = builder.ip; 45 | this.port = builder.port; 46 | } 47 | 48 | public String getIp() { 49 | return ip; 50 | } 51 | 52 | public Integer getPort() { 53 | return port; 54 | } 55 | 56 | public static class Builder { 57 | private String ip; 58 | private Integer port; 59 | 60 | public Builder setIp(String ip) { 61 | this.ip = ip; 62 | return this; 63 | } 64 | 65 | public Builder setPort(int port){ 66 | this.port = port; 67 | return this; 68 | } 69 | 70 | public MongodbLocalServer build() { 71 | MongodbLocalServer mongodbLocalServer = new MongodbLocalServer(this); 72 | validateObject(mongodbLocalServer); 73 | return mongodbLocalServer; 74 | } 75 | 76 | private void validateObject(MongodbLocalServer mongodbLocalServer) { 77 | if(mongodbLocalServer.ip == null) { 78 | throw new IllegalArgumentException("ERROR: Missing required config: MongoDB IP"); 79 | } 80 | 81 | if(mongodbLocalServer.port == null) { 82 | throw new IllegalArgumentException("ERROR: Missing required config: MongoDB Port"); 83 | } 84 | } 85 | 86 | } 87 | 88 | @Override 89 | public void start() throws Exception { 90 | LOG.info("MONGODB: Starting MongoDB on {}:{}", ip, port); 91 | starter = MongodStarter.getDefaultInstance(); 92 | configure(); 93 | mongodExe = starter.prepare(conf); 94 | mongod = mongodExe.start(); 95 | } 96 | 97 | @Override 98 | public void stop() throws Exception { 99 | stop(true); 100 | } 101 | 102 | @Override 103 | public void stop(boolean cleanUp) throws Exception { 104 | LOG.info("MONGODB: Stopping MongoDB on {}:{}", ip, port); 105 | mongod.stop(); 106 | mongodExe.stop(); 107 | if(cleanUp) { 108 | cleanUp(); 109 | } 110 | } 111 | 112 | @Override 113 | public void configure() throws Exception { 114 | conf = new MongodConfigBuilder() 115 | .version(Version.Main.PRODUCTION) 116 | .net(new Net(ip, port, false)) 117 | .build(); 118 | } 119 | 120 | @Override 121 | public void cleanUp() throws Exception { 122 | 123 | } 124 | 125 | } 126 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-mongodb/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # MongoDB 2 | mongo.ip=127.0.0.1 3 | mongo.port=13333 4 | mongo.database.name=test_database 5 | mongo.collection.name=test_collection -------------------------------------------------------------------------------- 
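Editorial aside: for orientation, a minimal sketch of a client of the embedded server using the defaults above and the same legacy DB/DBCollection API that the integration test below uses (the class name and document contents are illustrative only).

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class MongoSmokeTest {
    public static void main(String[] args) {
        // mongo.ip and mongo.port from default.properties above
        MongoClient mongo = new MongoClient("127.0.0.1", 13333);
        DB db = mongo.getDB("test_database");             // mongo.database.name
        DBCollection col = db.createCollection(
                "test_collection", new BasicDBObject());  // mongo.collection.name
        col.save(new BasicDBObject("hello", "world"));
        System.out.println("documents: " + col.count());  // prints "documents: 1"
        mongo.close();
    }
}
```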
/hadoop-mini-clusters-mongodb/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-mongodb/src/test/java/com/github/sakserv/minicluster/impl/MongodbLocalServerIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import static org.junit.Assert.assertEquals; 18 | 19 | import java.io.IOException; 20 | import java.util.Date; 21 | 22 | import org.junit.AfterClass; 23 | import org.junit.BeforeClass; 24 | import org.junit.Test; 25 | import org.slf4j.Logger; 26 | import org.slf4j.LoggerFactory; 27 | 28 | import com.github.sakserv.minicluster.config.ConfigVars; 29 | import com.github.sakserv.propertyparser.PropertyParser; 30 | import com.mongodb.BasicDBObject; 31 | import com.mongodb.DB; 32 | import com.mongodb.DBCollection; 33 | import com.mongodb.DBCursor; 34 | import com.mongodb.MongoClient; 35 | 36 | public class MongodbLocalServerIntegrationTest { 37 | 38 | // Logger 39 | private static final Logger LOG = LoggerFactory.getLogger(MongodbLocalServerIntegrationTest.class); 40 | 41 | // Setup the property parser 42 | private static PropertyParser propertyParser; 43 | static { 44 | try { 45 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 46 | propertyParser.parsePropsFile(); 47 | } catch(IOException e) { 48 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 49 | } 50 | } 51 | 52 | private static MongodbLocalServer mongodbLocalServer; 53 | 54 | @BeforeClass 55 | public static void setUp() throws Exception { 56 | mongodbLocalServer = new MongodbLocalServer.Builder() 57 | .setIp(propertyParser.getProperty(ConfigVars.MONGO_IP_KEY)) 58 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.MONGO_PORT_KEY))) 59 | .build(); 60 | mongodbLocalServer.start(); 61 | } 62 | 63 | @AfterClass 64 | public static void tearDown() throws Exception { 65 | mongodbLocalServer.stop(); 66 | } 67 | 68 | @Test 69 | public void testMongodbLocalServer() throws Exception { 70 | MongoClient mongo = new MongoClient(mongodbLocalServer.getIp(), mongodbLocalServer.getPort()); 71 | 72 | DB db = mongo.getDB(propertyParser.getProperty(ConfigVars.MONGO_DATABASE_NAME_KEY)); 73 | DBCollection col = db.createCollection(propertyParser.getProperty(ConfigVars.MONGO_COLLECTION_NAME_KEY), 74 | new BasicDBObject()); 75 | 76 | 
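// Editorial note: DB, DBCollection, save() and count() belong to the legacy
// 2.x mongo-java-driver API pulled in by this module's pom; the 3.x driver
// supersedes them with MongoDatabase/MongoCollection, but the old API still
// works for a smoke test like this one.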
col.save(new BasicDBObject("testDoc", new Date())); 77 | LOG.info("MONGODB: Number of items in collection: {}", col.count()); 78 | assertEquals(1, col.count()); 79 | 80 | DBCursor cursor = col.find(); 81 | while(cursor.hasNext()) { 82 | LOG.info("MONGODB: Document output: {}", cursor.next()); 83 | } 84 | cursor.close(); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-mongodb/src/test/java/com/github/sakserv/minicluster/impl/MongodbLocalServerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import static org.junit.Assert.assertEquals; 18 | 19 | import java.io.IOException; 20 | 21 | import org.junit.BeforeClass; 22 | import org.junit.Rule; 23 | import org.junit.Test; 24 | import org.junit.rules.ExpectedException; 25 | import org.slf4j.Logger; 26 | import org.slf4j.LoggerFactory; 27 | 28 | import com.github.sakserv.minicluster.config.ConfigVars; 29 | import com.github.sakserv.propertyparser.PropertyParser; 30 | 31 | public class MongodbLocalServerTest { 32 | 33 | // Logger 34 | private static final Logger LOG = LoggerFactory.getLogger(MongodbLocalServerTest.class); 35 | 36 | // Setup the property parser 37 | private static PropertyParser propertyParser; 38 | static { 39 | try { 40 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 41 | propertyParser.parsePropsFile(); 42 | } catch(IOException e) { 43 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 44 | } 45 | } 46 | 47 | @Rule 48 | public ExpectedException exception = ExpectedException.none(); 49 | 50 | private static MongodbLocalServer mongodbLocalServer; 51 | 52 | @BeforeClass 53 | public static void setUp() { 54 | mongodbLocalServer = new MongodbLocalServer.Builder() 55 | .setIp(propertyParser.getProperty(ConfigVars.MONGO_IP_KEY)) 56 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.MONGO_PORT_KEY))) 57 | .build(); 58 | } 59 | 60 | @Test 61 | public void testIp() { 62 | assertEquals(propertyParser.getProperty(ConfigVars.MONGO_IP_KEY), mongodbLocalServer.getIp()); 63 | } 64 | 65 | @Test 66 | public void testMissingIp() { 67 | exception.expect(IllegalArgumentException.class); 68 | MongodbLocalServer mongodbLocalServer = new MongodbLocalServer.Builder() 69 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.MONGO_PORT_KEY))) 70 | .build(); 71 | } 72 | 73 | @Test 74 | public void testPort() { 75 | assertEquals(Integer.parseInt(propertyParser.getProperty(ConfigVars.MONGO_PORT_KEY)), 76 | (int) mongodbLocalServer.getPort()); 77 | } 78 | 79 | @Test 80 | public void testMissingPort() { 81 | exception.expect(IllegalArgumentException.class); 82 | MongodbLocalServer mongodbLocalServer = new MongodbLocalServer.Builder() 83 | 
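// Port intentionally omitted; build() is expected to throw IllegalArgumentException.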
.setIp(propertyParser.getProperty(ConfigVars.MONGO_IP_KEY)) 84 | .build(); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/pom.xml: --------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>hadoop-mini-clusters</artifactId>
        <groupId>com.github.sakserv</groupId>
        <version>0.1.17-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>hadoop-mini-clusters-oozie</artifactId>

    <dependencies>
        <dependency>
            <groupId>org.apache.oozie.test</groupId>
            <artifactId>oozie-mini</artifactId>
            <version>${oozie.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.oozie</groupId>
            <artifactId>oozie-core</artifactId>
            <version>${oozie.version}</version>
            <type>test-jar</type>
        </dependency>
        <dependency>
            <groupId>org.apache.oozie</groupId>
            <artifactId>oozie-core</artifactId>
            <version>${oozie.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.oozie</groupId>
            <artifactId>oozie-tools</artifactId>
            <version>${oozie.version}</version>
        </dependency>
        <dependency>
            <groupId>com.github.sakserv</groupId>
            <artifactId>hadoop-mini-clusters-hdfs</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.github.sakserv</groupId>
            <artifactId>hadoop-mini-clusters-mapreduce</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.github.sakserv</groupId>
            <artifactId>hadoop-mini-clusters-common</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>com.github.sakserv</groupId>
            <artifactId>property-parser</artifactId>
            <version>${property-parser.version}</version>
        </dependency>
    </dependencies>
</project>
-------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/src/main/java/com/github/sakserv/minicluster/oozie/sharelib/Framework.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package com.github.sakserv.minicluster.oozie.sharelib; 18 | 19 | public enum Framework { 20 | OOZIE("oozie"), 21 | HCATALOG("hcatalog"), 22 | DISTCP("distcp"), 23 | MAPREDUCE_STREAMING("mapreduce-streaming"), 24 | PIG("pig"), 25 | HIVE("hive"), 26 | HIVE2("hive2"), 27 | SQOOP("sqoop"), 28 | SPARK("spark"); 29 | 30 | private final String id; 31 | 32 | Framework(String id) { 33 | this.id = id; 34 | } 35 | 36 | public String getValue() { 37 | return id; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/src/main/java/com/github/sakserv/minicluster/oozie/util/OozieConfigUtil.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | package com.github.sakserv.minicluster.oozie.util; 15 | 16 | import java.io.File; 17 | import java.io.FileOutputStream; 18 | import java.io.IOException; 19 | 20 | import org.apache.hadoop.conf.Configuration; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | public class OozieConfigUtil { 25 | 26 | // Logger 27 | private static final Logger LOG = LoggerFactory.getLogger(OozieConfigUtil.class); 28 | 29 | public void writeXml(Configuration configuration, String outputLocation) throws IOException { 30 | new File(new File(outputLocation).getParent()).mkdirs(); 31 | configuration.writeXml(new FileOutputStream(outputLocation)); 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # HDFS 2 | hdfs.namenode.port=20112 3 | hdfs.temp.dir=embedded_hdfs 4 | hdfs.num.datanodes=1 5 | hdfs.enable.permissions=false 6 | hdfs.format=true 7 | hdfs.enable.running.user.as.proxy.user=true 8 | 9 | # YARN 10 | yarn.num.node.managers=1 11 | yarn.num.local.dirs=1 12 | yarn.num.log.dirs=1 13 | yarn.resource.manager.address=localhost:37001 14 | yarn.resource.manager.hostname=localhost 15 | yarn.resource.manager.scheduler.address=localhost:37002 16 | yarn.resource.manager.resource.tracker.address=localhost:37003 17 | yarn.resource.manager.webapp.address=localhost:37004 18 | yarn.use.in.jvm.container.executor=false 19 | 20 | # MR 21 | mr.job.history.address=localhost:37005 22 | 23 | # Oozie 24 | oozie.test.dir=embedded_oozie 25 | oozie.home.dir=oozie_home 26 | oozie.username=blah 27 | oozie.groupname=testgroup 28 | oozie.hdfs.share.lib.dir=/tmp/share_lib 29 | oozie.share.lib.create=true 30 | oozie.local.share.lib.cache.dir=./share_lib_cache 31 | oozie.purge.local.share.lib.cache=false -------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/src/main/resources/localoozie-log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/src/main/resources/sharelib.properties: -------------------------------------------------------------------------------- 1 | 
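# HDP Oozie distribution tarball download URLs, keyed by HDP version.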
2.6.5.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.5.0/tars/oozie/oozie-4.2.0.2.6.5.0-292-distro.tar.gz 2 | 2.6.3.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.3.0/tars/oozie/oozie-4.2.0.2.6.3.0-235-distro.tar.gz 3 | 2.6.2.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.2.0/tars/oozie/oozie-4.2.0.2.6.2.0-205-distro.tar.gz 4 | 2.6.1.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.1.0/tars/oozie/oozie-4.2.0.2.6.1.0-129-distro.tar.gz 5 | 2.6.0.3.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.0.3/tars/oozie/oozie-4.2.0.2.6.0.3-8-distro.tar.gz 6 | 2.5.3.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.3.0/tars/oozie/oozie-4.2.0.2.5.3.0-37-distro.tar.gz 7 | 2.5.0.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0/tars/oozie/oozie-4.2.0.2.5.0.0-1245-distro.tar.gz 8 | 2.4.2.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.4.2.0/tars/oozie-4.2.0.2.4.2.0-258-distro.tar.gz 9 | 2.4.0.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.4.0.0/tars/oozie-4.2.0.2.4.0.0-169-distro.tar.gz 10 | 2.3.4.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.4.0/tars/oozie-4.2.0.2.3.4.0-3485-distro.tar.gz 11 | 2.3.2.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.2.0/tars/oozie-4.2.0.2.3.2.0-2950-distro.tar.gz 12 | 2.3.0.0.url=http://s3.amazonaws.com/public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0/tars/oozie-4.2.0.2.3.0.0-2557-distro.tar.gz -------------------------------------------------------------------------------- /hadoop-mini-clusters-oozie/src/main/resources/test_input.txt: -------------------------------------------------------------------------------- 1 | foo 2 | bar -------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/pom.xml: --------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>hadoop-mini-clusters</artifactId>
        <groupId>com.github.sakserv</groupId>
        <version>0.1.17-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>hadoop-mini-clusters-storm</artifactId>

    <dependencies>
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-core</artifactId>
            <version>${storm.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.github.sakserv</groupId>
            <artifactId>hadoop-mini-clusters-zookeeper</artifactId>
            <version>${project.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.github.sakserv</groupId>
            <artifactId>hadoop-mini-clusters-common</artifactId>
            <version>${project.version}</version>
        </dependency>
    </dependencies>
</project>
-------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/src/main/java/com/github/sakserv/minicluster/impl/StormLocalCluster.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License.
13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import com.github.sakserv.minicluster.util.FileUtils; 18 | import org.apache.storm.ILocalCluster; 19 | import org.apache.storm.Testing; 20 | import org.apache.storm.generated.*; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | import com.github.sakserv.minicluster.MiniCluster; 25 | 26 | import org.apache.storm.Config; 27 | import org.apache.storm.LocalCluster; 28 | 29 | import java.util.ArrayList; 30 | import java.util.Arrays; 31 | import java.util.HashMap; 32 | import java.util.List; 33 | 34 | public class StormLocalCluster implements MiniCluster { 35 | 36 | // Logger 37 | private static final Logger LOG = LoggerFactory.getLogger(StormLocalCluster.class); 38 | 39 | private String zookeeperHost; 40 | private Long zookeeperPort; 41 | private Boolean enableDebug; 42 | private Integer numWorkers; 43 | private Config stormConf; 44 | private ILocalCluster localCluster; 45 | 46 | private StormLocalCluster(Builder builder) { 47 | this.zookeeperHost = builder.zookeeperHost; 48 | this.zookeeperPort = builder.zookeeperPort; 49 | this.enableDebug = builder.enableDebug; 50 | this.numWorkers = builder.numWorkers; 51 | this.stormConf = builder.stormConf; 52 | } 53 | 54 | public String getZookeeperHost() { 55 | return zookeeperHost; 56 | } 57 | 58 | public Long getZookeeperPort() { 59 | return zookeeperPort; 60 | } 61 | 62 | public Boolean getEnableDebug() { 63 | return enableDebug; 64 | } 65 | 66 | public Integer getNumWorkers() { return numWorkers; } 67 | 68 | public Config getStormConf() { return stormConf; } 69 | 70 | public static class Builder { 71 | private String zookeeperHost; 72 | private Long zookeeperPort; 73 | private Boolean enableDebug; 74 | private Integer numWorkers; 75 | private Config stormConf; 76 | 77 | public Builder setZookeeperHost(String zookeeperHost) { 78 | this.zookeeperHost = zookeeperHost; 79 | return this; 80 | } 81 | 82 | public Builder setZookeeperPort(Long zookeeperPort) { 83 | this.zookeeperPort = zookeeperPort; 84 | return this; 85 | } 86 | 87 | public Builder setEnableDebug(Boolean enableDebug) { 88 | this.enableDebug = enableDebug; 89 | return this; 90 | } 91 | 92 | public Builder setNumWorkers(Integer numWorkers) { 93 | this.numWorkers = numWorkers; 94 | return this; 95 | } 96 | 97 | public Builder setStormConfig(Config stormConf) { 98 | this.stormConf = stormConf; 99 | return this; 100 | } 101 | 102 | public StormLocalCluster build() { 103 | StormLocalCluster stormLocalCluster = new StormLocalCluster(this); 104 | validateObject(stormLocalCluster); 105 | return stormLocalCluster; 106 | } 107 | 108 | public void validateObject(StormLocalCluster stormLocalCluster) { 109 | if (stormLocalCluster.getZookeeperHost() == null) { 110 | throw new IllegalArgumentException("ERROR: Missing required config: Zookeeper Host"); 111 | } 112 | 113 | if (stormLocalCluster.getZookeeperPort() == null) { 114 | throw new IllegalArgumentException("ERROR: Missing required config: Zookeeper Port"); 115 | } 116 | 117 | if (stormLocalCluster.getEnableDebug() == null) { 118 | throw new IllegalArgumentException("ERROR: Missing required config: Enable Debug"); 119 | } 120 | 121 | if (stormLocalCluster.getNumWorkers() == null) { 122 | throw new IllegalArgumentException("ERROR: Missing required config: Num Workers"); 123 | } 124 | 125 | if (stormLocalCluster.getStormConf() == null) { 126 | throw new IllegalArgumentException("ERROR: Missing required config: Storm Config"); 127 | } 128 | } 129 | } 130 
| 131 | @Override 132 | public void start() throws Exception { 133 | LOG.info("STORM: Starting StormLocalCluster"); 134 | configure(); 135 | localCluster = Testing.getLocalCluster(stormConf); 136 | } 137 | 138 | @Override 139 | public void stop() throws Exception { 140 | stop(true); 141 | } 142 | 143 | @Override 144 | public void stop(boolean cleanUp) throws Exception { 145 | LOG.info("STORM: Stopping StormLocalCluster"); 146 | localCluster.shutdown(); 147 | if(cleanUp) { 148 | cleanUp(); 149 | } 150 | } 151 | 152 | @Override 153 | public void configure() throws Exception { 154 | stormConf.setDebug(enableDebug); 155 | stormConf.setNumWorkers(numWorkers); 156 | stormConf.put("nimbus-daemon", true); 157 | List<String> stormNimbusSeeds = new ArrayList<>(); 158 | stormNimbusSeeds.add("localhost"); 159 | stormConf.put(Config.NIMBUS_SEEDS, stormNimbusSeeds); 160 | stormConf.put(Config.NIMBUS_THRIFT_PORT, 6627); 161 | stormConf.put(Config.STORM_THRIFT_TRANSPORT_PLUGIN, "org.apache.storm.security.auth.SimpleTransportPlugin"); 162 | stormConf.put(Config.STORM_NIMBUS_RETRY_INTERVAL_CEILING, 60000); 163 | stormConf.put(Config.STORM_NIMBUS_RETRY_TIMES, 5); 164 | stormConf.put(Config.STORM_NIMBUS_RETRY_INTERVAL, 2000); 165 | stormConf.put(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE, 1048576); 166 | stormConf.put(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList(getZookeeperHost())); 167 | stormConf.put(Config.STORM_ZOOKEEPER_PORT, getZookeeperPort()); 168 | } 169 | 170 | @Override 171 | public void cleanUp() throws Exception { 172 | FileUtils.deleteFolder("logs"); 173 | } 174 | 175 | public void submitTopology(String topologyName, Config conf, StormTopology topology) 176 | throws AlreadyAliveException, InvalidTopologyException { 177 | localCluster.submitTopology(topologyName, conf, topology); 178 | } 179 | 180 | public void stop(String topologyName) throws Exception { 181 | try { 182 | localCluster.killTopology(topologyName); 183 | } catch (NotAliveException e) { 184 | LOG.debug("Topology not running: " + topologyName); 185 | } 186 | stop(); 187 | } 188 | 189 | } 190 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # Zookeeper 2 | zookeeper.temp.dir=embedded_zk 3 | zookeeper.host=127.0.0.1 4 | zookeeper.port=22010 5 | zookeeper.connection.string=127.0.0.1:22010 6 | 7 | # Storm 8 | storm.enable.debug=true 9 | storm.num.workers=1 10 | storm.topology.name=test -------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/src/test/java/com/github/sakserv/minicluster/impl/StormLocalClusterIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance
with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import java.io.IOException; 18 | 19 | import org.apache.storm.utils.NimbusClient; 20 | import org.junit.AfterClass; 21 | import org.junit.BeforeClass; 22 | import org.junit.Test; 23 | import org.slf4j.Logger; 24 | import org.slf4j.LoggerFactory; 25 | 26 | import com.github.sakserv.minicluster.config.ConfigVars; 27 | import com.github.sakserv.minicluster.storm.bolt.PrinterBolt; 28 | import com.github.sakserv.minicluster.storm.spout.RandomSentenceSpout; 29 | import com.github.sakserv.propertyparser.PropertyParser; 30 | 31 | import org.apache.storm.Config; 32 | import org.apache.storm.topology.TopologyBuilder; 33 | 34 | import static org.junit.Assert.assertTrue; 35 | 36 | public class StormLocalClusterIntegrationTest { 37 | 38 | // Logger 39 | private static final Logger LOG = LoggerFactory.getLogger(StormLocalClusterIntegrationTest.class); 40 | 41 | // Setup the property parser 42 | private static PropertyParser propertyParser; 43 | static { 44 | try { 45 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 46 | propertyParser.parsePropsFile(); 47 | } catch(IOException e) { 48 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 49 | } 50 | } 51 | 52 | private static ZookeeperLocalCluster zookeeperLocalCluster; 53 | private static StormLocalCluster stormLocalCluster; 54 | 55 | @BeforeClass 56 | public static void setUp() throws Exception { 57 | zookeeperLocalCluster = new ZookeeperLocalCluster.Builder() 58 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 59 | .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY)) 60 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY)) 61 | .build(); 62 | zookeeperLocalCluster.start(); 63 | 64 | stormLocalCluster = new StormLocalCluster.Builder() 65 | .setZookeeperHost(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY)) 66 | .setZookeeperPort(Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 67 | .setEnableDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY))) 68 | .setNumWorkers(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY))) 69 | .setStormConfig(new Config()) 70 | .build(); 71 | stormLocalCluster.start(); 72 | } 73 | 74 | @AfterClass 75 | public static void tearDown() throws Exception { 76 | stormLocalCluster.stop(propertyParser.getProperty(ConfigVars.STORM_TOPOLOGY_NAME_KEY)); 77 | zookeeperLocalCluster.stop(); 78 | } 79 | 80 | @Test 81 | public void testStormCluster() throws Exception { 82 | TopologyBuilder builder = new TopologyBuilder(); 83 | builder.setSpout("randomsentencespout", new RandomSentenceSpout(), 1); 84 | builder.setBolt("print", new PrinterBolt(), 1).shuffleGrouping("randomsentencespout"); 85 | stormLocalCluster.submitTopology(propertyParser.getProperty(ConfigVars.STORM_TOPOLOGY_NAME_KEY), 86 | stormLocalCluster.getStormConf(), 
builder.createTopology()); 87 | 88 | try { 89 | Thread.sleep(5000L); 90 | } catch (InterruptedException e) { 91 | LOG.info("SUCCESSFULLY COMPLETED"); 92 | } 93 | } 94 | 95 | @Test 96 | public void testStormNimbusClient() throws Exception { 97 | Config conf = stormLocalCluster.getStormConf(); 98 | NimbusClient nimbusClient = NimbusClient.getConfiguredClient(conf); 99 | assertTrue(nimbusClient.getClient().getNimbusConf().length() > 0); 100 | } 101 | 102 | } 103 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/src/test/java/com/github/sakserv/minicluster/impl/StormLocalClusterTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import static org.junit.Assert.assertEquals; 18 | import static org.junit.Assert.assertTrue; 19 | 20 | import java.io.IOException; 21 | 22 | import org.junit.BeforeClass; 23 | import org.junit.Rule; 24 | import org.junit.Test; 25 | import org.junit.rules.ExpectedException; 26 | import org.slf4j.Logger; 27 | import org.slf4j.LoggerFactory; 28 | 29 | import com.github.sakserv.minicluster.config.ConfigVars; 30 | import com.github.sakserv.propertyparser.PropertyParser; 31 | 32 | import org.apache.storm.Config; 33 | 34 | public class StormLocalClusterTest { 35 | 36 | // Logger 37 | private static final Logger LOG = LoggerFactory.getLogger(StormLocalClusterTest.class); 38 | 39 | // Setup the property parser 40 | private static PropertyParser propertyParser; 41 | static { 42 | try { 43 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 44 | propertyParser.parsePropsFile(); 45 | } catch(IOException e) { 46 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 47 | } 48 | } 49 | 50 | @Rule 51 | public ExpectedException exception = ExpectedException.none(); 52 | 53 | private static StormLocalCluster stormLocalCluster; 54 | 55 | @BeforeClass 56 | public static void setUp() { 57 | stormLocalCluster = new StormLocalCluster.Builder() 58 | .setZookeeperHost(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY)) 59 | .setZookeeperPort(Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 60 | .setEnableDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY))) 61 | .setNumWorkers(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY))) 62 | .setStormConfig(new Config()) 63 | .build(); 64 | } 65 | 66 | @Test 67 | public void testZookeeperHost() { 68 | assertEquals(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY), 69 | stormLocalCluster.getZookeeperHost()); 70 | } 71 | 72 | @Test 73 | public void testMissingZookeeperHost() { 74 | exception.expect(IllegalArgumentException.class); 75 | stormLocalCluster = new StormLocalCluster.Builder() 76 | 
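// Zookeeper host intentionally omitted; build() is expected to throw IllegalArgumentException.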
.setZookeeperPort(Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 77 | .setEnableDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY))) 78 | .setNumWorkers(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY))) 79 | .setStormConfig(new Config()) 80 | .build(); 81 | } 82 | 83 | @Test 84 | public void testZookeeperPort() { 85 | assertEquals(Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)), 86 | (long) stormLocalCluster.getZookeeperPort()); 87 | } 88 | 89 | @Test 90 | public void testMissingZookeeperPort() { 91 | exception.expect(IllegalArgumentException.class); 92 | stormLocalCluster = new StormLocalCluster.Builder() 93 | .setZookeeperHost(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY)) 94 | .setEnableDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY))) 95 | .setNumWorkers(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY))) 96 | .setStormConfig(new Config()) 97 | .build(); 98 | } 99 | 100 | @Test 101 | public void testEnableDebug() { 102 | assertEquals(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY)), 103 | stormLocalCluster.getEnableDebug()); 104 | } 105 | 106 | @Test 107 | public void testMissingEnableDebug() { 108 | exception.expect(IllegalArgumentException.class); 109 | stormLocalCluster = new StormLocalCluster.Builder() 110 | .setZookeeperHost(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY)) 111 | .setZookeeperPort(Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 112 | .setNumWorkers(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY))) 113 | .setStormConfig(new Config()) 114 | .build(); 115 | } 116 | 117 | @Test 118 | public void testNumWorkers() { 119 | assertEquals(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY)), 120 | (int) stormLocalCluster.getNumWorkers()); 121 | } 122 | 123 | @Test 124 | public void testMissingNumWorkers() { 125 | exception.expect(IllegalArgumentException.class); 126 | stormLocalCluster = new StormLocalCluster.Builder() 127 | .setZookeeperHost(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY)) 128 | .setZookeeperPort(Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 129 | .setEnableDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY))) 130 | .setStormConfig(new Config()) 131 | .build(); 132 | } 133 | 134 | @Test 135 | public void testStormConf() { 136 | assertTrue(stormLocalCluster.getStormConf() instanceof org.apache.storm.Config); 137 | 138 | } 139 | 140 | @Test 141 | public void testMissingStormConf() { 142 | exception.expect(IllegalArgumentException.class); 143 | stormLocalCluster = new StormLocalCluster.Builder() 144 | .setZookeeperHost(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY)) 145 | .setZookeeperPort(Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 146 | .setEnableDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY))) 147 | .setNumWorkers(Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY))) 148 | .build(); 149 | } 150 | 151 | } 152 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/src/test/java/com/github/sakserv/minicluster/storm/bolt/PrinterBolt.java: 
-------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package com.github.sakserv.minicluster.storm.bolt; 20 | 21 | import org.apache.storm.topology.BasicOutputCollector; 22 | import org.apache.storm.topology.OutputFieldsDeclarer; 23 | import org.apache.storm.topology.base.BaseBasicBolt; 24 | import org.apache.storm.tuple.Tuple; 25 | 26 | public class PrinterBolt extends BaseBasicBolt { 27 | 28 | @Override 29 | public void execute(Tuple tuple, BasicOutputCollector collector) { 30 | 31 | System.out.println("PRINTER BOLT TUPLE: " + tuple); 32 | System.out.println("PRINTER BOLT FIELDS: " + tuple.getFields().toString()); 33 | } 34 | 35 | @Override 36 | public void declareOutputFields(OutputFieldsDeclarer ofd) { 37 | } 38 | 39 | } 40 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-storm/src/test/java/com/github/sakserv/minicluster/storm/spout/RandomSentenceSpout.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | 15 | package com.github.sakserv.minicluster.storm.spout; 16 | 17 | import java.util.Map; 18 | import java.util.Random; 19 | 20 | import org.apache.storm.spout.SpoutOutputCollector; 21 | import org.apache.storm.task.TopologyContext; 22 | import org.apache.storm.topology.OutputFieldsDeclarer; 23 | import org.apache.storm.topology.base.BaseRichSpout; 24 | import org.apache.storm.tuple.Fields; 25 | import org.apache.storm.tuple.Values; 26 | import org.apache.storm.utils.Utils; 27 | 28 | public class RandomSentenceSpout extends BaseRichSpout { 29 | SpoutOutputCollector _collector; 30 | Random _rand; 31 | 32 | 33 | @Override 34 | public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) { 35 | _collector = collector; 36 | _rand = new Random(); 37 | } 38 | 39 | @Override 40 | public void nextTuple() { 41 | Utils.sleep(100); 42 | String[] sentences = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away", 43 | "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" }; 44 | String sentence = sentences[_rand.nextInt(sentences.length)]; 45 | _collector.emit(new Values(sentence)); 46 | } 47 | 48 | @Override 49 | public void ack(Object id) { 50 | } 51 | 52 | @Override 53 | public void fail(Object id) { 54 | } 55 | 56 | @Override 57 | public void declareOutputFields(OutputFieldsDeclarer declarer) { 58 | declarer.declare(new Fields("word")); 59 | } 60 | 61 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/pom.xml: --------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>hadoop-mini-clusters</artifactId>
        <groupId>com.github.sakserv</groupId>
        <version>0.1.17-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>hadoop-mini-clusters-yarn</artifactId>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-minicluster</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>net.sf.jopt-simple</groupId>
            <artifactId>jopt-simple</artifactId>
            <version>${jopt-simple.version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework</groupId>
            <artifactId>spring-core</artifactId>
            <version>${spring-core.version}</version>
        </dependency>
        <dependency>
            <groupId>com.github.sakserv</groupId>
            <artifactId>hadoop-mini-clusters-common</artifactId>
            <version>${project.version}</version>
        </dependency>
    </dependencies>
</project>
-------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/main/java/com/github/sakserv/minicluster/yarn/SystemExitException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License.
13 | */ 14 | package com.github.sakserv.minicluster.yarn; 15 | 16 | /** 17 | * @author Oleg Zhurakousky 18 | * 19 | */ 20 | class SystemExitException extends RuntimeException { 21 | 22 | /** 23 | * 24 | */ 25 | public SystemExitException() { 26 | 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/main/java/com/github/sakserv/minicluster/yarn/util/EnvironmentUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | package com.github.sakserv.minicluster.yarn.util; 15 | 16 | import java.lang.reflect.Field; 17 | import java.util.Collections; 18 | import java.util.HashMap; 19 | import java.util.Map; 20 | 21 | /** 22 | * @author Oleg Zhurakousky 23 | * 24 | */ 25 | public class EnvironmentUtils { 26 | 27 | /** 28 | * Allows dynamic update to the environment variables. 29 | * 30 | * @param key 31 | * @param value 32 | */ 33 | public static void put(String key, String value) throws Exception { 34 | Map<String, String> environment = new HashMap<>(System.getenv()); 35 | environment.put(key, value); 36 | updateEnvironment(environment); 37 | } 38 | 39 | public static synchronized void putAll(Map<String, String> additionalEnvironment) throws Exception { 40 | Map<String, String> environment = new HashMap<>(System.getenv()); 41 | environment.putAll(additionalEnvironment); 42 | updateEnvironment(environment); 43 | } 44 | 45 | /** 46 | * 47 | * @param environment 48 | */ 49 | @SuppressWarnings("unchecked") 50 | private static void updateEnvironment(Map<String, String> environment) throws Exception { 51 | 52 | Class<?>[] classes = Collections.class.getDeclaredClasses(); 53 | for (Class<?> clazz : classes) { 54 | if ("java.util.Collections$UnmodifiableMap".equals(clazz.getName())) { 55 | Field field = ReflectionUtils.getFieldAndMakeAccessible(clazz, "m"); 56 | Object obj = field.get(System.getenv()); 57 | Map<String, String> map = (Map<String, String>) obj; 58 | map.clear(); 59 | map.putAll(environment); 60 | } 61 | } 62 | 63 | } 64 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/main/java/com/github/sakserv/minicluster/yarn/util/ExecJavaCliParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License.
13 | */ 14 | package com.github.sakserv.minicluster.yarn.util; 15 | 16 | import java.util.ArrayList; 17 | import java.util.Iterator; 18 | import java.util.List; 19 | import java.util.regex.Matcher; 20 | import java.util.regex.Pattern; 21 | 22 | import joptsimple.OptionParser; 23 | import joptsimple.OptionSet; 24 | import joptsimple.OptionSpec; 25 | 26 | /** 27 | * @author Oleg Zhurakousky 28 | * 29 | */ 30 | public class ExecJavaCliParser { 31 | 32 | private final OptionParser optionParser; 33 | 34 | private final OptionSpec<String> xVmOptions; 35 | 36 | private final OptionSpec<String> sysPropOptions; 37 | 38 | private final OptionSpec<String> mainArgumentsOptions; 39 | 40 | private final OptionSet optionSet; 41 | 42 | private final List<String> mainArguments; 43 | 44 | private String main; 45 | 46 | 47 | 48 | /** 49 | * 50 | * @param line 51 | */ 52 | public ExecJavaCliParser(String line){ 53 | this.optionParser = new OptionParser("D:X:"); 54 | this.optionParser.allowsUnrecognizedOptions(); 55 | this.xVmOptions = this.optionParser.accepts("X").withRequiredArg(); 56 | this.sysPropOptions = this.optionParser.accepts("D").withRequiredArg(); 57 | this.mainArgumentsOptions = this.optionParser.nonOptions(); 58 | this.optionSet = this.optionParser.parse(line.split(" ")); 59 | Pattern p = Pattern.compile("([\\p{L}_$][\\p{L}\\p{N}_$]*\\.)*[\\p{L}_$][\\p{L}\\p{N}_$]*"); 60 | this.mainArguments = new ArrayList<>(this.mainArgumentsOptions.values(this.optionSet)); 61 | Iterator<String> mainArgumentsIter = this.mainArguments.iterator(); 62 | boolean mainFound = false; 63 | while (mainArgumentsIter.hasNext()){ 64 | String value = mainArgumentsIter.next(); 65 | Matcher m = p.matcher(value); 66 | boolean matches = m.matches(); 67 | if (matches && !mainFound){ 68 | mainFound = true; 69 | this.main = value; 70 | mainArgumentsIter.remove(); 71 | break; 72 | } 73 | else if (!mainFound){ 74 | mainArgumentsIter.remove(); 75 | } 76 | } 77 | } 78 | 79 | public String[] getXValues(){ 80 | return this.xVmOptions.values(this.optionSet).toArray(new String[]{}); 81 | } 82 | 83 | public String[] getSysProperties(){ 84 | return this.sysPropOptions.values(this.optionSet).toArray(new String[]{}); 85 | } 86 | 87 | public String getMain() { 88 | return this.main; 89 | } 90 | 91 | public String[] getMainArguments() { 92 | return this.mainArguments.toArray(new String[]{}); 93 | } 94 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/main/java/com/github/sakserv/minicluster/yarn/util/ExecShellCliParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License.
13 | */ 14 | package com.github.sakserv.minicluster.yarn.util; 15 | 16 | import java.io.BufferedReader; 17 | import java.io.File; 18 | import java.io.IOException; 19 | import java.io.InputStream; 20 | import java.io.InputStreamReader; 21 | 22 | import org.slf4j.Logger; 23 | import org.slf4j.LoggerFactory; 24 | 25 | public class ExecShellCliParser { 26 | 27 | // Logger 28 | private static final Logger LOG = LoggerFactory.getLogger(ExecShellCliParser.class); 29 | 30 | private String cliString; 31 | 32 | public ExecShellCliParser(String cliString) { 33 | this.cliString = cliString; 34 | } 35 | 36 | public String getCommand() { 37 | String[] cliStringParts = cliString.split(" "); 38 | StringBuilder sb = new StringBuilder(); 39 | for (String part: cliStringParts) { 40 | if (!part.startsWith("1>") && !part.startsWith("2>") && !part.startsWith(">")) { 41 | sb.append(part); 42 | sb.append(' '); 43 | } 44 | } 45 | return sb.toString().trim(); 46 | } 47 | 48 | public String getStdoutPath() { 49 | String[] cliStringParts = cliString.split(" "); 50 | StringBuilder sb = new StringBuilder(); 51 | for (String part : cliStringParts) { 52 | if (part.startsWith("1>")) { 53 | sb.append(part.replace("1>", "")); 54 | sb.append(' '); 55 | } 56 | } 57 | return sb.toString().trim(); 58 | } 59 | 60 | public String getStderrPath() { 61 | String[] cliStringParts = cliString.split(" "); 62 | StringBuilder sb = new StringBuilder(); 63 | for (String part : cliStringParts) { 64 | if (part.startsWith("2>")) { 65 | sb.append(part.replace("2>", "")); 66 | sb.append(' '); 67 | } 68 | } 69 | return sb.toString().trim(); 70 | } 71 | 72 | public int runCommand() throws Exception { 73 | String command = getCommand(); 74 | String stdoutFile = getStdoutPath(); 75 | String stderrFile = getStderrPath(); 76 | 77 | Process p = Runtime.getRuntime().exec(command.split(" ")); 78 | 79 | String stdout = getOutput(p.getInputStream()); 80 | String stderr = getOutput(p.getErrorStream()); 81 | 82 | writeOutputToFile(stdout, new File(stdoutFile)); 83 | writeOutputToFile(stderr, new File(stderrFile)); 84 | 85 | p.waitFor(); 86 | return p.exitValue(); 87 | } 88 | 89 | public String getOutput(InputStream inputStream) throws IOException { 90 | StringBuilder sb = new StringBuilder(); 91 | String line; 92 | BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream)); 93 | while((line = bufferedReader.readLine()) != null) { 94 | sb.append(line); 95 | sb.append("\n"); 96 | } 97 | return sb.toString().trim(); 98 | } 99 | 100 | public void writeOutputToFile(String output, File outputFile) throws IOException { 101 | File parentDir = outputFile.getAbsoluteFile().getParentFile(); 102 | parentDir.mkdirs(); 103 | org.apache.commons.io.FileUtils.writeStringToFile(outputFile, output); 104 | } 105 | 106 | } 107 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/main/java/com/github/sakserv/minicluster/yarn/util/ReflectionUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 
4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | package com.github.sakserv.minicluster.yarn.util; 15 | 16 | import java.lang.reflect.Field; 17 | import java.lang.reflect.Method; 18 | 19 | /** 20 | * @author Oleg Zhurakousky 21 | * 22 | */ 23 | public class ReflectionUtils { 24 | 25 | /** 26 | * 27 | * @param clazz 28 | * @param name 29 | * @param arguments 30 | * @return 31 | */ 32 | public static Method getMethodAndMakeAccessible(Class<?> clazz, String name, Class<?>... arguments) { 33 | 34 | try { 35 | Method m = org.springframework.util.ReflectionUtils.findMethod(clazz, name, arguments); 36 | m.setAccessible(true); 37 | return m; 38 | } 39 | catch (Exception e) { 40 | throw new IllegalArgumentException(e); 41 | } 42 | } 43 | 44 | /** 45 | * 46 | * @param clazz 47 | * @param fieldName 48 | * @return 49 | */ 50 | public static Field getFieldAndMakeAccessible(Class<?> clazz, String fieldName) { 51 | Class<?> searchType = clazz; 52 | while (!Object.class.equals(searchType) && searchType != null) { 53 | Field[] fields = searchType.getDeclaredFields(); 54 | for (Field field : fields) { 55 | if (fieldName == null || fieldName.equals(field.getName())) { 56 | field.setAccessible(true); 57 | return field; 58 | } 59 | } 60 | searchType = searchType.getSuperclass(); 61 | } 62 | return null; 63 | } 64 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # YARN 2 | yarn.num.node.managers=1 3 | yarn.num.local.dirs=1 4 | yarn.num.log.dirs=1 5 | yarn.resource.manager.address=localhost:37001 6 | yarn.resource.manager.hostname=localhost 7 | yarn.resource.manager.scheduler.address=localhost:37002 8 | yarn.resource.manager.resource.tracker.address=localhost:37003 9 | yarn.resource.manager.webapp.address=localhost:37004 10 | yarn.use.in.jvm.container.executor=false -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/impl/YarnLocalClusterInJvmContainerExecutorTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | package com.github.sakserv.minicluster.impl; 15 | 16 | import static org.junit.Assert.assertEquals; 17 | 18 | import java.io.File; 19 | import java.io.IOException; 20 | import java.nio.charset.Charset; 21 | import java.nio.file.Files; 22 | import java.nio.file.Paths; 23 | 24 | import org.apache.hadoop.conf.Configuration; 25 | import org.junit.AfterClass; 26 | import org.junit.BeforeClass; 27 | import org.junit.Test; 28 | import org.slf4j.Logger; 29 | import org.slf4j.LoggerFactory; 30 | 31 | import com.github.sakserv.minicluster.config.ConfigVars; 32 | import com.github.sakserv.minicluster.yarn.InJvmContainerExecutor; 33 | import com.github.sakserv.minicluster.yarn.simpleyarnapp.Client; 34 | import com.github.sakserv.propertyparser.PropertyParser; 35 | 36 | public class YarnLocalClusterInJvmContainerExecutorTest { 37 | 38 | // Logger 39 | private static final Logger LOG = LoggerFactory.getLogger(YarnLocalClusterInJvmContainerExecutorTest.class); 40 | 41 | // Setup the property parser 42 | private static PropertyParser propertyParser; 43 | static { 44 | try { 45 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 46 | propertyParser.parsePropsFile(); 47 | } catch(IOException e) { 48 | LOG.error("Unable to load property file: " + propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 49 | } 50 | } 51 | 52 | private static YarnLocalCluster yarnLocalCluster; 53 | 54 | @BeforeClass 55 | public static void setUp() throws Exception { 56 | yarnLocalCluster = new YarnLocalCluster.Builder() 57 | .setNumNodeManagers(Integer.parseInt(propertyParser.getProperty(ConfigVars.YARN_NUM_NODE_MANAGERS_KEY))) 58 | .setNumLocalDirs(Integer.parseInt(propertyParser.getProperty(ConfigVars.YARN_NUM_LOCAL_DIRS_KEY))) 59 | .setNumLogDirs(Integer.parseInt(propertyParser.getProperty(ConfigVars.YARN_NUM_LOG_DIRS_KEY))) 60 | .setResourceManagerAddress(propertyParser.getProperty(ConfigVars.YARN_RESOURCE_MANAGER_ADDRESS_KEY)) 61 | .setResourceManagerHostname(propertyParser.getProperty(ConfigVars.YARN_RESOURCE_MANAGER_HOSTNAME_KEY)) 62 | .setResourceManagerSchedulerAddress(propertyParser.getProperty( 63 | ConfigVars.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY)) 64 | .setResourceManagerResourceTrackerAddress(propertyParser.getProperty( 65 | ConfigVars.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY)) 66 | .setResourceManagerWebappAddress(propertyParser.getProperty( 67 | ConfigVars.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY)) 68 | .setUseInJvmContainerExecutor(true) 69 | .setConfig(new Configuration()) 70 | .build(); 71 | 72 | yarnLocalCluster.start(); 73 | } 74 | 75 | @AfterClass 76 | public static void tearDown() throws Exception { 77 | // We want the cluster to be able to shut down 78 | System.setSecurityManager(new InJvmContainerExecutor.SystemExitAllowSecurityManager()); 79 | yarnLocalCluster.stop(); 80 | } 81 | 82 | @Test 83 | public void testYarnLocalClusterWithInJvmContainerExecutor() { 84 | 85 | String[] args = new String[7]; 86 | args[0] = "whoami"; 87 | args[1] = "1"; 88 | args[2] = 
getClass().getClassLoader().getResource("simple-yarn-app-1.1.0.jar").toString(); 89 | args[3] = yarnLocalCluster.getResourceManagerAddress(); 90 | args[4] = yarnLocalCluster.getResourceManagerHostname(); 91 | args[5] = yarnLocalCluster.getResourceManagerSchedulerAddress(); 92 | args[6] = yarnLocalCluster.getResourceManagerResourceTrackerAddress(); 93 | 94 | try { 95 | Client.main(args); 96 | } catch(Exception e) { 97 | e.printStackTrace(); 98 | } 99 | 100 | // simple yarn app running "whoami", 101 | // validate the container contents matches the java user.name 102 | if (!System.getProperty("user.name").equals("travis")) { 103 | assertEquals(System.getProperty("user.name"), getStdoutContents()); 104 | } 105 | 106 | } 107 | 108 | public String getStdoutContents() { 109 | String contents = ""; 110 | try { 111 | byte[] encoded = Files.readAllBytes(Paths.get(getStdoutPath())); 112 | contents = new String(encoded, Charset.defaultCharset()).trim(); 113 | } catch (IOException e) { 114 | e.printStackTrace(); 115 | } 116 | return contents; 117 | } 118 | 119 | public String getStdoutPath() { 120 | File dir = new File("./target/" + yarnLocalCluster.getTestName()); 121 | String[] nmDirs = dir.list(); 122 | for (String nmDir: nmDirs) { 123 | if (nmDir.contains("logDir")) { 124 | String[] appDirs = new File(dir.toString() + "/" + nmDir).list(); 125 | for (String appDir: appDirs) { 126 | if (appDir.contains("0001")) { 127 | String[] containerDirs = new File(dir.toString() + "/" + nmDir + "/" + appDir).list(); 128 | for (String containerDir: containerDirs) { 129 | if(containerDir.contains("000002")) { 130 | return dir.toString() + "/" + nmDir + "/" + appDir + "/" + containerDir + "/stdout"; 131 | } 132 | } 133 | } 134 | } 135 | } 136 | } 137 | return ""; 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/impl/YarnLocalClusterIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 
13 | */ 14 | package com.github.sakserv.minicluster.impl; 15 | 16 | import static org.junit.Assert.assertEquals; 17 | 18 | import java.io.File; 19 | import java.io.IOException; 20 | import java.nio.charset.Charset; 21 | import java.nio.file.Files; 22 | import java.nio.file.Paths; 23 | 24 | import org.apache.hadoop.conf.Configuration; 25 | import org.junit.AfterClass; 26 | import org.junit.BeforeClass; 27 | import org.junit.Test; 28 | import org.slf4j.Logger; 29 | import org.slf4j.LoggerFactory; 30 | 31 | import com.github.sakserv.minicluster.config.ConfigVars; 32 | import com.github.sakserv.minicluster.yarn.simpleyarnapp.Client; 33 | import com.github.sakserv.propertyparser.PropertyParser; 34 | 35 | public class YarnLocalClusterIntegrationTest { 36 | 37 | // Logger 38 | private static final Logger LOG = LoggerFactory.getLogger(YarnLocalClusterIntegrationTest.class); 39 | 40 | // Setup the property parser 41 | private static PropertyParser propertyParser; 42 | static { 43 | try { 44 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 45 | propertyParser.parsePropsFile(); 46 | } catch(IOException e) { 47 | LOG.error("Unable to load property file: " + propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE)); 48 | } 49 | } 50 | 51 | private static YarnLocalCluster yarnLocalCluster; 52 | 53 | @BeforeClass 54 | public static void setUp() throws Exception { 55 | yarnLocalCluster = new YarnLocalCluster.Builder() 56 | .setNumNodeManagers(Integer.parseInt(propertyParser.getProperty(ConfigVars.YARN_NUM_NODE_MANAGERS_KEY))) 57 | .setNumLocalDirs(Integer.parseInt(propertyParser.getProperty(ConfigVars.YARN_NUM_LOCAL_DIRS_KEY))) 58 | .setNumLogDirs(Integer.parseInt(propertyParser.getProperty(ConfigVars.YARN_NUM_LOG_DIRS_KEY))) 59 | .setResourceManagerAddress(propertyParser.getProperty(ConfigVars.YARN_RESOURCE_MANAGER_ADDRESS_KEY)) 60 | .setResourceManagerHostname(propertyParser.getProperty(ConfigVars.YARN_RESOURCE_MANAGER_HOSTNAME_KEY)) 61 | .setResourceManagerSchedulerAddress(propertyParser.getProperty( 62 | ConfigVars.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY)) 63 | .setResourceManagerResourceTrackerAddress(propertyParser.getProperty( 64 | ConfigVars.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY)) 65 | .setResourceManagerWebappAddress(propertyParser.getProperty( 66 | ConfigVars.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY)) 67 | .setUseInJvmContainerExecutor(Boolean.parseBoolean(propertyParser.getProperty( 68 | ConfigVars.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY))) 69 | .setConfig(new Configuration()) 70 | .build(); 71 | 72 | yarnLocalCluster.start(); 73 | } 74 | 75 | @AfterClass 76 | public static void tearDown() throws Exception { 77 | yarnLocalCluster.stop(); 78 | } 79 | 80 | @Test 81 | public void testYarnLocalClusterIntegrationTest() { 82 | 83 | String[] args = new String[7]; 84 | args[0] = "whoami"; 85 | args[1] = "1"; 86 | args[2] = getClass().getClassLoader().getResource("simple-yarn-app-1.1.0.jar").toString(); 87 | args[3] = yarnLocalCluster.getResourceManagerAddress(); 88 | args[4] = yarnLocalCluster.getResourceManagerHostname(); 89 | args[5] = yarnLocalCluster.getResourceManagerSchedulerAddress(); 90 | args[6] = yarnLocalCluster.getResourceManagerResourceTrackerAddress(); 91 | 92 | try { 93 | Client.main(args); 94 | } catch(Exception e) { 95 | e.printStackTrace(); 96 | } 97 | 98 | // simple yarn app running "whoami", 99 | // validate the container contents matches the java user.name 100 | if (!System.getProperty("user.name").equals("travis")) { 101 | 
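// The assertion is skipped for the "travis" CI user, where the container user name differs.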
101 | assertEquals(System.getProperty("user.name"), getStdoutContents()); 102 | } 103 | 104 | } 105 | 106 | public String getStdoutContents() { 107 | String contents = ""; 108 | try { 109 | byte[] encoded = Files.readAllBytes(Paths.get(getStdoutPath())); 110 | contents = new String(encoded, Charset.defaultCharset()).trim(); 111 | } catch (IOException e) { 112 | e.printStackTrace(); 113 | } 114 | return contents; 115 | } 116 | 117 | public String getStdoutPath() { 118 | File dir = new File("./target/" + yarnLocalCluster.getTestName()); 119 | String[] nmDirs = dir.list(); 120 | for (String nmDir: nmDirs) { 121 | if (nmDir.contains("logDir")) { 122 | String[] appDirs = new File(dir.toString() + "/" + nmDir).list(); 123 | for (String appDir: appDirs) { 124 | if (appDir.contains("0001")) { 125 | String[] containerDirs = new File(dir.toString() + "/" + nmDir + "/" + appDir).list(); 126 | for (String containerDir: containerDirs) { 127 | if (containerDir.contains("000002")) { 128 | return dir.toString() + "/" + nmDir + "/" + appDir + "/" + containerDir + "/stdout"; 129 | } 130 | } 131 | } 132 | } 133 | } 134 | } 135 | return ""; 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/yarn/InJvmContainerExecutorTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.yarn; 2 | 3 | import org.junit.Test; 4 | 5 | /* 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ public class InJvmContainerExecutorTest { 18 | 19 | @Test 20 | public void testLaunchContainer() throws Exception { 21 | // TODO: not yet implemented 22 | } 23 | 24 | @Test 25 | public void testIsContainerActive() throws Exception { 26 | // TODO: not yet implemented 27 | } 28 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/yarn/SystemExitExceptionTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.yarn; 2 | 3 | import static org.junit.Assert.assertTrue; 4 | 5 | import org.junit.Test; 6 | 7 | /* 8 | * Licensed under the Apache License, Version 2.0 (the "License"); 9 | * you may not use this file except in compliance with the License. 10 | * You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License.
19 | */ public class SystemExitExceptionTest { 20 | 21 | @Test 22 | public void testSystemExitException() { 23 | assertTrue(new SystemExitException() instanceof RuntimeException); 24 | } 25 | 26 | 27 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/yarn/util/EnvironmentUtilsTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.yarn.util; 2 | 3 | import java.util.HashMap; 4 | import java.util.Map; 5 | 6 | import org.junit.Test; 7 | 8 | /* 9 | * Licensed under the Apache License, Version 2.0 (the "License"); 10 | * you may not use this file except in compliance with the License. 11 | * You may obtain a copy of the License at 12 | * 13 | * http://www.apache.org/licenses/LICENSE-2.0 14 | * 15 | * Unless required by applicable law or agreed to in writing, software 16 | * distributed under the License is distributed on an "AS IS" BASIS, 17 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | * See the License for the specific language governing permissions and 19 | * limitations under the License. 20 | */ public class EnvironmentUtilsTest { 21 | 22 | EnvironmentUtils environmentUtils = new EnvironmentUtils(); 23 | 24 | @Test 25 | public void testPut() throws Exception { 26 | environmentUtils.put("test", "test"); 27 | } 28 | 29 | @Test 30 | public void testPutAll() throws Exception { 31 | Map<String, String> map = new HashMap<>(); 32 | map.put("test", "test"); 33 | environmentUtils.putAll(map); 34 | } 35 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/yarn/util/ExecJavaCliParserTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.yarn.util; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | 5 | import org.junit.BeforeClass; 6 | import org.junit.Test; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | /* 11 | * Licensed under the Apache License, Version 2.0 (the "License"); 12 | * you may not use this file except in compliance with the License. 13 | * You may obtain a copy of the License at 14 | * 15 | * http://www.apache.org/licenses/LICENSE-2.0 16 | * 17 | * Unless required by applicable law or agreed to in writing, software 18 | * distributed under the License is distributed on an "AS IS" BASIS, 19 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 | * See the License for the specific language governing permissions and 21 | * limitations under the License.
22 | */ public class ExecJavaCliParserTest { 23 | 24 | // Logger 25 | private static final Logger LOG = LoggerFactory.getLogger(ExecJavaCliParserTest.class); 26 | 27 | private static final String command = "java -Xmx=1024m -Dhadoop.user=foo com.test.foo"; 28 | private static ExecJavaCliParser execJavaCliParser; 29 | 30 | @BeforeClass 31 | public static void setUp() throws Exception { 32 | execJavaCliParser = new ExecJavaCliParser(command); 33 | } 34 | 35 | @Test 36 | public void testGetXValues() throws Exception { 37 | assertEquals("mx=1024m", execJavaCliParser.getXValues()[0]); 38 | 39 | } 40 | 41 | @Test 42 | public void testGetSysProperties() throws Exception { 43 | assertEquals("hadoop.user=foo", execJavaCliParser.getSysProperties()[0]); 44 | 45 | } 46 | 47 | @Test 48 | public void testGetMainArguments() throws Exception { 49 | assertEquals("com.test.foo", execJavaCliParser.getMainArguments()[0]); 50 | } 51 | 52 | 53 | @Test 54 | public void testGetMain() throws Exception { 55 | assertEquals("java", execJavaCliParser.getMain()); 56 | } 57 | 58 | @Test 59 | public void noMain() throws Exception { 60 | ExecJavaCliParser execJavaCliParser = new ExecJavaCliParser(""); 61 | assertEquals(null, execJavaCliParser.getMain()); 62 | } 63 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/yarn/util/ExecShellCliParserTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.yarn.util; 2 | 3 | import static org.junit.Assert.assertEquals; 4 | 5 | import org.junit.Before; 6 | import org.junit.Test; 7 | import org.slf4j.Logger; 8 | import org.slf4j.LoggerFactory; 9 | 10 | /* 11 | * Licensed under the Apache License, Version 2.0 (the "License"); 12 | * you may not use this file except in compliance with the License. 13 | * You may obtain a copy of the License at 14 | * 15 | * http://www.apache.org/licenses/LICENSE-2.0 16 | * 17 | * Unless required by applicable law or agreed to in writing, software 18 | * distributed under the License is distributed on an "AS IS" BASIS, 19 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 | * See the License for the specific language governing permissions and 21 | * limitations under the License.
22 | */ 23 | 24 | public class ExecShellCliParserTest { 25 | 26 | // Logger 27 | private static final Logger LOG = LoggerFactory.getLogger(ExecShellCliParserTest.class); 28 | 29 | private static final String command = "whoami"; 30 | private static final String stdoutFile = "./target/com.github.sakserv.minicluster.impl.YarnLocalCluster/com.github.sakserv.minicluster.impl.YarnLocalCluster-logDir-nm-0_0/application_1431983196063_0001/container_1431983196063_0001_01_000002/stdout"; 31 | private static final String stderrFile = "./target/com.github.sakserv.minicluster.impl.YarnLocalCluster/com.github.sakserv.minicluster.impl.YarnLocalCluster-logDir-nm-0_0/application_1431983196063_0001/container_1431983196063_0001_01_000002/stderr"; 32 | private static final String cliString = command + " 1>" + stdoutFile + " 2>" + stderrFile; 33 | 34 | private static ExecShellCliParser execShellCliParser; 35 | 36 | @Before 37 | public void setUp() { 38 | execShellCliParser = new ExecShellCliParser(cliString); 39 | } 40 | 41 | @Test 42 | public void testGetCommand() { 43 | assertEquals(command, execShellCliParser.getCommand()); 44 | 45 | } 46 | 47 | @Test 48 | public void testGetStdoutPath() { 49 | assertEquals(stdoutFile, execShellCliParser.getStdoutPath()); 50 | } 51 | 52 | @Test 53 | public void testGetStderrPath() { 54 | assertEquals(stderrFile, execShellCliParser.getStderrPath()); 55 | } 56 | 57 | @Test 58 | public void testRunCommand() throws Exception { 59 | assertEquals(0, execShellCliParser.runCommand()); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/java/com/github/sakserv/minicluster/yarn/util/ReflectionUtilsTest.java: -------------------------------------------------------------------------------- 1 | package com.github.sakserv.minicluster.yarn.util; 2 | 3 | import static org.junit.Assert.assertNull; 4 | import static org.junit.Assert.assertTrue; 5 | 6 | import java.lang.reflect.Field; 7 | import java.lang.reflect.Method; 8 | 9 | import org.junit.Rule; 10 | import org.junit.Test; 11 | import org.junit.rules.ExpectedException; 12 | 13 | /* 14 | * Licensed under the Apache License, Version 2.0 (the "License"); 15 | * you may not use this file except in compliance with the License. 16 | * You may obtain a copy of the License at 17 | * 18 | * http://www.apache.org/licenses/LICENSE-2.0 19 | * 20 | * Unless required by applicable law or agreed to in writing, software 21 | * distributed under the License is distributed on an "AS IS" BASIS, 22 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 23 | * See the License for the specific language governing permissions and 24 | * limitations under the License. 
25 | */ public class ReflectionUtilsTest { 26 | 27 | @Rule 28 | public ExpectedException exception = ExpectedException.none(); 29 | 30 | ReflectionUtils reflectionUtils = new ReflectionUtils(); 31 | 32 | public class ReflectTester { 33 | 34 | private String foo = "baz"; 35 | 36 | public ReflectTester() { 37 | setFoo("bar"); 38 | } 39 | 40 | private String getFoo() { 41 | return foo; 42 | } 43 | 44 | private void setFoo(String foo) { 45 | this.foo = foo; 46 | } 47 | } 48 | 49 | @Test 50 | public void testGetMethodAndMakeAccessible() throws Exception { 51 | Method m = reflectionUtils.getMethodAndMakeAccessible(ReflectTester.class, "getFoo"); 52 | assertTrue(m.isAccessible()); 53 | } 54 | 55 | @Test 56 | public void testIllegalArgumentExceptionGetMethodAndMakeAccessible() { 57 | exception.expect(IllegalArgumentException.class); 58 | Method m = reflectionUtils.getMethodAndMakeAccessible(ReflectTester.class, "getBar"); 59 | } 60 | 61 | @Test 62 | public void testGetFieldAndMakeAccessible() throws Exception { 63 | Field f = reflectionUtils.getFieldAndMakeAccessible(ReflectTester.class, "foo"); 64 | assertTrue(f.isAccessible()); 65 | } 66 | 67 | @Test 68 | public void testVoidGetFieldAndMakeAccessible() { 69 | assertNull(reflectionUtils.getFieldAndMakeAccessible(ReflectTester.class, "bar")); 70 | } 71 | 72 | @Test 73 | public void testGetFieldValue() throws Exception { // TODO: not yet implemented 74 | } 75 | 76 | @Test 77 | public void testGetInvocableConstructor() throws Exception { 78 | // TODO: not yet implemented 79 | } 80 | } -------------------------------------------------------------------------------- /hadoop-mini-clusters-yarn/src/test/resources/simple-yarn-app-1.1.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/hadoop-mini-clusters-yarn/src/test/resources/simple-yarn-app-1.1.0.jar -------------------------------------------------------------------------------- /hadoop-mini-clusters-zookeeper/pom.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 3 | <parent> 4 | <artifactId>hadoop-mini-clusters</artifactId> 5 | <groupId>com.github.sakserv</groupId> 6 | <version>0.1.17-SNAPSHOT</version> 7 | </parent> 8 | <modelVersion>4.0.0</modelVersion> 9 | 10 | <artifactId>hadoop-mini-clusters-zookeeper</artifactId> 11 | 12 | <dependencies> 13 | 14 | 15 | <dependency> 16 | <groupId>org.apache.curator</groupId> 17 | <artifactId>curator-test</artifactId> 18 | <version>${curator.version}</version> 19 | </dependency> 20 | 21 | 22 | <dependency> 23 | <groupId>com.github.sakserv</groupId> 24 | <artifactId>hadoop-mini-clusters-common</artifactId> 25 | <version>${project.version}</version> 26 | </dependency> 27 | 28 | </dependencies> 29 | 30 | 31 | </project> -------------------------------------------------------------------------------- /hadoop-mini-clusters-zookeeper/src/main/resources/default.properties: -------------------------------------------------------------------------------- 1 | # Zookeeper 2 | zookeeper.temp.dir=embedded_zk 3 | zookeeper.host=127.0.0.1 4 | zookeeper.port=22010 5 | zookeeper.connection.string=127.0.0.1:22010 6 | zookeeper.max.client.cnxns=60 7 | zookeeper.election.port=22001 8 | zookeeper.quorum.port=22002 9 | zookeeper.delete.data.directory.on.close=false 10 | zookeeper.server.id=1 11 | zookeeper.ticktime=2000 -------------------------------------------------------------------------------- /hadoop-mini-clusters-zookeeper/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Root logger option 2 | log4j.rootLogger=INFO, stdout 3 | 4 | # Direct log messages to stdout 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.Target=System.out 7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
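# Pattern fields: %d date/time, %-5p level padded to 5 chars, %c{1} last segment of the logger name, %L line number, %m message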
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -------------------------------------------------------------------------------- /hadoop-mini-clusters-zookeeper/src/test/java/com/github/sakserv/minicluster/impl/ZookeeperLocalClusterIntegrationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed under the Apache License, Version 2.0 (the "License"); 3 | * you may not use this file except in compliance with the License. 4 | * You may obtain a copy of the License at 5 | * 6 | * http://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * Unless required by applicable law or agreed to in writing, software 9 | * distributed under the License is distributed on an "AS IS" BASIS, 10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | * See the License for the specific language governing permissions and 12 | * limitations under the License. 13 | */ 14 | 15 | package com.github.sakserv.minicluster.impl; 16 | 17 | import com.github.sakserv.minicluster.config.ConfigVars; 18 | import com.github.sakserv.propertyparser.PropertyParser; 19 | import org.apache.curator.framework.CuratorFramework; 20 | import org.apache.curator.framework.CuratorFrameworkFactory; 21 | import org.apache.curator.retry.ExponentialBackoffRetry; 22 | import org.apache.zookeeper.CreateMode; 23 | import org.apache.zookeeper.ZooDefs; 24 | import org.apache.zookeeper.data.ACL; 25 | import org.junit.AfterClass; 26 | import org.junit.BeforeClass; 27 | import org.junit.Test; 28 | import org.slf4j.Logger; 29 | import org.slf4j.LoggerFactory; 30 | 31 | import java.io.IOException; 32 | import java.util.ArrayList; 33 | import java.util.List; 34 | 35 | import static org.junit.Assert.assertEquals; 36 | import static org.junit.Assert.assertTrue; 37 | 38 | public class ZookeeperLocalClusterIntegrationTest { 39 | 40 | // Logger 41 | private static final Logger LOG = LoggerFactory.getLogger(ZookeeperLocalClusterIntegrationTest.class); 42 | 43 | // Setup the property parser 44 | private static PropertyParser propertyParser; 45 | 46 | static { 47 | try { 48 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE); 49 | propertyParser.parsePropsFile(); 50 | } catch (IOException e) { 51 | LOG.error("Unable to load property file: {}", ConfigVars.DEFAULT_PROPS_FILE, e); 52 | } 53 | } 54 | 55 | private static ZookeeperLocalCluster zookeeperLocalCluster; 56 | 57 | @BeforeClass 58 | public static void setUp() throws Exception { 59 | zookeeperLocalCluster = new ZookeeperLocalCluster.Builder() 60 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY))) 61 | .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY)) 62 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY)) 63 | .setMaxClientCnxns(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_MAX_CLIENT_CNXNS_KEY))) 64 | .setElectionPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_ELECTION_PORT_KEY))) 65 | .setQuorumPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_QUORUM_PORT_KEY))) 66 | .setDeleteDataDirectoryOnClose(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.ZOOKEEPER_DELETE_DATA_DIRECTORY_ON_CLOSE_KEY))) 67 | .setServerId(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_SERVER_ID_KEY)))
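// tickTime is ZooKeeper's basic time unit in milliseconds (2000 in default.properties); session timeouts are negotiated as multiples of it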
68 | .setTickTime(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TICKTIME_KEY))) 69 | .build(); 70 | zookeeperLocalCluster.start(); 71 | } 72 | 73 | @AfterClass 74 | public static void tearDown() throws Exception { 75 | zookeeperLocalCluster.stop(); 76 | } 77 | 78 | @Test 79 | public void testZookeeperCluster() throws Exception { 80 | assertEquals(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY), 81 | zookeeperLocalCluster.getZookeeperConnectionString()); 82 | 83 | String znode = "/zooooooo"; 84 | String child1 = "child1"; 85 | String child2 = "child2"; 86 | 87 | try (CuratorFramework client = CuratorFrameworkFactory.newClient(zookeeperLocalCluster.getZookeeperConnectionString(), 88 | new ExponentialBackoffRetry(1000, 3))) { 89 | client.start(); 90 | 91 | client.create().withMode(CreateMode.PERSISTENT).forPath(znode); 92 | 93 | client.create().withMode(CreateMode.PERSISTENT).forPath(znode + "/" + child1); 94 | client.create().withMode(CreateMode.PERSISTENT).forPath(znode + "/" + child2); 95 | 96 | List<String> children = client.getChildren().forPath(znode); 97 | assertEquals(2, children.size()); 98 | assertTrue(children.contains(child1)); 99 | assertTrue(children.contains(child2)); 100 | } 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /windows_libs/2.3.0.0/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.0.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.3.0.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.0.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.3.0.0/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.0.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.3.0.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.0.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.3.2.0/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.2.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.3.2.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.2.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.3.2.0/lib/hdfs.dll: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.2.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.3.2.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.2.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.3.4.0/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.4.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.3.4.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.4.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.3.4.0/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.4.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.3.4.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.3.4.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.4.0.0/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.0.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.4.0.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.0.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.4.0.0/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.0.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.4.0.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.0.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.4.2.0/bin/winutils.exe: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.2.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.4.2.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.2.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.4.2.0/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.2.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.4.2.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.4.2.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.5.0.0/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.0.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.5.0.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.0.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.5.0.0/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.0.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.5.0.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.0.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.5.3.0/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.3.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.5.3.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.3.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.5.3.0/lib/hdfs.dll: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.3.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.5.3.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.5.3.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.6.0.3/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.0.3/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.6.0.3/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.0.3/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.6.0.3/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.0.3/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.6.0.3/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.0.3/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.6.1.0/bin/winutils.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.1.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.6.1.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.1.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.6.1.0/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.1.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.6.1.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.1.0/lib/libwinutils.lib -------------------------------------------------------------------------------- /windows_libs/2.6.2.0/bin/winutils.exe: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.2.0/bin/winutils.exe -------------------------------------------------------------------------------- /windows_libs/2.6.2.0/lib/hadoop.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.2.0/lib/hadoop.dll -------------------------------------------------------------------------------- /windows_libs/2.6.2.0/lib/hdfs.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.2.0/lib/hdfs.dll -------------------------------------------------------------------------------- /windows_libs/2.6.2.0/lib/libwinutils.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sakserv/hadoop-mini-clusters/787a02c728a1035c0742a22468a227616dc748d0/windows_libs/2.6.2.0/lib/libwinutils.lib --------------------------------------------------------------------------------
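A note on the windows_libs payload above: each versioned directory carries one HDP release's Hadoop native Windows binaries (winutils.exe, hadoop.dll, hdfs.dll, libwinutils.lib). On Windows, Hadoop's Shell class resolves winutils.exe through the hadoop.home.dir system property (or the HADOOP_HOME environment variable), expecting the binary under a bin subdirectory. The sketch below shows that wiring done by hand, under those assumptions; the class name and the choice of the 2.6.2.0 directory are illustrative only, not part of this project's API.

import java.io.File;

public class WindowsNativeLibsExample {
    public static void main(String[] args) {
        // Illustrative: pick the windows_libs version matching the Hadoop jars on the classpath
        File hadoopHome = new File("windows_libs/2.6.2.0");
        // Hadoop caches ${hadoop.home.dir}/bin/winutils.exe when Shell initializes,
        // so the property must be set before the first Hadoop class loads
        System.setProperty("hadoop.home.dir", hadoopHome.getAbsolutePath());
        System.out.println("hadoop.home.dir = " + System.getProperty("hadoop.home.dir"));
    }
}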