├── .dockerignore
├── .gitignore
├── .travis.yml
├── CHANGES.md
├── Dockerfile
├── LICENSE
├── README.md
├── checkstyle.xml
├── docker_build.sh
├── hadoop-mini-clusters-activemq
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── ActivemqLocalBroker.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── ActivemqLocalBrokerIntegrationTest.java
│ └── ActivemqLocalBrokerTest.java
├── hadoop-mini-clusters-common
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ ├── MiniCluster.java
│ │ │ ├── auth
│ │ │ └── Jaas.java
│ │ │ ├── config
│ │ │ ├── ConfigVars.java
│ │ │ └── package-info.java
│ │ │ ├── http
│ │ │ └── HttpUtils.java
│ │ │ └── util
│ │ │ ├── FileUtils.java
│ │ │ └── WindowsLibsUtils.java
│ └── resources
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ ├── http
│ └── HttpUtilsTest.java
│ └── util
│ └── FileUtilsTest.java
├── hadoop-mini-clusters-hbase
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ ├── HbaseLocalCluster.java
│ │ │ └── HbaseRestLocalCluster.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── HbaseLocalClusterIntegrationTest.java
│ └── HbaseLocalClusterTest.java
├── hadoop-mini-clusters-hdfs
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── HdfsLocalCluster.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── HdfsLocalClusterIntegrationTest.java
│ └── HdfsLocalClusterTest.java
├── hadoop-mini-clusters-hivemetastore
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── HiveLocalMetaStore.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── HiveLocalMetaStoreIntegrationTest.java
│ └── HiveLocalMetaStoreTest.java
├── hadoop-mini-clusters-hiveserver2
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── HiveLocalServer2.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── HiveLocalServer2IntegrationTest.java
│ └── HiveLocalServer2Test.java
├── hadoop-mini-clusters-hyperscaledb
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── HsqldbLocalServer.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── HsqldbLocalServerIntegrationTest.java
│ └── HsqldbLocalServerTest.java
├── hadoop-mini-clusters-kafka
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ ├── impl
│ │ │ └── KafkaLocalBroker.java
│ │ │ └── systemtime
│ │ │ └── LocalSystemTime.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ ├── datatime
│ └── GenerateRandomDay.java
│ ├── impl
│ ├── KafkaLocalBrokerIntegrationTest.java
│ └── KafkaLocalBrokerTest.java
│ └── kafka
│ ├── consumer
│ └── KafkaTestConsumer.java
│ └── producer
│ └── KafkaSimpleTestProducer.java
├── hadoop-mini-clusters-kdc
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── KdcLocalCluster.java
│ └── resources
│ │ └── default.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── KdcLocalClusterHBaseIntegrationTest.java
│ ├── KdcLocalClusterHdfsIntegrationTest.java
│ ├── KdcLocalClusterTest.java
│ └── KdcLocalClusterZookeeperIntegrationTest.java
├── hadoop-mini-clusters-knox
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ ├── com
│ │ │ └── github
│ │ │ │ └── sakserv
│ │ │ │ └── minicluster
│ │ │ │ └── impl
│ │ │ │ ├── KnoxLocalCluster.java
│ │ │ │ └── LocalGatewayConfig.java
│ │ └── org
│ │ │ └── apache
│ │ │ └── hadoop
│ │ │ └── gateway
│ │ │ ├── GatewayServer.java
│ │ │ └── GatewayServlet.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── KnoxLocalClusterIntegrationTest.java
│ └── KnoxLocalClusterTest.java
├── hadoop-mini-clusters-mapreduce
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── MRLocalCluster.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ ├── java
│ └── com
│ │ └── github
│ │ └── sakserv
│ │ └── minicluster
│ │ ├── impl
│ │ ├── MRLocalClusterIntegrationTest.java
│ │ └── MRLocalClusterTest.java
│ │ └── mapreduce
│ │ ├── Driver.java
│ │ ├── SumReducer.java
│ │ └── WordMapper.java
│ └── resources
│ └── mr_input.txt
├── hadoop-mini-clusters-mongodb
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── MongodbLocalServer.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── MongodbLocalServerIntegrationTest.java
│ └── MongodbLocalServerTest.java
├── hadoop-mini-clusters-oozie
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ ├── impl
│ │ │ └── OozieLocalServer.java
│ │ │ └── oozie
│ │ │ ├── sharelib
│ │ │ ├── Framework.java
│ │ │ └── util
│ │ │ │ └── OozieShareLibUtil.java
│ │ │ └── util
│ │ │ └── OozieConfigUtil.java
│ └── resources
│ │ ├── default.properties
│ │ ├── localoozie-log4j.properties
│ │ ├── log4j.properties
│ │ ├── sharelib.properties
│ │ └── test_input.txt
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── OozieLocalServerIntegrationTest.java
│ └── OozieLocalServerTest.java
├── hadoop-mini-clusters-storm
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── StormLocalCluster.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ ├── impl
│ ├── StormLocalClusterIntegrationTest.java
│ └── StormLocalClusterTest.java
│ └── storm
│ ├── bolt
│ └── PrinterBolt.java
│ └── spout
│ └── RandomSentenceSpout.java
├── hadoop-mini-clusters-yarn
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ ├── impl
│ │ │ └── YarnLocalCluster.java
│ │ │ └── yarn
│ │ │ ├── InJvmContainerExecutor.java
│ │ │ ├── SystemExitException.java
│ │ │ └── util
│ │ │ ├── EnvironmentUtils.java
│ │ │ ├── ExecJavaCliParser.java
│ │ │ ├── ExecShellCliParser.java
│ │ │ └── ReflectionUtils.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ ├── java
│ └── com
│ │ └── github
│ │ └── sakserv
│ │ └── minicluster
│ │ ├── impl
│ │ ├── YarnLocalClusterInJvmContainerExecutorTest.java
│ │ ├── YarnLocalClusterIntegrationTest.java
│ │ └── YarnLocalClusterTest.java
│ │ └── yarn
│ │ ├── InJvmContainerExecutorTest.java
│ │ ├── SystemExitExceptionTest.java
│ │ ├── simpleyarnapp
│ │ └── Client.java
│ │ └── util
│ │ ├── EnvironmentUtilsTest.java
│ │ ├── ExecJavaCliParserTest.java
│ │ ├── ExecShellCliParserTest.java
│ │ └── ReflectionUtilsTest.java
│ └── resources
│ └── simple-yarn-app-1.1.0.jar
├── hadoop-mini-clusters-zookeeper
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── com
│ │ │ └── github
│ │ │ └── sakserv
│ │ │ └── minicluster
│ │ │ └── impl
│ │ │ └── ZookeeperLocalCluster.java
│ └── resources
│ │ ├── default.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── com
│ └── github
│ └── sakserv
│ └── minicluster
│ └── impl
│ ├── ZookeeperLocalClusterIntegrationTest.java
│ └── ZookeeperLocalClusterTest.java
├── pom.xml
└── windows_libs
├── 2.3.0.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.3.2.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.3.4.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.4.0.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.4.2.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.5.0.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.5.3.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.6.0.3
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
├── 2.6.1.0
├── bin
│ └── winutils.exe
└── lib
│ ├── hadoop.dll
│ ├── hdfs.dll
│ └── libwinutils.lib
└── 2.6.2.0
├── bin
└── winutils.exe
└── lib
├── hadoop.dll
├── hdfs.dll
└── libwinutils.lib
/.dockerignore:
--------------------------------------------------------------------------------
1 | *
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 |
3 | .DS_Store
4 |
5 | # Mobile Tools for Java (J2ME)
6 | .mtj.tmp/
7 |
8 | # Package Files #
9 | #*.jar
10 | *.war
11 | *.ear
12 |
13 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
14 | hs_err_pid*
15 |
16 | # Build
17 | target/
18 | build/
19 |
20 | # Idea
21 | .idea
22 | */*.iml
23 | *.iml
24 |
25 | # Coverage report
26 | coverage-error.log
27 |
28 | # Oozie sharelib cache
29 | share_lib_cache
30 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: java
2 |
3 | jdk:
4 | - oraclejdk8
5 |
6 | cache:
7 | directories:
8 | - $HOME/.m2
9 |
10 | script: travis_wait 60 mvn -B clean test
11 |
12 | after_success: travis_wait 60 mvn -B clean cobertura:cobertura coveralls:report
13 |
--------------------------------------------------------------------------------
/CHANGES.md:
--------------------------------------------------------------------------------
1 | Change Log
2 | ==========
3 | ### 0.1.16 - 1/18/2019
4 | * Add support for HDP 2.6.5.0
5 |
6 | ### 0.1.15 - 9/14/2018
7 | * Add support for HDP 2.6.3.0
8 | * Fix kdc NPE
9 | * Fix HADOOP_HOME on Windows
10 | * Fix missing libx on Windows
11 |
12 | ### 0.1.14 - 9/25/2017
13 | * Add mini kdc (Thanks @treydone)
14 | * Improve oozie documentation
15 | * Add support for HDP 2.6.2.0
16 |
17 | ### 0.1.13 - 7/11/2017
18 | * Add proxy support for sharelib downloads (Thanks @rajesh-kumar)
19 | * Work around for Knox dependency conflicts (Thanks @jetoile)
20 |
21 | ### 0.1.12 - 6/29/2017
22 | * Fix YARN RM address and hostname for issue #27
23 | * Remove Intellij assets
24 | * Fix directory clean up; add cleanup for Knox and Storm
25 | * Oozie share lib fixes for "real" workflows
26 | * HBase InfoServer is properly shutdown
27 | * Allow NimbusClient to work with StormLocalCluster
28 | * Fixed ports for MapReduce Job History Server
29 | * Add support for HDP 2.6.0 and 2.6.1
30 |
31 | ### 0.1.11 - 1/20/2017
32 | * Make RestBuilder public (Thanks @jetoile)
33 |
34 | ### 0.1.10 - 1/16/2017
35 | * HBase REST support (Thanks @jetoile)
36 | * Knox support (Thanks Treydone)
37 |
38 | ### 0.1.9 - 12/26/2016
39 | * Make JDK 8 the default
40 | * Add support for HDP 2.5.3
41 | * Add Kafka 2.10.1 (Thanks @timvw)
42 | * Fix LocalOozie logging for issue #21
43 |
44 | ### 0.1.8 - 10/03/2016
45 | * Add support for HDP 2.5.0
46 | * Eliminate fat jar for hbase (Thanks @isendel)
47 | * Coordinator support for oozie (Thanks @RomanKovalik)
48 |
49 | ### 0.1.7 - 05/12/2016
50 | * Add support for HDP 2.4.2
51 |
52 | ### 0.1.6 - 05/09/2016
53 | * Add support for Curator InstanceSpec to improve ZK flexibility
54 |
55 | ### 0.1.5 - 04/06/2016
56 | * Add support for HDP 2.4.0
57 | * Handle the evolving Kafka apis via reflection
58 | * Fix LMAX disruptor conflict in storm-core + hbase-testing-utils
59 | * Added script to allow running tests in a docker container
60 |
61 | ### 0.1.4 - 02/10/2016
62 | * Add support for HDP 2.3.4
63 |
64 | ### 0.1.3 - 12/21/2015
65 | * Add support for WebHDFS via setHdfsNamenodeHttpPort
66 |
67 | ### 0.1.2 - 12/01/2015
68 | * Add more flexibility to windows libs location via HADOOP_HOME
69 |
70 | ### 0.1.1 - 11/02/2015
71 | * Fix maven build vars as a workaround for the maven release plugin
72 |
73 | ### 0.1.0 - 11/01/2015
74 | * Major changes in this release
75 | * Moved each mini cluster to a separate module to reduce deps - fixes #3
76 | * Removed the shade plugin
77 | * Added maven profile support to allow for supporting multiple versions of HDP
78 | * Added Oozie Share Lib support - fixes #2
79 | * Added Windows support - fixes #1
80 | * Avoid needlessly creating Strings for log messages
81 | * Cleaned up imports
82 |
83 | ### 0.0.15 - 08/24/2015
84 | * Upgraded dependencies to Hadoop 2.7.1 (HDP 2.3)
85 |
86 | ### 0.0.14 - 07/28/2015
87 | * Added Oozie Support
88 | * Added optional argument to HDFS to enable the current user as a proxy user
89 |
90 | ### 0.0.13 - 07/04/2015
91 | * Added YARN Support
92 | * Added MRv2 Support
93 | * Added HBase Support
94 | * Added support for the InJvmContainerExecutor
95 | * Updated dependencies to apache releases due to HWX repo issues
96 | * Added additional details to the README
97 | * 98% code coverage for all classes (less InJvmContainerExecutor)
98 |
99 | ### 0.0.12 - 02/08/2015
100 | * Added HyperSQL support
101 |
102 | ### 0.0.11 - 02/02/2015
103 | * Fixed shade plugin to resolve corrupt jar issues
104 | * Added usage examples to README
105 |
106 | ### 0.0.10 - 02/02/2015 - DO NOT USE
107 | * RELEASE NUKED DUE TO SHADE PLUGIN PRODUCING A BAD JAR
108 | * Breaking Change: Moved all mini clusters to the builder pattern
109 | * Moved configuration to properties file
110 | * Split unit and integration tests
111 | * Refactored the pom
112 |
113 | ### 0.0.9 - 01/19/2015 - DO NOT USE
114 | * RELEASE NUKED DUE TO SHADE PLUGIN PRODUCING A BAD JAR
115 | * Moved to log4j
116 | * Added proper assertions
117 | * Option to wait on topology kill for StormLocalCluster
118 | * Added ASL headers
119 | * Added a proper README
120 |
121 | ### 0.0.8 - 01/08/2015
122 | * Added embedded MongodbLocalServer
123 |
124 | ### 0.0.7 - 01/07/2015
125 | * Added missing calls to cleanUp()
126 |
127 | ### 0.0.6 - 01/06/2015
128 | * First Release
129 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:latest
2 |
3 | RUN yum install maven wget unzip -y
4 | RUN cd /tmp && wget https://github.com/sakserv/hadoop-mini-clusters/archive/master.zip && unzip master.zip && cd hadoop-mini-clusters-master && mvn clean test
5 |
--------------------------------------------------------------------------------
/docker_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SCRIPT_NAME=$(basename $0)
4 | SCRIPT_PATH=$(cd `dirname $0` && pwd)
5 |
6 | docker build --no-cache -t hadoop-mini-clusters $SCRIPT_PATH
7 | docker run -m 6g -d hadoop-mini-clusters
8 | #docker rm $(docker ps -a --filter ancestor=hadoop-mini-clusters)
9 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-activemq/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <parent>
5 |         <artifactId>hadoop-mini-clusters</artifactId>
6 |         <groupId>com.github.sakserv</groupId>
7 |         <version>0.1.17-SNAPSHOT</version>
8 |     </parent>
9 |     <modelVersion>4.0.0</modelVersion>
10 |
11 |     <artifactId>hadoop-mini-clusters-activemq</artifactId>
12 |
13 |     <dependencies>
14 |
15 |         <dependency>
16 |             <groupId>org.apache.activemq</groupId>
17 |             <artifactId>activemq-all</artifactId>
18 |             <version>${activemq.version}</version>
19 |         </dependency>
20 |
21 |         <dependency>
22 |             <groupId>com.github.sakserv</groupId>
23 |             <artifactId>hadoop-mini-clusters-common</artifactId>
24 |             <version>${project.version}</version>
25 |         </dependency>
26 |
27 |     </dependencies>
28 |
29 | </project>
30 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-activemq/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # ActiveMQ
2 | activemq.hostname=localhost
3 | activemq.port=61616
4 | activemq.queue=defaultQueue
5 | activemq.store.dir=activemq-data
6 | activemq.uri.prefix=vm://
7 | activemq.uri.postfix=?create=false
--------------------------------------------------------------------------------
/hadoop-mini-clusters-activemq/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-activemq/src/test/java/com/github/sakserv/minicluster/impl/ActivemqLocalBrokerIntegrationTest.java:
--------------------------------------------------------------------------------
1 | package com.github.sakserv.minicluster.impl;
2 |
3 | import static org.junit.Assert.assertEquals;
4 |
5 | import java.io.IOException;
6 |
7 | import javax.jms.JMSException;
8 |
9 | import org.junit.AfterClass;
10 | import org.junit.BeforeClass;
11 | import org.junit.Test;
12 | import org.slf4j.Logger;
13 | import org.slf4j.LoggerFactory;
14 |
15 | import com.github.sakserv.minicluster.config.ConfigVars;
16 | import com.github.sakserv.propertyparser.PropertyParser;
17 |
18 | public class ActivemqLocalBrokerIntegrationTest {
19 |
20 | // Logger
21 | private static final Logger LOG = LoggerFactory.getLogger(ActivemqLocalBrokerIntegrationTest.class);
22 |
23 | // Setup the property parser
24 | private static PropertyParser propertyParser;
25 | static {
26 | try {
27 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
28 | propertyParser.parsePropsFile();
29 | } catch(IOException e) {
30 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE));
31 | }
32 | }
33 |
34 | // Setup the activemq broker before running tests
35 | private static ActivemqLocalBroker amq;
36 |
37 | @BeforeClass
38 | public static void setUp() throws Exception {
39 | amq = new ActivemqLocalBroker.Builder()
40 | .setHostName(propertyParser.getProperty(ConfigVars.ACTIVEMQ_HOSTNAME_KEY))
41 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ACTIVEMQ_PORT_KEY)))
42 | .setQueueName(propertyParser.getProperty(ConfigVars.ACTIVEMQ_QUEUE_NAME_KEY))
43 | .setStoreDir(propertyParser.getProperty(ConfigVars.ACTIVEMQ_STORE_DIR_KEY))
44 | .setUriPrefix(propertyParser.getProperty(ConfigVars.ACTIVEMQ_URI_PREFIX_KEY))
45 | .setUriPostfix(propertyParser.getProperty(ConfigVars.ACTIVEMQ_URI_POSTFIX_KEY))
46 | .build();
47 |
48 | amq.start();
49 | }
50 |
51 |
52 | // Stop and cleanup when tests are finished
53 | @AfterClass
54 | public static void tearDown() throws Exception {
55 | amq.stop();
56 | }
57 |
58 | @Test
59 | /*
60 | sends lots of short messages and one long one
61 | */
62 | public void testMessageProcessing() throws JMSException {
63 | int n = 10000;
64 | String msg;
65 |
66 | LOG.info("ACTIVEMQ: Sending {} messages", n);
67 |
68 | //send a lot of messages
69 | for (int i = 0; i < n; i++) {
70 | msg = "hello from active mq. " + n;
71 | amq.sendTextMessage(msg);
72 | assertEquals(msg,amq.getTextMessage());
73 | }
74 |
75 | //send a really long message
76 | StringBuilder sb = new StringBuilder(n);
77 | for (int i = 0; i < n; i++) {
78 | sb.append(n).append(" ");
79 | }
80 | msg = sb.toString();
81 | amq.sendTextMessage(msg);
82 | assertEquals(msg,amq.getTextMessage());
83 |
84 | }
85 |
86 | }
87 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-common/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <parent>
5 |         <artifactId>hadoop-mini-clusters</artifactId>
6 |         <groupId>com.github.sakserv</groupId>
7 |         <version>0.1.17-SNAPSHOT</version>
8 |     </parent>
9 |     <modelVersion>4.0.0</modelVersion>
10 |
11 |     <artifactId>hadoop-mini-clusters-common</artifactId>
12 |
13 |     <dependencies>
14 |
15 |         <dependency>
16 |             <groupId>org.apache.hadoop</groupId>
17 |             <artifactId>hadoop-client</artifactId>
18 |             <version>${hadoop.version}</version>
19 |         </dependency>
20 |
21 |     </dependencies>
22 | </project>
--------------------------------------------------------------------------------
/hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/MiniCluster.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | package com.github.sakserv.minicluster;
16 |
17 | public interface MiniCluster {
18 |
19 | public void start() throws Exception;
20 |
21 | public void stop() throws Exception;
22 |
23 | public void stop(boolean cleanUp) throws Exception;
24 |
25 | public void configure() throws Exception;
26 |
27 | public void cleanUp() throws Exception;
28 |
29 | }
--------------------------------------------------------------------------------
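The interface above is the lifecycle contract shared by every module in this repository. As a minimal, hedged sketch of driving that contract (assuming ActivemqLocalBroker implements MiniCluster, as its impl package placement suggests; the builder values simply mirror the ActiveMQ default.properties shown earlier):

import com.github.sakserv.minicluster.MiniCluster;
import com.github.sakserv.minicluster.impl.ActivemqLocalBroker;

public class MiniClusterLifecycleSketch {
    public static void main(String[] args) throws Exception {
        // Assumption: ActivemqLocalBroker implements MiniCluster; values mirror default.properties.
        MiniCluster broker = new ActivemqLocalBroker.Builder()
                .setHostName("localhost")
                .setPort(61616)
                .setQueueName("defaultQueue")
                .setStoreDir("activemq-data")
                .setUriPrefix("vm://")
                .setUriPostfix("?create=false")
                .build();

        broker.start();            // bring the embedded component up
        try {
            // ... exercise the component under test ...
        } finally {
            broker.stop(true);     // stop and remove local state, per the contract above
        }
    }
}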
/hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/auth/Jaas.java:
--------------------------------------------------------------------------------
1 | package com.github.sakserv.minicluster.auth;
2 |
3 | import javax.security.auth.login.AppConfigurationEntry;
4 | import javax.security.auth.login.Configuration;
5 | import java.util.HashMap;
6 | import java.util.Map;
7 |
8 | public class Jaas extends Configuration {
9 |
10 | private static final String krb5LoginModuleName;
11 | public static final String NL = "\n";
12 |
13 | static {
14 | if (System.getProperty("java.vendor").contains("IBM")) {
15 | krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
16 | } else {
17 | krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
18 | }
19 | }
20 |
21 | private Map<String, AppConfigurationEntry> entries = new HashMap<>();
22 |
23 | public Jaas addServiceEntry(String name, String principal, String keytab, String serviceName) {
24 | Map<String, String> options = common(principal, keytab);
25 | options.put("serviceName", serviceName);
26 | entries.put(name, new AppConfigurationEntry(krb5LoginModuleName, AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options));
27 | return this;
28 | }
29 |
30 | public Jaas addEntry(String name, String principal, String keytab) {
31 | Map<String, String> options = common(principal, keytab);
32 | entries.put(name, new AppConfigurationEntry(krb5LoginModuleName, AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options));
33 | return this;
34 | }
35 |
36 | protected static Map<String, String> common(String principal, String keytab) {
37 | Map<String, String> options = new HashMap<>();
38 | options.put("keyTab", keytab);
39 | options.put("principal", principal);
40 | options.put("useKeyTab", "true");
41 | options.put("storeKey", "true");
42 | options.put("useTicketCache", "false");
43 | options.put("debug", "true");
44 | return options;
45 | }
46 |
47 | public void removeEntry(String name) {
48 | entries.remove(name);
49 | }
50 |
51 | public void clear() {
52 | entries.clear();
53 | }
54 |
55 | public Map<String, AppConfigurationEntry> getEntries() {
56 | return entries;
57 | }
58 |
59 | public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
60 | return new AppConfigurationEntry[]{entries.get(name)};
61 | }
62 |
63 | public String toFile() {
64 | StringBuilder builder = new StringBuilder();
65 | entries.forEach((e, v) -> {
66 | builder
67 | .append(e).append(" {").append(NL)
68 | .append("\t").append(krb5LoginModuleName).append(" required").append(NL);
69 | v.getOptions().forEach((o, p) ->
70 | builder.append("\t").append(o).append(" = ").append("\"" + p + "\"").append(NL));
71 | builder.append("}");
72 | });
73 | return builder.toString();
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
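A short usage sketch for the Jaas helper above; the entry name, principal, and keytab path are hypothetical placeholders, not values taken from this repository:

import com.github.sakserv.minicluster.auth.Jaas;

public class JaasSketch {
    public static void main(String[] args) {
        // Hypothetical principal and keytab path, purely for illustration.
        Jaas jaas = new Jaas()
                .addEntry("Client", "zookeeper/localhost@EXAMPLE.COM", "/tmp/zookeeper.keytab");

        // toFile() renders the registered entries as JAAS configuration text, e.g.
        //   Client {
        //       com.sun.security.auth.module.Krb5LoginModule required
        //       keyTab = "/tmp/zookeeper.keytab"
        //       ...
        //   }
        System.out.println(jaas.toFile());
    }
}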
/hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/config/package-info.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | /**
16 | * Provides the classes that contain the list of supported configuration variables.
17 | *
18 | * @since 0.1.0
19 | * @author Shane Kumpf
20 | */
21 | package com.github.sakserv.minicluster.config;
--------------------------------------------------------------------------------
/hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/http/HttpUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 | package com.github.sakserv.minicluster.http;
15 |
16 | import java.io.BufferedInputStream;
17 | import java.io.BufferedOutputStream;
18 | import java.io.File;
19 | import java.io.FileOutputStream;
20 | import java.io.IOException;
21 | import java.net.*;
22 |
23 | import org.slf4j.Logger;
24 | import org.slf4j.LoggerFactory;
25 |
26 | public class HttpUtils {
27 |
28 | // Logger
29 | private static final Logger LOG = LoggerFactory.getLogger(HttpUtils.class);
30 |
31 | // Proxy properties
32 | private static final String PROXY_PROPERTY_NAME = "HTTP_PROXY";
33 | private static final String ALL_PROXY_PROPERTY_NAME = "ALL_PROXY";
34 |
35 | public static void downloadFileWithProgress(String fileUrl, String outputFilePath) throws IOException {
36 | String fileName = fileUrl.substring(fileUrl.lastIndexOf('/') + 1);
37 | URL url = new URL(fileUrl);
38 | HttpURLConnection httpURLConnection;
39 |
40 | //Check if system proxy is set
41 | Proxy proxySettings = returnProxyIfEnabled();
42 | if (proxySettings != null) {
43 | httpURLConnection = (HttpURLConnection) (url.openConnection(proxySettings));
44 | } else {
45 | httpURLConnection = (HttpURLConnection) (url.openConnection());
46 | }
47 | long fileSize = httpURLConnection.getContentLength();
48 |
49 | // Create the parent output directory if it doesn't exist
50 | if (!new File(outputFilePath).getParentFile().isDirectory()) {
51 | new File(outputFilePath).getParentFile().mkdirs();
52 | }
53 |
54 | BufferedInputStream bufferedInputStream = new BufferedInputStream(httpURLConnection.getInputStream());
55 | FileOutputStream fileOutputStream = new FileOutputStream(outputFilePath);
56 | BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(fileOutputStream, 1024);
57 |
58 | byte[] data = new byte[1024];
59 | long downloadedFileSize = 0;
60 |
61 | Integer previousProgress = 0;
62 | int x = 0;
63 | while ((x = bufferedInputStream.read(data, 0, 1024)) >= 0) {
64 | downloadedFileSize += x;
65 |
66 | final int currentProgress = (int) (((double) downloadedFileSize / (double) fileSize) * 100d);
67 | if (!previousProgress.equals(currentProgress)) {
68 | LOG.info("HTTP: Download Status: Filename {} - {}% ({}/{})", fileName, currentProgress,
69 | downloadedFileSize, fileSize);
70 | previousProgress = currentProgress;
71 | }
72 |
73 | bufferedOutputStream.write(data, 0, x);
74 | }
75 | bufferedOutputStream.close();
76 | bufferedInputStream.close();
77 | }
78 |
79 | public static Proxy returnProxyIfEnabled() {
80 | LOG.debug("returnProxyIfEnabled() start!!");
81 | String proxyStarturl = "http://";
82 |
83 | String proxyURLString = System.getProperty(PROXY_PROPERTY_NAME) != null ? System.getProperty(PROXY_PROPERTY_NAME)
84 | : System.getProperty(PROXY_PROPERTY_NAME.toLowerCase());
85 | String allproxyURLString = System.getProperty(ALL_PROXY_PROPERTY_NAME) != null
86 | ? System.getProperty(ALL_PROXY_PROPERTY_NAME) : System.getProperty(ALL_PROXY_PROPERTY_NAME.toLowerCase());
87 | //Pick PROXY URL from two widely used system properties
88 | String finalProxyString = proxyURLString != null ? proxyURLString : allproxyURLString;
89 | URL proxyURL = null;
90 |
91 | try {
92 | //If Proxy URL starts with HTTP then use HTTP PROXY settings
93 | if (finalProxyString != null && finalProxyString.toLowerCase().startsWith(proxyStarturl)) {
94 | // Basic method to validate proxy URL is correct or not.
95 | proxyURL = returnParsedURL(finalProxyString);
96 | LOG.debug("protocol of proxy used is: " + proxyURL.getProtocol());
97 | return new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyURL.getHost(), proxyURL.getPort()));
98 | //If Proxy URL starts with no protocol then assume it is HTTP
99 | } else if (finalProxyString != null && !finalProxyString.contains("://")
100 | && finalProxyString.split(":").length == 2) {
101 |
102 | LOG.debug("protocol of proxy used is: http default");
103 | proxyURL = returnParsedURL(proxyStarturl.concat(finalProxyString));
104 | return proxyURL != null ? new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyURL.getHost(), proxyURL.getPort())) : null;
105 | //If Proxy URL starts with SOCKS4 or SOCKS5 protocol then go for SOCKS settings
106 | } else if (finalProxyString != null && finalProxyString.toLowerCase().startsWith("sock")
107 | && finalProxyString.split("://").length == 2) {
108 | LOG.debug("protocol of proxy used is: Socks");
109 | proxyURL = returnParsedURL(proxyStarturl.concat(finalProxyString.split("://")[1]));
110 | return proxyURL != null ? new Proxy(Proxy.Type.SOCKS, new InetSocketAddress(proxyURL.getHost(), proxyURL.getPort())) : null;
111 | }
112 | } catch (MalformedURLException | URISyntaxException mUE) {
113 | LOG.error("Can not configure Proxy because URL {} is incorrect: " + mUE, finalProxyString);
114 | }
115 |
116 | return null;
117 | }
118 |
119 | private static URL returnParsedURL(String urlString) throws MalformedURLException, URISyntaxException {
120 | if (urlString != null) {
121 | URL url = new URL(urlString);
122 | url.toURI();
123 | LOG.info("System has been set to use proxy. Hence, configuring proxy URL: {}", urlString);
124 | return url;
125 | }
126 | return null;
127 | }
128 | }
129 |
--------------------------------------------------------------------------------
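A hedged sketch of calling the downloader above; the proxy address, URL, and output path are placeholders. Proxy selection is driven entirely by the HTTP_PROXY / ALL_PROXY system properties inspected in returnProxyIfEnabled():

import com.github.sakserv.minicluster.http.HttpUtils;

public class HttpUtilsSketch {
    public static void main(String[] args) throws Exception {
        // Optional: route the download through a proxy (placeholder address).
        System.setProperty("HTTP_PROXY", "http://127.0.0.1:3128");

        // Placeholder URL and destination; download progress is logged at INFO.
        HttpUtils.downloadFileWithProgress(
                "https://example.org/some-archive.tar.gz",
                "/tmp/downloads/some-archive.tar.gz");
    }
}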
/hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/util/FileUtils.java:
--------------------------------------------------------------------------------
1 | package com.github.sakserv.minicluster.util;
2 |
3 | import org.slf4j.LoggerFactory;
4 |
5 | import java.io.IOException;
6 | import java.nio.file.*;
7 | import java.nio.file.attribute.BasicFileAttributes;
8 |
9 | public final class FileUtils {
10 |
11 | // Logger
12 | private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(FileUtils.class);
13 |
14 | public static void deleteFolder(String directory, boolean quietly) {
15 | try {
16 | Path directoryPath = Paths.get(directory).toAbsolutePath();
17 | if (!quietly) {
18 | LOG.info("FILEUTILS: Deleting contents of directory: {}",
19 | directoryPath.toAbsolutePath().toString());
20 | }
21 | Files.walkFileTree(directoryPath, new SimpleFileVisitor<Path>() {
22 | @Override
23 | public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
24 | throws IOException {
25 | Files.delete(file);
26 | if (!quietly) {
27 | LOG.info("Removing file: {}", file.toAbsolutePath().toString());
28 | }
29 | return FileVisitResult.CONTINUE;
30 | }
31 |
32 | @Override
33 | public FileVisitResult postVisitDirectory(Path dir, IOException exc)
34 | throws IOException {
35 | Files.delete(dir);
36 | if (!quietly) {
37 | LOG.info("Removing directory: {}", dir.toAbsolutePath().toString());
38 | }
39 | return FileVisitResult.CONTINUE;
40 | }
41 | });
42 | } catch (IOException e) {
43 | LOG.error("FILEUTILS: Unable to remove {}", directory);
44 | }
45 | }
46 |
47 | public static void deleteFolder(String directory) {
48 | deleteFolder(directory, false);
49 | }
50 |
51 | @Override
52 | public String toString() {
53 | return "FileUtils";
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
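A minimal sketch of the recursive delete helper above; the directory names are placeholders. The boolean overload suppresses the per-file INFO logging:

import com.github.sakserv.minicluster.util.FileUtils;

public class FileUtilsSketch {
    public static void main(String[] args) {
        // Placeholder directories: the helper walks the tree, deleting files first, then directories.
        FileUtils.deleteFolder("embedded_hdfs");       // logs each deletion
        FileUtils.deleteFolder("embedded_zk", true);   // quiet variant
    }
}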
/hadoop-mini-clusters-common/src/main/java/com/github/sakserv/minicluster/util/WindowsLibsUtils.java:
--------------------------------------------------------------------------------
1 | package com.github.sakserv.minicluster.util;
2 |
3 | import java.io.File;
4 |
5 | import org.apache.hadoop.fs.Path;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | public class WindowsLibsUtils {
10 |
11 | // Logger
12 | private static final Logger LOG = LoggerFactory.getLogger(WindowsLibsUtils.class);
13 |
14 | public static void setHadoopHome() {
15 |
16 | // Set hadoop.home.dir to point to the windows lib dir
17 | if (System.getProperty("os.name").startsWith("Windows")) {
18 |
19 | String windowsLibDir = getHadoopHome();
20 |
21 | LOG.info("WINDOWS: Setting hadoop.home.dir: {}", windowsLibDir);
22 | System.setProperty("hadoop.home.dir", windowsLibDir);
23 | System.load(new File(windowsLibDir + Path.SEPARATOR + "lib" + Path.SEPARATOR + "hadoop.dll").getAbsolutePath());
24 | System.load(new File(windowsLibDir + Path.SEPARATOR + "lib" + Path.SEPARATOR + "hdfs.dll").getAbsolutePath());
25 |
26 | }
27 | }
28 |
29 | public static String getHadoopHome() {
30 |
31 | if(System.getProperty("HADOOP_HOME") != null) {
32 | LOG.info("HADOOP_HOME: " + System.getProperty("HADOOP_HOME"));
33 | return System.getProperty("HADOOP_HOME");
34 | } else if (System.getenv("HADOOP_HOME") != null) { //takes the hadoop home from system environment variable
35 | LOG.info("HADOOP_HOME: " + System.getenv("HADOOP_HOME"));
36 | return System.getenv("HADOOP_HOME");
37 | } else {
38 |
39 | File windowsLibDir = new File("." + Path.SEPARATOR + "windows_libs" +
40 | Path.SEPARATOR + System.getProperty("hdp.release.version"));
41 |
42 | if (!windowsLibDir.exists()) {
43 | windowsLibDir = new File(".." + Path.SEPARATOR + windowsLibDir);
44 | if (!windowsLibDir.exists()) {
45 | LOG.error("WINDOWS: ERROR: Could not find windows native libs");
46 | }
47 | }
48 | return windowsLibDir.getAbsolutePath();
49 | }
50 |
51 | }
52 |
53 | }
54 |
--------------------------------------------------------------------------------
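A hedged sketch of how the helper above is typically primed; the release version is a placeholder. When HADOOP_HOME is not set as a property or environment variable, getHadoopHome() falls back to windows_libs/<hdp.release.version>:

import com.github.sakserv.minicluster.util.WindowsLibsUtils;

public class WindowsLibsSketch {
    public static void main(String[] args) {
        // Placeholder HDP release; only consulted when HADOOP_HOME is not set,
        // to locate windows_libs/<version>/lib/hadoop.dll and hdfs.dll.
        System.setProperty("hdp.release.version", "2.6.2.0");

        // No-op on non-Windows hosts; on Windows it sets hadoop.home.dir and loads the native libs.
        WindowsLibsUtils.setHadoopHome();
    }
}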
/hadoop-mini-clusters-common/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-common/src/test/java/com/github/sakserv/minicluster/http/HttpUtilsTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to the Apache Software Foundation (ASF) under one or more
3 | * contributor license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright ownership.
5 | * The ASF licenses this file to You under the Apache License, Version 2.0
6 | * (the "License"); you may not use this file except in compliance with
7 | * the License. You may obtain a copy of the License at
8 | *
9 | * http://www.apache.org/licenses/LICENSE-2.0
10 | *
11 | * Unless required by applicable law or agreed to in writing, software
12 | * distributed under the License is distributed on an "AS IS" BASIS,
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | * See the License for the specific language governing permissions and
15 | * limitations under the License.
16 | */
17 | package com.github.sakserv.minicluster.http;
18 |
19 | import static org.junit.Assert.assertEquals;
20 | import static org.junit.Assert.assertNotNull;
21 | import static org.junit.Assert.assertNull;
22 |
23 | import java.net.Proxy;
24 |
25 | import org.junit.Test;
26 |
27 | public class HttpUtilsTest {
28 |
29 | @Test
30 | public void testReturnProxyIfProxyPropsAreSetToNull() {
31 | System.clearProperty("HTTP_PROXY");
32 | System.clearProperty("ALL_PROXY");
33 | assertNull(HttpUtils.returnProxyIfEnabled());
34 | }
35 |
36 | @Test
37 | public void testReturnProxyIfHTTPProxyIsSet() {
38 | System.setProperty("HTTP_PROXY", "http://104.207.145.113:3128");
39 | System.clearProperty("ALL_PROXY");
40 | assertNotNull(HttpUtils.returnProxyIfEnabled());
41 | assertEquals("/104.207.145.113:3128", HttpUtils.returnProxyIfEnabled().address().toString());
42 | assertEquals(Proxy.Type.HTTP, HttpUtils.returnProxyIfEnabled().type());
43 | }
44 |
45 | @Test
46 | public void testReturnProxyIfSOCKProxyIsSet() {
47 | System.setProperty("HTTP_PROXY", "sock5://207.98.253.161:10200");
48 | System.clearProperty("ALL_PROXY");
49 | assertNotNull(HttpUtils.returnProxyIfEnabled());
50 | assertEquals("/207.98.253.161:10200", HttpUtils.returnProxyIfEnabled().address().toString());
51 | assertEquals(Proxy.Type.SOCKS, HttpUtils.returnProxyIfEnabled().type());
52 | }
53 |
54 | @Test
55 | public void testReturnProxyIfSOCKProxyIsSetGnomeClient() {
56 | System.clearProperty("HTTP_PROXY");
57 | System.setProperty("ALL_PROXY", "sock5://207.98.253.161:10200");
58 | assertNotNull(HttpUtils.returnProxyIfEnabled());
59 | assertEquals("/207.98.253.161:10200", HttpUtils.returnProxyIfEnabled().address().toString());
60 | assertEquals(Proxy.Type.SOCKS, HttpUtils.returnProxyIfEnabled().type());
61 | }
62 |
63 | @Test
64 | public void testReturnProxyIfHTTPProxyIsSetGnomeClient() {
65 | System.clearProperty("HTTP_PROXY");
66 | System.setProperty("ALL_PROXY", "104.207.145.113:3128");
67 | assertNotNull(HttpUtils.returnProxyIfEnabled());
68 | assertEquals("/104.207.145.113:3128", HttpUtils.returnProxyIfEnabled().address().toString());
69 | assertEquals(Proxy.Type.HTTP, HttpUtils.returnProxyIfEnabled().type());
70 | }
71 |
72 | @Test
73 | public void testReturnProxyIfProxyHasInvalidUrl() {
74 | System.setProperty("HTTP_PROXY", "104.207.145.113");
75 | System.clearProperty("ALL_PROXY");
76 | assertNull(HttpUtils.returnProxyIfEnabled());
77 | }
78 |
79 | @Test
80 | public void testReturnProxyIfProxyHasInvalidUrlWithoutPort() {
81 | System.setProperty("HTTP_PROXY", "http104.207.145.113");
82 | System.clearProperty("ALL_PROXY");
83 | assertNull(HttpUtils.returnProxyIfEnabled());
84 | }
85 | }
--------------------------------------------------------------------------------
/hadoop-mini-clusters-common/src/test/java/com/github/sakserv/minicluster/util/FileUtilsTest.java:
--------------------------------------------------------------------------------
1 | package com.github.sakserv.minicluster.util;
2 |
3 | import static org.junit.Assert.assertEquals;
4 |
5 | import org.junit.Test;
6 |
7 | /*
8 | * Licensed under the Apache License, Version 2.0 (the "License");
9 | * you may not use this file except in compliance with the License.
10 | * You may obtain a copy of the License at
11 | *
12 | * http://www.apache.org/licenses/LICENSE-2.0
13 | *
14 | * Unless required by applicable law or agreed to in writing, software
15 | * distributed under the License is distributed on an "AS IS" BASIS,
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | * See the License for the specific language governing permissions and
18 | * limitations under the License.
19 | */public class FileUtilsTest {
20 |
21 | @Test
22 | public void testToString() throws Exception {
23 | FileUtils fileUtils = new FileUtils();
24 | assertEquals("FileUtils", fileUtils.toString());
25 |
26 | }
27 | }
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hbase/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <parent>
5 |         <artifactId>hadoop-mini-clusters</artifactId>
6 |         <groupId>com.github.sakserv</groupId>
7 |         <version>0.1.17-SNAPSHOT</version>
8 |     </parent>
9 |     <modelVersion>4.0.0</modelVersion>
10 |
11 |     <artifactId>hadoop-mini-clusters-hbase</artifactId>
12 |
13 |     <dependencies>
14 |
15 |         <dependency>
16 |             <groupId>org.apache.hbase</groupId>
17 |             <artifactId>hbase-rest</artifactId>
18 |             <version>${hbase.version}</version>
19 |         </dependency>
20 |
21 |         <dependency>
22 |             <groupId>org.apache.hbase</groupId>
23 |             <artifactId>hbase-client</artifactId>
24 |             <version>${hbase.version}</version>
25 |         </dependency>
26 |
27 |         <dependency>
28 |             <groupId>org.apache.hbase</groupId>
29 |             <artifactId>hbase-testing-util</artifactId>
30 |             <version>${hbase.version}</version>
31 |         </dependency>
32 |
33 |         <dependency>
34 |             <groupId>com.github.sakserv</groupId>
35 |             <artifactId>hadoop-mini-clusters-zookeeper</artifactId>
36 |             <version>${project.version}</version>
37 |             <scope>test</scope>
38 |         </dependency>
39 |
40 |         <dependency>
41 |             <groupId>com.github.sakserv</groupId>
42 |             <artifactId>hadoop-mini-clusters-common</artifactId>
43 |             <version>${project.version}</version>
44 |         </dependency>
45 |
46 |     </dependencies>
47 |
48 |     <build>
49 |         <plugins>
50 |             <plugin>
51 |                 <groupId>org.apache.maven.plugins</groupId>
52 |                 <artifactId>maven-shade-plugin</artifactId>
53 |                 <version>2.4.3</version>
54 |                 <executions>
55 |                     <execution>
56 |                         <phase>package</phase>
57 |                         <goals>
58 |                             <goal>shade</goal>
59 |                         </goals>
60 |                         <configuration>
61 |                             <shadedArtifactAttached>true</shadedArtifactAttached>
62 |                             <relocations>
63 |                                 <relocation>
64 |                                     <pattern>com.lmax</pattern>
65 |                                     <shadedPattern>shaded.com.lmax</shadedPattern>
66 |                                 </relocation>
67 |                             </relocations>
68 |                             <artifactSet>
69 |                                 <includes>
70 |                                     <include>com.lmax</include>
71 |                                 </includes>
72 |                             </artifactSet>
73 |                         </configuration>
74 |                     </execution>
75 |                 </executions>
76 |             </plugin>
77 |         </plugins>
78 |     </build>
79 |
80 | </project>
81 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hbase/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # Zookeeper
2 | zookeeper.temp.dir=embedded_zk
3 | zookeeper.host=127.0.0.1
4 | zookeeper.port=22010
5 | zookeeper.connection.string=127.0.0.1:22010
6 |
7 | # HBase
8 | hbase.master.port=25111
9 | hbase.master.info.port=-1
10 | hbase.num.region.servers=1
11 | hbase.root.dir=embedded_hbase
12 | hbase.znode.parent=/hbase-unsecure
13 | hbase.wal.replication.enabled=false
14 |
15 | # HBase REST
16 | hbase.rest.port=28000
17 | hbase.rest.readonly=false
18 | hbase.rest.info.port=28080
19 | hbase.rest.host=0.0.0.0
20 | hbase.rest.threads.max=100
21 | hbase.rest.threads.min=2
22 |
23 | # HBase Test
24 | hbase.test.table.name=hbase_test_table
25 | hbase.test.col.family.name=cf1
26 | hbase.test.col.qualifier.name=cq1
27 | hbase.test.num.rows.to.put=50
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hbase/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hdfs/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <parent>
5 |         <artifactId>hadoop-mini-clusters</artifactId>
6 |         <groupId>com.github.sakserv</groupId>
7 |         <version>0.1.17-SNAPSHOT</version>
8 |     </parent>
9 |     <modelVersion>4.0.0</modelVersion>
10 |
11 |     <artifactId>hadoop-mini-clusters-hdfs</artifactId>
12 |
13 |     <dependencies>
14 |
15 |         <dependency>
16 |             <groupId>org.apache.hadoop</groupId>
17 |             <artifactId>hadoop-client</artifactId>
18 |             <version>${hadoop.version}</version>
19 |         </dependency>
20 |
21 |         <dependency>
22 |             <groupId>org.apache.hadoop</groupId>
23 |             <artifactId>hadoop-minicluster</artifactId>
24 |             <version>${hadoop.version}</version>
25 |         </dependency>
26 |
27 |         <dependency>
28 |             <groupId>com.github.sakserv</groupId>
29 |             <artifactId>hadoop-mini-clusters-common</artifactId>
30 |             <version>${project.version}</version>
31 |         </dependency>
32 |
33 |         <dependency>
34 |             <groupId>org.apache.httpcomponents</groupId>
35 |             <artifactId>httpclient</artifactId>
36 |             <version>${httpclient.version}</version>
37 |             <scope>test</scope>
38 |         </dependency>
39 |
40 |     </dependencies>
41 |
42 | </project>
43 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hdfs/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # HDFS
2 | hdfs.namenode.port=20112
3 | hdfs.namenode.http.port=50070
4 | hdfs.temp.dir=embedded_hdfs
5 | hdfs.num.datanodes=1
6 | hdfs.enable.permissions=false
7 | hdfs.format=true
8 | hdfs.enable.running.user.as.proxy.user=true
9 |
10 | # HDFS Test
11 | hdfs.test.file=/tmp/testing
12 | hdfs.test.string=TESTING
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hdfs/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hdfs/src/test/java/com/github/sakserv/minicluster/impl/HdfsLocalClusterIntegrationTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | package com.github.sakserv.minicluster.impl;
16 |
17 | import org.apache.http.client.HttpClient;
18 | import org.apache.http.impl.client.HttpClients;
19 | import static org.junit.Assert.assertEquals;
20 |
21 | import java.io.BufferedReader;
22 | import java.io.IOException;
23 | import java.io.InputStreamReader;
24 | import java.net.URL;
25 | import java.net.URLConnection;
26 |
27 | import org.apache.hadoop.conf.Configuration;
28 | import org.apache.hadoop.fs.FSDataInputStream;
29 | import org.apache.hadoop.fs.FSDataOutputStream;
30 | import org.apache.hadoop.fs.FileSystem;
31 | import org.apache.hadoop.fs.Path;
32 | import org.junit.AfterClass;
33 | import org.junit.BeforeClass;
34 | import org.junit.Test;
35 | import org.slf4j.Logger;
36 | import org.slf4j.LoggerFactory;
37 |
38 | import com.github.sakserv.minicluster.config.ConfigVars;
39 | import com.github.sakserv.propertyparser.PropertyParser;
40 |
41 | public class HdfsLocalClusterIntegrationTest {
42 |
43 | // Logger
44 | private static final Logger LOG = LoggerFactory.getLogger(HdfsLocalClusterIntegrationTest.class);
45 |
46 | // Setup the property parser
47 | private static PropertyParser propertyParser;
48 | static {
49 | try {
50 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
51 | propertyParser.parsePropsFile();
52 | } catch(IOException e) {
53 | LOG.error("Unable to load property file: {}", propertyParser.getProperty(ConfigVars.DEFAULT_PROPS_FILE));
54 | }
55 | }
56 |
57 | private static HdfsLocalCluster dfsCluster;
58 |
59 | @BeforeClass
60 | public static void setUp() throws Exception {
61 | dfsCluster = new HdfsLocalCluster.Builder()
62 | .setHdfsNamenodePort(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_PORT_KEY)))
63 | .setHdfsNamenodeHttpPort( Integer.parseInt( propertyParser.getProperty( ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY ) ) )
64 | .setHdfsTempDir(propertyParser.getProperty(ConfigVars.HDFS_TEMP_DIR_KEY))
65 | .setHdfsNumDatanodes(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NUM_DATANODES_KEY)))
66 | .setHdfsEnablePermissions(
67 | Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_ENABLE_PERMISSIONS_KEY)))
68 | .setHdfsFormat(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_FORMAT_KEY)))
69 | .setHdfsEnableRunningUserAsProxyUser(Boolean.parseBoolean(
70 | propertyParser.getProperty(ConfigVars.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER)))
71 | .setHdfsConfig(new Configuration())
72 | .build();
73 | dfsCluster.start();
74 | }
75 |
76 | @AfterClass
77 | public static void tearDown() throws Exception {
78 | dfsCluster.stop();
79 | }
80 |
81 | @Test
82 | public void testDfsClusterStart() throws Exception {
83 |
84 | // Write a file to HDFS containing the test string
85 | FileSystem hdfsFsHandle = dfsCluster.getHdfsFileSystemHandle();
86 | FSDataOutputStream writer = hdfsFsHandle.create(
87 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
88 | writer.writeUTF(propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
89 | writer.close();
90 |
91 | // Read the file and compare to test string
92 | FSDataInputStream reader = hdfsFsHandle.open(
93 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
94 | assertEquals(reader.readUTF(), propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
95 | reader.close();
96 | hdfsFsHandle.close();
97 |
98 | URL url = new URL(
99 | String.format( "http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
100 | propertyParser.getProperty( ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY ) ) );
101 | URLConnection connection = url.openConnection();
102 | connection.setRequestProperty( "Accept-Charset", "UTF-8" );
103 | BufferedReader response = new BufferedReader( new InputStreamReader( connection.getInputStream() ) );
104 | String line = response.readLine();
105 | response.close();
106 | assertEquals( "{\"Path\":\"/user/guest\"}", line );
107 |
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hivemetastore/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <parent>
5 |         <artifactId>hadoop-mini-clusters</artifactId>
6 |         <groupId>com.github.sakserv</groupId>
7 |         <version>0.1.17-SNAPSHOT</version>
8 |     </parent>
9 |     <modelVersion>4.0.0</modelVersion>
10 |
11 |     <artifactId>hadoop-mini-clusters-hivemetastore</artifactId>
12 |
13 |     <dependencies>
14 |
15 |         <dependency>
16 |             <groupId>org.apache.hadoop</groupId>
17 |             <artifactId>hadoop-client</artifactId>
18 |             <version>${hadoop.version}</version>
19 |         </dependency>
20 |
21 |         <dependency>
22 |             <groupId>org.apache.hive</groupId>
23 |             <artifactId>hive-exec</artifactId>
24 |             <version>${hive.version}</version>
25 |         </dependency>
26 |
27 |         <dependency>
28 |             <groupId>com.github.sakserv</groupId>
29 |             <artifactId>hadoop-mini-clusters-common</artifactId>
30 |             <version>${project.version}</version>
31 |         </dependency>
32 |
33 |     </dependencies>
34 |
35 | </project>
36 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hivemetastore/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # Hive
2 | hive.scratch.dir=hive_scratch_dir
3 | hive.warehouse.dir=warehouse_dir
4 |
5 | # Hive Metastore
6 | hive.metastore.hostname=localhost
7 | hive.metastore.port=20102
8 | hive.metastore.derby.db.dir=metastore_db
9 |
10 | # Hive Test
11 | hive.test.database.name=default
12 | hive.test.table.name=test_table
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hivemetastore/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hiveserver2/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <parent>
5 |         <artifactId>hadoop-mini-clusters</artifactId>
6 |         <groupId>com.github.sakserv</groupId>
7 |         <version>0.1.17-SNAPSHOT</version>
8 |     </parent>
9 |     <modelVersion>4.0.0</modelVersion>
10 |
11 |     <artifactId>hadoop-mini-clusters-hiveserver2</artifactId>
12 |
13 |     <dependencies>
14 |
15 |         <dependency>
16 |             <groupId>org.apache.hadoop</groupId>
17 |             <artifactId>hadoop-client</artifactId>
18 |             <version>${hadoop.version}</version>
19 |         </dependency>
20 |
21 |         <dependency>
22 |             <groupId>org.apache.hive</groupId>
23 |             <artifactId>hive-service</artifactId>
24 |             <version>${hive.version}</version>
25 |         </dependency>
26 |
27 |         <dependency>
28 |             <groupId>org.apache.hive</groupId>
29 |             <artifactId>hive-jdbc</artifactId>
30 |             <version>${hive.version}</version>
31 |             <scope>test</scope>
32 |         </dependency>
33 |
34 |         <dependency>
35 |             <groupId>com.github.sakserv</groupId>
36 |             <artifactId>hadoop-mini-clusters-zookeeper</artifactId>
37 |             <version>${project.version}</version>
38 |             <scope>test</scope>
39 |         </dependency>
40 |
41 |         <dependency>
42 |             <groupId>com.github.sakserv</groupId>
43 |             <artifactId>hadoop-mini-clusters-hivemetastore</artifactId>
44 |             <version>${project.version}</version>
45 |             <scope>test</scope>
46 |         </dependency>
47 |
48 |         <dependency>
49 |             <groupId>com.github.sakserv</groupId>
50 |             <artifactId>hadoop-mini-clusters-common</artifactId>
51 |             <version>${project.version}</version>
52 |         </dependency>
53 |
54 |     </dependencies>
55 |
56 | </project>
57 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hiveserver2/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # Zookeeper
2 | zookeeper.temp.dir=embedded_zk
3 | zookeeper.host=127.0.0.1
4 | zookeeper.port=22010
5 | zookeeper.connection.string=127.0.0.1:22010
6 |
7 | # Hive
8 | hive.scratch.dir=hive_scratch_dir
9 | hive.warehouse.dir=warehouse_dir
10 |
11 | # Hive Metastore
12 | hive.metastore.hostname=localhost
13 | hive.metastore.port=20202
14 | hive.metastore.derby.db.dir=metastore_db
15 |
16 | # Hive Server2
17 | hive.server2.hostname=localhost
18 | hive.server2.port=20203
19 |
20 | # Hive Test
21 | hive.test.database.name=default
22 | hive.test.table.name=test_table
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hiveserver2/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hyperscaledb/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | hadoop-mini-clusters
5 | com.github.sakserv
6 | 0.1.17-SNAPSHOT
7 |
8 | 4.0.0
9 |
10 | hadoop-mini-clusters-hyperscaledb
11 |
12 |
13 |
14 |
15 |
16 | org.hsqldb
17 | hsqldb
18 | ${hsqldb.version}
19 |
20 |
21 |
22 |
23 | mysql
24 | mysql-connector-java
25 | ${mysql-connector-java.version}
26 | test
27 |
28 |
29 |
30 |
31 | com.github.sakserv
32 | hadoop-mini-clusters-common
33 | ${project.version}
34 |
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hyperscaledb/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # HSQLDB
2 | hsqldb.hostname=127.0.0.1
3 | hsqldb.port=44111
4 | hsqldb.temp.dir=embedded_hsqldb
5 | hsqldb.database.name=testdb
6 | hsqldb.compatibility.mode=mysql
7 | hsqldb.jdbc.driver=org.hsqldb.jdbc.JDBCDriver
8 | hsqldb.jdbc.connection.string.prefix=jdbc:hsqldb:hsql://
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hyperscaledb/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-hyperscaledb/src/test/java/com/github/sakserv/minicluster/impl/HsqldbLocalServerIntegrationTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 | package com.github.sakserv.minicluster.impl;
15 |
16 | import static org.hamcrest.CoreMatchers.containsString;
17 | import static org.junit.Assert.assertThat;
18 | import static org.junit.Assert.assertTrue;
19 |
20 | import java.io.IOException;
21 | import java.sql.Connection;
22 | import java.sql.DriverManager;
23 | import java.sql.ResultSet;
24 | import java.sql.SQLException;
25 | import java.sql.Statement;
26 |
27 | import org.junit.AfterClass;
28 | import org.junit.BeforeClass;
29 | import org.junit.Test;
30 | import org.slf4j.Logger;
31 | import org.slf4j.LoggerFactory;
32 |
33 | import com.github.sakserv.minicluster.config.ConfigVars;
34 | import com.github.sakserv.propertyparser.PropertyParser;
35 |
36 | public class HsqldbLocalServerIntegrationTest {
37 |
38 | // Logger
39 | private static final Logger LOG = LoggerFactory.getLogger(HsqldbLocalServerIntegrationTest.class);
40 |
41 | // Setup the property parser
42 | private static PropertyParser propertyParser;
43 | static {
44 | try {
45 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
46 | propertyParser.parsePropsFile();
47 | } catch(IOException e) {
48 | LOG.error("Unable to load property file: {}", ConfigVars.DEFAULT_PROPS_FILE);
49 | }
50 | }
51 |
52 | private static HsqldbLocalServer hsqldbLocalServer;
53 |
54 | @BeforeClass
55 | public static void setUp() throws Exception {
56 | hsqldbLocalServer = new HsqldbLocalServer.Builder()
57 | .setHsqldbHostName(propertyParser.getProperty(ConfigVars.HSQLDB_HOSTNAME_KEY))
58 | .setHsqldbPort(propertyParser.getProperty(ConfigVars.HSQLDB_PORT_KEY))
59 | .setHsqldbTempDir(propertyParser.getProperty(ConfigVars.HSQLDB_TEMP_DIR_KEY))
60 | .setHsqldbDatabaseName(propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY))
61 | .setHsqldbCompatibilityMode(propertyParser.getProperty(ConfigVars.HSQLDB_COMPATIBILITY_MODE_KEY))
62 | .setHsqldbJdbcDriver(propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_DRIVER_KEY))
63 | .setHsqldbJdbcConnectionStringPrefix(propertyParser.getProperty(
64 | ConfigVars.HSQLDB_JDBC_CONNECTION_STRING_PREFIX_KEY))
65 | .build();
66 | hsqldbLocalServer.start();
67 | }
68 |
69 | @AfterClass
70 | public static void tearDown() throws Exception {
71 | hsqldbLocalServer.stop();
72 | }
73 |
74 | @Test
75 | public void testHsqldbLocalServer() throws ClassNotFoundException, SQLException {
76 |
77 | LOG.info("HSQLDB: Running User: {}", System.getProperty("user.name"));
78 |
79 | LOG.info("HSQLDB: Loading the JDBC Driver: {}", propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_DRIVER_KEY));
80 | Class.forName(propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_DRIVER_KEY));
81 |
82 | // Get the connection
83 | Connection connection = DriverManager.getConnection(
84 | propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_CONNECTION_STRING_PREFIX_KEY) +
85 | propertyParser.getProperty(ConfigVars.HSQLDB_HOSTNAME_KEY) + ":" +
86 | propertyParser.getProperty(ConfigVars.HSQLDB_PORT_KEY) + "/" +
87 | propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY),
88 | "SA", "");
89 | assertThat(connection.getMetaData().getURL(),
90 | containsString(propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY)));
91 | }
92 |
93 | @Test
94 | public void testHsqldbMysqlCompatibilityMode() throws SQLException {
95 | Connection connection = DriverManager.getConnection(
96 | propertyParser.getProperty(ConfigVars.HSQLDB_JDBC_CONNECTION_STRING_PREFIX_KEY) +
97 | propertyParser.getProperty(ConfigVars.HSQLDB_HOSTNAME_KEY) + ":" +
98 | propertyParser.getProperty(ConfigVars.HSQLDB_PORT_KEY) + "/" +
99 | propertyParser.getProperty(ConfigVars.HSQLDB_DATABASE_NAME_KEY),
100 | "SA", "");
101 | Statement statement = connection.createStatement();
102 | statement.executeQuery(hsqldbLocalServer.getHsqldbCompatibilityModeStatement());
103 |
104 | statement = connection.createStatement();
105 | ResultSet resultSet = statement.executeQuery("SELECT PROPERTY_VALUE FROM INFORMATION_SCHEMA.SYSTEM_PROPERTIES WHERE PROPERTY_NAME = 'sql.syntax_mys'");
106 | while(resultSet.next()) {
107 | assertTrue(Boolean.parseBoolean(resultSet.getString(1)));
108 | }
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kafka/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | hadoop-mini-clusters
5 | com.github.sakserv
6 | 0.1.17-SNAPSHOT
7 |
8 | 4.0.0
9 |
10 | hadoop-mini-clusters-kafka
11 |
12 |
13 | 1.3.7
14 |
15 |
16 |
17 |
18 |
19 |
20 | org.apache.kafka
21 | ${kafka.artifactid.version}
22 | ${kafka.version}
23 |
24 |
25 |
26 |
27 | com.github.sakserv
28 | hadoop-mini-clusters-zookeeper
29 | ${project.version}
30 | test
31 |
32 |
33 |
34 |
35 | org.codehaus.jettison
36 | jettison
37 | ${jettison.version}
38 | test
39 |
40 |
41 |
42 |
43 | com.github.sakserv
44 | hadoop-mini-clusters-common
45 | ${project.version}
46 |
47 |
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kafka/src/main/java/com/github/sakserv/minicluster/systemtime/LocalSystemTime.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 | package com.github.sakserv.minicluster.systemtime;
15 |
16 | import org.apache.kafka.common.utils.Time;
17 |
18 | public class LocalSystemTime implements Time {
19 |
20 | @Override
21 | public long milliseconds() {
22 | return System.currentTimeMillis();
23 | }
24 |
25 | @Override
26 | public long nanoseconds() {
27 | return System.nanoTime();
28 | }
29 |
30 | @Override
31 | public void sleep(long ms) {
32 | try {
33 | Thread.sleep(ms);
34 | } catch (InterruptedException e) {
35 | // no stress
36 | }
37 | }
38 |
39 | @Override
40 | public long hiResClockMs() {
41 | return System.currentTimeMillis();
42 | }
43 |
44 | }
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kafka/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # Zookeeper
2 | zookeeper.temp.dir=embedded_zk
3 | zookeeper.host=127.0.0.1
4 | zookeeper.port=22010
5 | zookeeper.connection.string=127.0.0.1:22010
6 |
7 | # Kafka
8 | kafka.hostname=localhost
9 | kafka.port=20111
10 |
11 | # Kafka Test
12 | kafka.test.topic=testtopic
13 | kafka.test.message.count=10
14 | kafka.test.broker.id=0
15 | kafka.test.temp.dir=embedded_kafka
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kafka/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kafka/src/test/java/com/github/sakserv/minicluster/datatime/GenerateRandomDay.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | package com.github.sakserv.minicluster.datatime;
16 |
17 | import java.util.GregorianCalendar;
18 |
19 | public class GenerateRandomDay {
20 |
21 | public static String genRandomDay() {
22 |
23 | GregorianCalendar gc = new GregorianCalendar();
24 |
25 | int year = randBetween(2013, 2014);
26 |
27 | gc.set(gc.YEAR, year);
28 |
29 | int dayOfYear = randBetween(1, gc.getActualMaximum(gc.DAY_OF_YEAR));
30 |
31 | gc.set(gc.DAY_OF_YEAR, dayOfYear);
32 |
33 | return String.format("%04d-%02d-%02d", gc.get(gc.YEAR), gc.get(gc.MONTH) + 1, gc.get(gc.DAY_OF_MONTH)); // Calendar.MONTH is zero-based
34 |
35 | }
36 |
37 | public static int randBetween(int start, int end) {
38 | return start + (int)Math.round(Math.random() * (end - start));
39 | }
40 | }
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kafka/src/test/java/com/github/sakserv/minicluster/impl/KafkaLocalBrokerIntegrationTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | package com.github.sakserv.minicluster.impl;
16 |
17 | import java.io.IOException;
18 | import java.util.ArrayList;
19 | import java.util.List;
20 | import java.util.Properties;
21 |
22 | import org.junit.AfterClass;
23 | import org.junit.Assert;
24 | import org.junit.BeforeClass;
25 | import org.junit.Test;
26 | import org.slf4j.Logger;
27 | import org.slf4j.LoggerFactory;
28 |
29 | import com.github.sakserv.minicluster.config.ConfigVars;
30 | import com.github.sakserv.minicluster.kafka.consumer.KafkaTestConsumer;
31 | import com.github.sakserv.minicluster.kafka.producer.KafkaSimpleTestProducer;
32 | import com.github.sakserv.propertyparser.PropertyParser;
33 |
34 | public class KafkaLocalBrokerIntegrationTest {
35 |
36 | // Logger
37 | private static final Logger LOG = LoggerFactory.getLogger(KafkaLocalBrokerIntegrationTest.class);
38 |
39 | // Setup the property parser
40 | private static PropertyParser propertyParser;
41 | static {
42 | try {
43 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
44 | propertyParser.parsePropsFile();
45 | } catch(IOException e) {
46 | LOG.error("Unable to load property file: {}", ConfigVars.DEFAULT_PROPS_FILE);
47 | }
48 | }
49 |
50 | private static ZookeeperLocalCluster zookeeperLocalCluster;
51 | private static KafkaLocalBroker kafkaLocalBroker;
52 |
53 | @BeforeClass
54 | public static void setUp() throws Exception {
55 | zookeeperLocalCluster = new ZookeeperLocalCluster.Builder()
56 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
57 | .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY))
58 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
59 | .build();
60 | zookeeperLocalCluster.start();
61 |
62 | kafkaLocalBroker = new KafkaLocalBroker.Builder()
63 | .setKafkaHostname(propertyParser.getProperty(ConfigVars.KAFKA_HOSTNAME_KEY))
64 | .setKafkaPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_PORT_KEY)))
65 | .setKafkaBrokerId(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_BROKER_ID_KEY)))
66 | .setKafkaProperties(new Properties())
67 | .setKafkaTempDir(propertyParser.getProperty(ConfigVars.KAFKA_TEST_TEMP_DIR_KEY))
68 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
69 | .build();
70 | kafkaLocalBroker.start();
71 |
72 | }
73 |
74 | @AfterClass
75 | public static void tearDown() throws Exception {
76 |
77 | kafkaLocalBroker.stop();
78 | zookeeperLocalCluster.stop();
79 | }
80 |
81 | @Test
82 | public void testKafkaLocalBroker() throws Exception {
83 |
84 | // Producer
85 | KafkaSimpleTestProducer kafkaTestProducer = new KafkaSimpleTestProducer.Builder()
86 | .setKafkaHostname(propertyParser.getProperty(ConfigVars.KAFKA_HOSTNAME_KEY))
87 | .setKafkaPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_PORT_KEY)))
88 | .setTopic(propertyParser.getProperty(ConfigVars.KAFKA_TEST_TOPIC_KEY))
89 | .setMessageCount(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY)))
90 | .build();
91 | kafkaTestProducer.produceMessages();
92 |
93 | // Consumer
94 | List<String> seeds = new ArrayList<>();
95 | seeds.add(kafkaLocalBroker.getKafkaHostname());
96 | KafkaTestConsumer kafkaTestConsumer = new KafkaTestConsumer();
97 | kafkaTestConsumer.consumeMessages(
98 | Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY)),
99 | propertyParser.getProperty(ConfigVars.KAFKA_TEST_TOPIC_KEY),
100 | 0,
101 | seeds,
102 | kafkaLocalBroker.getKafkaPort());
103 |
104 |
105 |
106 | // Assert num of messages produced = num of message consumed
107 | Assert.assertEquals(Long.parseLong(propertyParser.getProperty(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY)),
108 | kafkaTestConsumer.getNumRead());
109 |
110 | }
111 |
112 | }
113 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kafka/src/test/java/com/github/sakserv/minicluster/kafka/producer/KafkaSimpleTestProducer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 | package com.github.sakserv.minicluster.kafka.producer;
15 |
16 | import java.util.HashMap;
17 | import java.util.Map;
18 |
19 | import org.apache.kafka.clients.producer.KafkaProducer;
20 | import org.apache.kafka.clients.producer.ProducerConfig;
21 | import org.apache.kafka.clients.producer.ProducerRecord;
22 | import org.codehaus.jettison.json.JSONException;
23 | import org.codehaus.jettison.json.JSONObject;
24 | import org.slf4j.Logger;
25 | import org.slf4j.LoggerFactory;
26 |
27 | import com.github.sakserv.minicluster.datatime.GenerateRandomDay;
28 |
29 | public class KafkaSimpleTestProducer {
30 |
31 | // Logger
32 | private static final Logger LOG = LoggerFactory.getLogger(KafkaSimpleTestProducer.class);
33 |
34 | private String kafkaHostname;
35 | private Integer kafkaPort;
36 | private String topic;
37 | private Integer messageCount;
38 |
39 | private KafkaSimpleTestProducer(Builder builder) {
40 | this.kafkaHostname = builder.kafkaHostname;
41 | this.kafkaPort = builder.kafkaPort;
42 | this.topic = builder.topic;
43 | this.messageCount = builder.messageCount;
44 | }
45 |
46 | public String getKafkaHostname() {
47 | return kafkaHostname;
48 | }
49 |
50 | public Integer getKafkaPort() {
51 | return kafkaPort;
52 | }
53 |
54 | public String getTopic() {
55 | return topic;
56 | }
57 |
58 | public Integer getMessageCount() {
59 | return messageCount;
60 | }
61 |
62 | public static class Builder {
63 | private String kafkaHostname;
64 | private Integer kafkaPort;
65 | private String topic;
66 | private Integer messageCount;
67 |
68 | public Builder setKafkaHostname(String kafkaHostname) {
69 | this.kafkaHostname = kafkaHostname;
70 | return this;
71 | }
72 |
73 | public Builder setKafkaPort(Integer kafkaPort) {
74 | this.kafkaPort = kafkaPort;
75 | return this;
76 | }
77 |
78 | public Builder setTopic(String topic) {
79 | this.topic = topic;
80 | return this;
81 | }
82 |
83 | public Builder setMessageCount(Integer messageCount) {
84 | this.messageCount = messageCount;
85 | return this;
86 | }
87 |
88 | public KafkaSimpleTestProducer build() {
89 | KafkaSimpleTestProducer kafkaSimpleTestProducer = new KafkaSimpleTestProducer(this);
90 | return kafkaSimpleTestProducer;
91 | }
92 |
93 | }
94 |
95 | public Map<String, Object> createConfig() {
96 | Map<String, Object> config = new HashMap<>();
97 | config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaHostname() + ":" + getKafkaPort());
98 | config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
99 | config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
100 | return config;
101 | }
102 |
103 | public void produceMessages() {
104 |
105 | KafkaProducer<String, String> producer = new KafkaProducer<>(createConfig());
106 |
107 | int count = 0;
108 | while(count < getMessageCount()) {
109 |
110 | // Create the JSON object
111 | JSONObject obj = new JSONObject();
112 | try {
113 | obj.put("id", String.valueOf(count));
114 | obj.put("msg", "test-message" + 1);
115 | obj.put("dt", GenerateRandomDay.genRandomDay());
116 | } catch(JSONException e) {
117 | e.printStackTrace();
118 | }
119 | String payload = obj.toString();
120 |
121 | producer.send(new ProducerRecord<>(getTopic(), payload));
122 | LOG.info("Sent message: {}", payload);
123 | count++;
124 | }
125 | }
126 |
127 | }
128 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kdc/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | hadoop-mini-clusters
5 | com.github.sakserv
6 | 0.1.17-SNAPSHOT
7 |
8 | 4.0.0
9 |
10 | hadoop-mini-clusters-kdc
11 |
12 |
13 |
14 |
15 |
16 | org.apache.hadoop
17 | hadoop-minikdc
18 | ${hadoop.version}
19 |
20 |
21 | org.apache.directory.jdbm
22 | apacheds-jdbm1
23 |
24 |
25 |
26 |
27 | org.apache.hadoop
28 | hadoop-common
29 | ${hadoop.version}
30 | tests
31 |
32 |
33 | org.apache.directory.server
34 | apacheds-jdbm
35 | 2.0.0-M5
36 |
37 |
38 |
39 |
40 | com.github.sakserv
41 | hadoop-mini-clusters-common
42 | ${project.version}
43 |
44 |
45 |
46 |
47 | com.github.sakserv
48 | hadoop-mini-clusters-hdfs
49 | ${project.version}
50 | test
51 |
52 |
53 |
54 |
55 | com.github.sakserv
56 | hadoop-mini-clusters-hbase
57 | ${project.version}
58 | test
59 |
60 |
61 | com.github.sakserv
62 | hadoop-mini-clusters-zookeeper
63 | ${project.version}
64 | test
65 |
66 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kdc/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # KDC
2 | kdc.host=127.0.0.1
3 | kdc.port=34340
4 | kdc.basedir=embedded_kdc
5 | kdc.org.domain=ORG
6 | kdc.org.name=ACME
7 | kdc.principals=hdfs,hbase,yarn,oozie,oozie_user,zookeeper,storm,mapreduce,HTTP
8 | kdc.krbinstance=127.0.0.1
9 | kdc.instance=DefaultKrbServer
10 | kdc.transport=TCP
11 | kdc.max.ticket.lifetime=86400000
12 | kdc.max.renewable.lifetime=604800000
13 | kdc.debug=false
14 |
15 | # HDFS
16 | hdfs.namenode.port=20112
17 | hdfs.namenode.http.port=50070
18 | hdfs.temp.dir=embedded_hdfs
19 | hdfs.num.datanodes=1
20 | hdfs.enable.permissions=false
21 | hdfs.format=true
22 | hdfs.enable.running.user.as.proxy.user=true
23 |
24 | # HDFS Test
25 | hdfs.test.file=/tmp/testing
26 | hdfs.test.string=TESTING
27 |
28 | # Zookeeper
29 | zookeeper.temp.dir=embedded_zk
30 | zookeeper.host=127.0.0.1
31 | zookeeper.port=22010
32 | zookeeper.connection.string=127.0.0.1:22010
33 |
34 | # HBase
35 | hbase.master.port=25111
36 | hbase.master.info.port=-1
37 | hbase.num.region.servers=1
38 | hbase.root.dir=embedded_hbase
39 | hbase.znode.parent=/hbase-secure
40 | hbase.wal.replication.enabled=false
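
The tests in this module split kdc.principals on commas and ask the KDC for keytabs and fully qualified principal names. A small sketch of the names these properties imply, assuming the usual MiniKdc convention that kdc.org.name and kdc.org.domain combine into the realm (ACME.ORG) and that kdc.krbinstance becomes the principal's instance component; the exact formatting lives in KdcLocalCluster, which is not reproduced in this dump:

public class KdcPrincipalSketch {
    public static void main(String[] args) {
        String realm = "ACME" + "." + "ORG";  // kdc.org.name + "." + kdc.org.domain (assumed convention)
        String krbInstance = "127.0.0.1";     // kdc.krbinstance
        String principals = "hdfs,hbase,yarn,oozie,oozie_user,zookeeper,storm,mapreduce,HTTP";
        for (String principal : principals.split(",")) {
            // e.g. hdfs/127.0.0.1@ACME.ORG
            System.out.println(principal + "/" + krbInstance + "@" + realm);
        }
    }
}
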
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kdc/src/test/java/com/github/sakserv/minicluster/impl/KdcLocalClusterHdfsIntegrationTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | package com.github.sakserv.minicluster.impl;
16 |
17 | import com.github.sakserv.minicluster.config.ConfigVars;
18 | import com.github.sakserv.propertyparser.PropertyParser;
19 | import org.apache.hadoop.conf.Configuration;
20 | import org.apache.hadoop.fs.FSDataInputStream;
21 | import org.apache.hadoop.fs.FSDataOutputStream;
22 | import org.apache.hadoop.fs.FileSystem;
23 | import org.apache.hadoop.fs.Path;
24 | import org.apache.hadoop.hdfs.HdfsConfiguration;
25 | import org.apache.hadoop.security.AccessControlException;
26 | import org.apache.hadoop.security.UserGroupInformation;
27 | import org.junit.AfterClass;
28 | import org.junit.BeforeClass;
29 | import org.junit.Test;
30 | import org.slf4j.Logger;
31 | import org.slf4j.LoggerFactory;
32 |
33 | import java.io.IOException;
34 |
35 | import static org.junit.Assert.*;
36 |
37 | public class KdcLocalClusterHdfsIntegrationTest {
38 |
39 | // Logger
40 | private static final Logger LOG = LoggerFactory.getLogger(KdcLocalClusterHdfsIntegrationTest.class);
41 |
42 | // Setup the property parser
43 | private static PropertyParser propertyParser;
44 |
45 | static {
46 | try {
47 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
48 | propertyParser.parsePropsFile();
49 | } catch (IOException e) {
50 | LOG.error("Unable to load property file: {}", ConfigVars.DEFAULT_PROPS_FILE);
51 | }
52 | }
53 |
54 | private static KdcLocalCluster kdcLocalCluster;
55 | private static HdfsLocalCluster hdfsLocalCluster;
56 |
57 | @BeforeClass
58 | public static void setUp() throws Exception {
59 |
60 | //System.setProperty("sun.security.krb5.debug", "true");
61 |
62 | // KDC
63 | kdcLocalCluster = new KdcLocalCluster.Builder()
64 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_PORT_KEY)))
65 | .setHost(propertyParser.getProperty(ConfigVars.KDC_HOST_KEY))
66 | .setBaseDir(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY))
67 | .setOrgDomain(propertyParser.getProperty(ConfigVars.KDC_ORG_DOMAIN_KEY))
68 | .setOrgName(propertyParser.getProperty(ConfigVars.KDC_ORG_NAME_KEY))
69 | .setPrincipals(propertyParser.getProperty(ConfigVars.KDC_PRINCIPALS_KEY).split(","))
70 | .setKrbInstance(propertyParser.getProperty(ConfigVars.KDC_KRBINSTANCE_KEY))
71 | .setInstance(propertyParser.getProperty(ConfigVars.KDC_INSTANCE_KEY))
72 | .setTransport(propertyParser.getProperty(ConfigVars.KDC_TRANSPORT))
73 | .setMaxTicketLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_TICKET_LIFETIME_KEY)))
74 | .setMaxRenewableLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_RENEWABLE_LIFETIME)))
75 | .setDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.KDC_DEBUG)))
76 | .build();
77 | kdcLocalCluster.start();
78 |
79 | Configuration baseConf = kdcLocalCluster.getBaseConf();
80 |
81 | //HDFS
82 | Configuration hdfsConfig = new HdfsConfiguration();
83 | hdfsConfig.addResource(baseConf);
84 | hdfsLocalCluster = new HdfsLocalCluster.Builder()
85 | .setHdfsNamenodePort(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_PORT_KEY)))
86 | .setHdfsNamenodeHttpPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY)))
87 | .setHdfsTempDir(propertyParser.getProperty(ConfigVars.HDFS_TEMP_DIR_KEY))
88 | .setHdfsNumDatanodes(Integer.parseInt(propertyParser.getProperty(ConfigVars.HDFS_NUM_DATANODES_KEY)))
89 | .setHdfsEnablePermissions(
90 | Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_ENABLE_PERMISSIONS_KEY)))
91 | .setHdfsFormat(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.HDFS_FORMAT_KEY)))
92 | .setHdfsEnableRunningUserAsProxyUser(Boolean.parseBoolean(
93 | propertyParser.getProperty(ConfigVars.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER)))
94 | .setHdfsConfig(hdfsConfig)
95 | .build();
96 | hdfsLocalCluster.start();
97 | }
98 |
99 | @AfterClass
100 | public static void tearDown() throws Exception {
101 | hdfsLocalCluster.stop();
102 | kdcLocalCluster.stop();
103 | }
104 |
105 | @Test
106 | public void testHdfs() throws Exception {
107 | FileSystem hdfsFsHandle = hdfsLocalCluster.getHdfsFileSystemHandle();
108 |
109 | UserGroupInformation.loginUserFromKeytab(kdcLocalCluster.getKrbPrincipalWithRealm("hdfs"), kdcLocalCluster.getKeytabForPrincipal("hdfs"));
110 |
111 | assertTrue(UserGroupInformation.isSecurityEnabled());
112 | assertTrue(UserGroupInformation.isLoginKeytabBased());
113 |
114 | // Write a file to HDFS containing the test string
115 | FSDataOutputStream writer = hdfsFsHandle.create(
116 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
117 | writer.writeUTF(propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
118 | writer.close();
119 |
120 | // Read the file and compare to test string
121 | FSDataInputStream reader = hdfsFsHandle.open(
122 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
123 | assertEquals(reader.readUTF(), propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
124 | reader.close();
125 |
126 | // Log out
127 | UserGroupInformation.getLoginUser().logoutUserFromKeytab();
128 |
129 | UserGroupInformation.reset();
130 |
131 | try {
132 | Configuration conf = new Configuration();
133 | UserGroupInformation.setConfiguration(conf);
134 | FileSystem.get(hdfsFsHandle.getUri(), conf).open(
135 | new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
136 | fail();
137 | } catch (AccessControlException e) {
138 | LOG.info("Not authenticated!");
139 | }
140 | }
141 | }
142 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-kdc/src/test/java/com/github/sakserv/minicluster/impl/KdcLocalClusterZookeeperIntegrationTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | package com.github.sakserv.minicluster.impl;
16 |
17 | import com.github.sakserv.minicluster.auth.Jaas;
18 | import com.github.sakserv.minicluster.config.ConfigVars;
19 | import com.github.sakserv.minicluster.util.FileUtils;
20 | import com.github.sakserv.propertyparser.PropertyParser;
21 | import org.apache.curator.framework.CuratorFramework;
22 | import org.apache.curator.framework.CuratorFrameworkFactory;
23 | import org.apache.curator.retry.ExponentialBackoffRetry;
24 | import org.apache.zookeeper.CreateMode;
25 | import org.apache.zookeeper.KeeperException;
26 | import org.apache.zookeeper.ZooDefs;
27 | import org.apache.zookeeper.data.ACL;
28 | import org.junit.AfterClass;
29 | import org.junit.BeforeClass;
30 | import org.junit.Test;
31 | import org.slf4j.Logger;
32 | import org.slf4j.LoggerFactory;
33 |
34 | import java.io.IOException;
35 | import java.util.ArrayList;
36 | import java.util.HashMap;
37 | import java.util.List;
38 | import java.util.Map;
39 |
40 | import static org.junit.Assert.fail;
41 |
42 | public class KdcLocalClusterZookeeperIntegrationTest {
43 |
44 | // Logger
45 | private static final Logger LOG = LoggerFactory.getLogger(KdcLocalClusterZookeeperIntegrationTest.class);
46 |
47 | // Setup the property parser
48 | private static PropertyParser propertyParser;
49 |
50 | static {
51 | try {
52 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
53 | propertyParser.parsePropsFile();
54 | } catch (IOException e) {
55 | LOG.error("Unable to load property file: {}", ConfigVars.DEFAULT_PROPS_FILE);
56 | }
57 | }
58 |
59 | private static KdcLocalCluster kdcLocalCluster;
60 | private static ZookeeperLocalCluster zookeeperLocalCluster;
61 |
62 | @BeforeClass
63 | public static void setUp() throws Exception {
64 |
65 | //System.setProperty("sun.security.krb5.debug", "true");
66 |
67 | // Force clean
68 | FileUtils.deleteFolder(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY));
69 |
70 | // KDC
71 | kdcLocalCluster = new KdcLocalCluster.Builder()
72 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_PORT_KEY)))
73 | .setHost(propertyParser.getProperty(ConfigVars.KDC_HOST_KEY))
74 | .setBaseDir(propertyParser.getProperty(ConfigVars.KDC_BASEDIR_KEY))
75 | .setOrgDomain(propertyParser.getProperty(ConfigVars.KDC_ORG_DOMAIN_KEY))
76 | .setOrgName(propertyParser.getProperty(ConfigVars.KDC_ORG_NAME_KEY))
77 | .setPrincipals(propertyParser.getProperty(ConfigVars.KDC_PRINCIPALS_KEY).split(","))
78 | .setKrbInstance(propertyParser.getProperty(ConfigVars.KDC_KRBINSTANCE_KEY))
79 | .setInstance(propertyParser.getProperty(ConfigVars.KDC_INSTANCE_KEY))
80 | .setTransport(propertyParser.getProperty(ConfigVars.KDC_TRANSPORT))
81 | .setMaxTicketLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_TICKET_LIFETIME_KEY)))
82 | .setMaxRenewableLifetime(Integer.parseInt(propertyParser.getProperty(ConfigVars.KDC_MAX_RENEWABLE_LIFETIME)))
83 | .setDebug(Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.KDC_DEBUG)))
84 | .build();
85 | kdcLocalCluster.start();
86 |
87 | // Zookeeper
88 | Jaas jaas = new Jaas()
89 | .addServiceEntry("Server", kdcLocalCluster.getKrbPrincipal("zookeeper"), kdcLocalCluster.getKeytabForPrincipal("zookeeper"), "zookeeper");
90 | javax.security.auth.login.Configuration.setConfiguration(jaas);
91 |
92 | Map<String, Object> properties = new HashMap<>();
93 | properties.put("authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
94 | properties.put("requireClientAuthScheme", "sasl");
95 | properties.put("sasl.serverconfig", "Server");
96 | properties.put("kerberos.removeHostFromPrincipal", "true");
97 | properties.put("kerberos.removeRealmFromPrincipal", "true");
98 |
99 | zookeeperLocalCluster = new ZookeeperLocalCluster.Builder()
100 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
101 | .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY))
102 | .setZookeeperConnectionString(propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
103 | .setCustomProperties(properties)
104 | .build();
105 | zookeeperLocalCluster.start();
106 | }
107 |
108 | @AfterClass
109 | public static void tearDown() throws Exception {
110 | zookeeperLocalCluster.stop();
111 | kdcLocalCluster.stop();
112 | }
113 |
114 | @Test
115 | public void testZookeeper() throws Exception {
116 |
117 | try (CuratorFramework client = CuratorFrameworkFactory.newClient(zookeeperLocalCluster.getZookeeperConnectionString(),
118 | new ExponentialBackoffRetry(1000, 3))) {
119 | client.start();
120 | client.getChildren().forPath("/");
121 | fail();
122 | } catch (KeeperException.AuthFailedException e) {
123 | LOG.debug("Not authenticated!");
124 | }
125 |
126 | System.setProperty("zookeeper.sasl.client", "true");
127 | System.setProperty("zookeeper.sasl.clientconfig", "Client");
128 | javax.security.auth.login.Configuration.setConfiguration(new Jaas()
129 | .addEntry("Client", kdcLocalCluster.getKrbPrincipalWithRealm("guest"), kdcLocalCluster.getKeytabForPrincipal("guest")));
130 |
131 | try (CuratorFramework client = CuratorFrameworkFactory.newClient(zookeeperLocalCluster.getZookeeperConnectionString(),
132 | new ExponentialBackoffRetry(1000, 3))) {
133 | client.start();
134 | client.getChildren().forPath("/").forEach(LOG::debug);
135 |
136 | List<ACL> perms = new ArrayList<>();
137 | perms.add(new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.AUTH_IDS));
138 | perms.add(new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE));
139 |
140 | client.create().withMode(CreateMode.PERSISTENT).withACL(perms).forPath(propertyParser.getProperty(ConfigVars.HBASE_ZNODE_PARENT_KEY));
141 | }
142 | }
143 | }
144 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-knox/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | hadoop-mini-clusters
5 | com.github.sakserv
6 | 0.1.17-SNAPSHOT
7 |
8 | 4.0.0
9 |
10 | hadoop-mini-clusters-knox
11 |
12 |
13 |
14 |
15 |
16 | org.apache.knox
17 | gateway-server
18 | ${knox.version}
19 |
20 |
21 | org.apache.knox
22 | gateway-provider-rewrite
23 | ${knox.version}
24 |
25 |
26 | org.apache.knox
27 | gateway-provider-rewrite-step-secure-query
28 | ${knox.version}
29 |
30 |
31 | org.apache.knox
32 | gateway-provider-rewrite-step-encrypt-uri
33 | ${knox.version}
34 |
35 |
36 | org.apache.knox
37 | gateway-provider-rewrite-func-hostmap-static
38 | ${knox.version}
39 |
40 |
41 | org.apache.knox
42 | gateway-provider-rewrite-func-service-registry
43 | ${knox.version}
44 |
45 |
46 | org.apache.knox
47 | gateway-service-definitions
48 | ${knox.version}
49 |
50 |
51 | org.apache.knox
52 | gateway-test
53 | ${knox.version}
54 |
55 |
56 | com.mycila.xmltool
57 | xmltool
58 | 3.3
59 |
60 |
61 |
62 |
63 | com.github.sakserv
64 | hadoop-mini-clusters-common
65 | ${project.version}
66 |
67 |
68 |
70 |
71 | org.apache.httpcomponents
72 | httpclient
73 | ${httpclient.version}
74 | test
75 |
76 |
77 |
78 |
79 | com.github.sakserv
80 | hadoop-mini-clusters-hdfs
81 | ${project.version}
82 | test
83 |
84 |
85 |
86 |
87 | com.github.sakserv
88 | hadoop-mini-clusters-hbase
89 | ${project.version}
90 | test
91 |
92 |
93 | com.github.sakserv
94 | hadoop-mini-clusters-zookeeper
95 | ${project.version}
96 | test
97 |
98 |
99 |
100 |
101 | org.apache.knox
102 | gateway-shell
103 | ${knox.version}
104 | test
105 |
106 |
107 | org.apache.knox
108 | gateway-provider-security-authc-anon
109 | ${knox.version}
110 | test
111 |
112 |
113 | org.apache.knox
114 | gateway-provider-identity-assertion-pseudo
115 | ${knox.version}
116 | test
117 |
118 |
119 | org.apache.knox
120 | gateway-service-webhdfs
121 | ${knox.version}
122 | test
123 |
124 |
125 | org.apache.knox
126 | gateway-service-hbase
127 | ${knox.version}
128 | test
129 |
130 |
131 |
132 |
133 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-knox/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # HDFS
2 | hdfs.namenode.port=20112
3 | hdfs.namenode.http.port=50070
4 | hdfs.temp.dir=embedded_hdfs
5 | hdfs.num.datanodes=1
6 | hdfs.enable.permissions=false
7 | hdfs.format=true
8 | hdfs.enable.running.user.as.proxy.user=true
9 |
10 | # HDFS Test
11 | hdfs.test.file=/tmp/testing
12 | hdfs.test.string=TESTING
13 |
14 | # Zookeeper
15 | zookeeper.temp.dir=embedded_zk
16 | zookeeper.host=127.0.0.1
17 | zookeeper.port=22010
18 | zookeeper.connection.string=127.0.0.1:22010
19 |
20 | # HBase
21 | hbase.master.port=25111
22 | hbase.master.info.port=-1
23 | hbase.num.region.servers=1
24 | hbase.root.dir=embedded_hbase
25 | hbase.znode.parent=/hbase-unsecure
26 | hbase.wal.replication.enabled=false
27 |
28 | # HBase REST
29 | hbase.rest.port=28000
30 | hbase.rest.readonly=false
31 | hbase.rest.info.port=28080
32 | hbase.rest.host=0.0.0.0
33 | hbase.rest.threads.max=100
34 | hbase.rest.threads.min=2
35 |
36 | # HBase Test
37 | hbase.test.table.name=hbase_test_table
38 | hbase.test.col.family.name=cf1
39 | hbase.test.col.qualifier.name=cq1
40 | hbase.test.num.rows.to.put=50
41 |
42 | # KNOX
43 | knox.host=localhost
44 | knox.port=8888
45 | knox.path=gateway
46 | knox.cluster=mycluster
47 | knox.home.dir=embedded_knox
--------------------------------------------------------------------------------
/hadoop-mini-clusters-knox/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
9 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-knox/src/test/java/com/github/sakserv/minicluster/impl/KnoxLocalClusterTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 |
15 | package com.github.sakserv.minicluster.impl;
16 |
17 | import com.github.sakserv.minicluster.config.ConfigVars;
18 | import com.github.sakserv.propertyparser.PropertyParser;
19 | import com.mycila.xmltool.XMLDoc;
20 | import org.junit.BeforeClass;
21 | import org.junit.Rule;
22 | import org.junit.Test;
23 | import org.junit.rules.ExpectedException;
24 | import org.slf4j.Logger;
25 | import org.slf4j.LoggerFactory;
26 |
27 | import java.io.IOException;
28 |
29 | import static org.junit.Assert.assertEquals;
30 |
31 | /**
32 | * @author Vincent Devillers
33 | */
34 | public class KnoxLocalClusterTest {
35 |
36 | // Logger
37 | private static final Logger LOG = LoggerFactory.getLogger(KnoxLocalClusterTest.class);
38 |
39 | // Setup the property parser
40 | private static PropertyParser propertyParser;
41 |
42 | static {
43 | try {
44 | propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
45 | propertyParser.parsePropsFile();
46 | } catch (IOException e) {
47 | LOG.error("Unable to load property file: {}", ConfigVars.DEFAULT_PROPS_FILE);
48 | }
49 | }
50 |
51 | @Rule
52 | public ExpectedException exception = ExpectedException.none();
53 |
54 | private static KnoxLocalCluster knoxLocalCluster;
55 |
56 | @BeforeClass
57 | public static void setUp() {
58 | knoxLocalCluster = new KnoxLocalCluster.Builder()
59 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_PORT_KEY)))
60 | .setPath(propertyParser.getProperty(ConfigVars.KNOX_PATH_KEY))
61 | .setHomeDir(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY))
62 | .setCluster(propertyParser.getProperty(ConfigVars.KNOX_CLUSTER_KEY))
63 | .setTopology(XMLDoc.newDocument(true)
64 | .addRoot("topology")
65 | .addTag("service")
66 | .addTag("role").addText("WEBHDFS")
67 | .addTag("url").addText("http://localhost:20112/webhdfs")
68 | .gotoRoot().toString())
69 | .build();
70 | }
71 |
72 | @Test
73 | public void testKnoxPort() {
74 | assertEquals(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_PORT_KEY)),
75 | (int) knoxLocalCluster.getPort());
76 | }
77 |
78 | @Test
79 | public void testMissingKnoxPort() {
80 | exception.expect(IllegalArgumentException.class);
81 | knoxLocalCluster = new KnoxLocalCluster.Builder()
82 | .setHomeDir(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY))
83 | .build();
84 | }
85 |
86 | @Test
87 | public void testKnoxPath() {
88 | assertEquals(propertyParser.getProperty(ConfigVars.KNOX_PATH_KEY),
89 | knoxLocalCluster.getPath());
90 | }
91 |
92 | @Test
93 | public void testMissingKnoxPath() {
94 | exception.expect(IllegalArgumentException.class);
95 | knoxLocalCluster = new KnoxLocalCluster.Builder()
96 | .setHomeDir(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY))
97 | .build();
98 | }
99 |
100 | @Test
101 | public void testKnoxTempDir() {
102 | assertEquals(propertyParser.getProperty(ConfigVars.KNOX_HOME_DIR_KEY),
103 | knoxLocalCluster.getHomeDir());
104 | }
105 |
106 | @Test
107 | public void testMissingKnoxTempDir() {
108 | exception.expect(IllegalArgumentException.class);
109 | knoxLocalCluster = new KnoxLocalCluster.Builder()
110 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_PORT_KEY)))
111 | .build();
112 | }
113 |
114 | @Test
115 | public void testKnoxCluster() {
116 | assertEquals(propertyParser.getProperty(ConfigVars.KNOX_CLUSTER_KEY),
117 | knoxLocalCluster.getCluster());
118 | }
119 |
120 | @Test
121 | public void testMissingKnoxCluster() {
122 | exception.expect(IllegalArgumentException.class);
123 | knoxLocalCluster = new KnoxLocalCluster.Builder()
124 | .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KNOX_PORT_KEY)))
125 | .build();
126 | }
127 | }
128 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-mapreduce/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | hadoop-mini-clusters
5 | com.github.sakserv
6 | 0.1.17-SNAPSHOT
7 |
8 | 4.0.0
9 |
10 | hadoop-mini-clusters-mapreduce
11 |
12 |
13 |
14 |
15 |
16 | org.apache.hadoop
17 | hadoop-client
18 | ${hadoop.version}
19 |
20 |
21 |
22 |
23 | org.apache.hadoop
24 | hadoop-minicluster
25 | ${hadoop.version}
26 |
27 |
28 |
29 |
30 | com.github.sakserv
31 | hadoop-mini-clusters-hdfs
32 | ${project.version}
33 | test
34 |
35 |
36 |
37 |
38 | com.github.sakserv
39 | hadoop-mini-clusters-common
40 | ${project.version}
41 |
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/hadoop-mini-clusters-mapreduce/src/main/resources/default.properties:
--------------------------------------------------------------------------------
1 | # HDFS
2 | hdfs.namenode.port=20112
3 | hdfs.temp.dir=embedded_hdfs
4 | hdfs.num.datanodes=1
5 | hdfs.enable.permissions=false
6 | hdfs.format=true
7 | hdfs.enable.running.user.as.proxy.user=true
8 |
9 | # YARN
10 | yarn.num.node.managers=1
11 | yarn.num.local.dirs=1
12 | yarn.num.log.dirs=1
13 | yarn.resource.manager.address=localhost:37001
14 | yarn.resource.manager.hostname=localhost
15 | yarn.resource.manager.scheduler.address=localhost:37002
16 | yarn.resource.manager.resource.tracker.address=localhost:37003
17 | yarn.resource.manager.webapp.address=localhost:37004
18 | yarn.use.in.jvm.container.executor=false
19 |
20 | # MR
21 | mr.job.history.address=localhost:37005
22 |
23 | # MR Test
24 | mr.test.data.filename=mr_input.txt
25 | mr.test.data.hdfs.input.dir=/tmp/mr_input
26 | mr.test.data.hdfs.output.dir=/tmp/mr_output
--------------------------------------------------------------------------------
/hadoop-mini-clusters-mapreduce/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Root logger option
2 | log4j.rootLogger=INFO, stdout
3 |
4 | # Direct log messages to stdout
5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
6 | log4j.appender.stdout.Target=System.out
7 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
8 | log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
--------------------------------------------------------------------------------
/hadoop-mini-clusters-mapreduce/src/test/java/com/github/sakserv/minicluster/mapreduce/Driver.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 | package com.github.sakserv.minicluster.mapreduce;
15 |
16 | import org.apache.hadoop.conf.Configuration;
17 | import org.apache.hadoop.fs.Path;
18 | import org.apache.hadoop.io.IntWritable;
19 | import org.apache.hadoop.io.Text;
20 | import org.apache.hadoop.mapreduce.Job;
21 | import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
22 | import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
23 | import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
24 | import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
25 |
26 | public class Driver {
27 |
28 | private static Configuration configuration;
29 |
30 | public Configuration getConfiguration() {
31 | return configuration;
32 | }
33 |
34 | public void setConfiguration(Configuration configuration) {
35 | this.configuration = configuration;
36 | }
37 |
38 | public static void main(String[] args) throws Exception {
39 |
40 | if (args.length != 2) {
41 | System.out.println("usage: [input] [output]");
42 | System.exit(-1);
43 | }
44 |
45 | if (null == configuration) {
46 | configuration = new Configuration();
47 | }
48 |
49 | Job job = Job.getInstance(configuration);
50 | job.setOutputKeyClass(Text.class);
51 | job.setOutputValueClass(IntWritable.class);
52 |
53 | job.setMapperClass(WordMapper.class);
54 | job.setReducerClass(SumReducer.class);
55 |
56 | job.setInputFormatClass(TextInputFormat.class);
57 | job.setOutputFormatClass(TextOutputFormat.class);
58 |
59 | FileInputFormat.setInputPaths(job, new Path(args[0]));
60 | FileOutputFormat.setOutputPath(job, new Path(args[1]));
61 |
62 | job.setJarByClass(Driver.class);
63 |
64 | job.waitForCompletion(true);
65 |
66 | }
67 | }
68 |
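
Driver wires a classic word-count job: WordMapper (its body is cut off at the end of this dump, but it presumably emits a (word, 1) pair per token) feeds SumReducer, which totals the counts per word. A hedged usage sketch against the HDFS paths from this module's default.properties; the module's real integration test is not shown here:

public class DriverUsageSketch {
    public static void main(String[] args) throws Exception {
        // mr.test.data.hdfs.input.dir and mr.test.data.hdfs.output.dir from default.properties
        Driver.main(new String[]{"/tmp/mr_input", "/tmp/mr_output"});
        // For an input line "hello world hello", TextOutputFormat writes tab-separated pairs:
        //   hello   2
        //   world   1
    }
}
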
--------------------------------------------------------------------------------
/hadoop-mini-clusters-mapreduce/src/test/java/com/github/sakserv/minicluster/mapreduce/SumReducer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed under the Apache License, Version 2.0 (the "License");
3 | * you may not use this file except in compliance with the License.
4 | * You may obtain a copy of the License at
5 | *
6 | * http://www.apache.org/licenses/LICENSE-2.0
7 | *
8 | * Unless required by applicable law or agreed to in writing, software
9 | * distributed under the License is distributed on an "AS IS" BASIS,
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | * See the License for the specific language governing permissions and
12 | * limitations under the License.
13 | */
14 | package com.github.sakserv.minicluster.mapreduce;
15 |
16 | import java.io.IOException;
17 | import java.util.Iterator;
18 |
19 | import org.apache.hadoop.io.IntWritable;
20 | import org.apache.hadoop.io.Text;
21 | import org.apache.hadoop.mapreduce.Reducer;
22 |
23 |
24 | public class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
25 |
26 | private IntWritable totalWordCount = new IntWritable();
27 |
28 | @Override
29 | public void reduce(Text key, Iterable<IntWritable> values, Context context)
30 | throws IOException, InterruptedException {
31 | int wordCount = 0;
32 | Iterator<IntWritable> it = values.iterator();
33 | while (it.hasNext()) {
34 | wordCount += it.next().get();
35 | }
36 | totalWordCount.set(wordCount);
37 | context.write(key, totalWordCount);
38 | }
39 | }
--------------------------------------------------------------------------------
/hadoop-mini-clusters-mapreduce/src/test/java/com/github/sakserv/minicluster/mapreduce/WordMapper.java:
--------------------------------------------------------------------------------
1 | package com.github.sakserv.minicluster.mapreduce;
2 |
3 | import java.io.IOException;
4 | import java.util.StringTokenizer;
5 |
6 | import org.apache.hadoop.io.IntWritable;
7 | import org.apache.hadoop.io.Text;
8 | import org.apache.hadoop.mapreduce.Mapper;
9 |
10 | public class WordMapper extends Mapper