├── .gitignore
├── .travis.yml
├── LICENSE
├── Mesosphere-JavaFormatter.xml
├── README.md
├── bin
│   ├── build-hdfs
│   ├── hdfs-mesos
│   ├── hdfs-mesos-datanode
│   ├── hdfs-mesos-journalnode
│   ├── hdfs-mesos-killtree
│   ├── hdfs-mesos-namenode
│   └── hdfs-mesos-zkfc
├── build.gradle
├── conf
│   ├── hdfs-site.xml
│   └── mesos-site.xml
├── config.md
├── example-conf
│   └── mesosphere-dcos
│       ├── core-site.xml
│       ├── hdfs-site.xml
│       └── mesos-site.xml
├── gradle.properties
├── gradle
│   ├── checkstyle
│   │   └── checkstyle.xml
│   ├── findbugs
│   │   └── excludeFilter.xml
│   ├── quality.gradle
│   ├── spock.gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
├── hdfs-commons
│   ├── build.gradle
│   └── src
│       └── main
│           └── java
│               └── org
│                   └── apache
│                       └── mesos
│                           └── hdfs
│                               ├── config
│                               │   ├── ConfigurationException.java
│                               │   ├── HdfsFrameworkConfig.java
│                               │   └── NodeConfig.java
│                               └── util
│                                   ├── HDFSConstants.java
│                                   └── TaskStatusFactory.java
├── hdfs-executor
│   ├── build.gradle
│   └── src
│       ├── main
│       │   ├── java
│       │   │   └── org
│       │   │       └── apache
│       │   │           └── mesos
│       │   │               └── hdfs
│       │   │                   └── executor
│       │   │                       ├── AbstractNodeExecutor.java
│       │   │                       ├── ExecutorException.java
│       │   │                       ├── HdfsProcessExitHandler.java
│       │   │                       ├── NameNodeExecutor.java
│       │   │                       ├── NodeExecutor.java
│       │   │                       ├── NodeHealthChecker.java
│       │   │                       ├── Task.java
│       │   │                       └── TaskShutdownHook.java
│       │   └── resources
│       │       └── logback.xml
│       └── test
│           └── java
│               └── org
│                   └── apache
│                       └── mesos
│                           └── hdfs
│                               └── executor
│                                   └── TaskSpec.groovy
├── hdfs-scheduler
│   ├── build.gradle
│   └── src
│       ├── main
│       │   ├── java
│       │   │   └── org
│       │   │       └── apache
│       │   │           └── mesos
│       │   │               └── hdfs
│       │   │                   ├── config
│       │   │                   │   ├── ConfigServer.java
│       │   │                   │   └── ConfigServerException.java
│       │   │                   ├── scheduler
│       │   │                   │   ├── DataNode.java
│       │   │                   │   ├── HdfsMesosConstraints.java
│       │   │                   │   ├── HdfsNode.java
│       │   │                   │   ├── HdfsScheduler.java
│       │   │                   │   ├── HdfsSchedulerModule.java
│       │   │                   │   ├── ILauncher.java
│       │   │                   │   ├── IOfferEvaluator.java
│       │   │                   │   ├── JournalNode.java
│       │   │                   │   ├── Main.java
│       │   │                   │   ├── NameNode.java
│       │   │                   │   ├── NodeLauncher.java
│       │   │                   │   ├── Reconciler.java
│       │   │                   │   ├── SchedulerException.java
│       │   │                   │   ├── StateFactory.java
│       │   │                   │   ├── Task.java
│       │   │                   │   └── ZKStateFactory.java
│       │   │                   ├── state
│       │   │                   │   ├── AcquisitionPhase.java
│       │   │                   │   ├── HdfsState.java
│       │   │                   │   ├── Serializer.java
│       │   │                   │   └── StateMachine.java
│       │   │                   └── util
│       │   │                       ├── DnsResolver.java
│       │   │                       ├── NodeTypes.java
│       │   │                       └── PreNNInitTask.java
│       │   └── resources
│       │       └── logback.xml
│       └── test
│           ├── java
│           │   └── org
│           │       └── apache
│           │           └── mesos
│           │               └── hdfs
│           │                   ├── SchedulerModuleTest.java
│           │                   ├── config
│           │                   │   └── HdfsFrameworkConfigSpec.groovy
│           │                   ├── scheduler
│           │                   │   ├── HdfsNodeSpec.groovy
│           │                   │   ├── HdfsSchedulerSpec.groovy
│           │                   │   ├── InMemoryStateFactory.java
│           │                   │   ├── SchedulerConstraintsTest.java
│           │                   │   └── SchedulerTest.java
│           │                   └── state
│           │                       ├── HdfsStateSpec.groovy
│           │                       ├── HdfsStateTest.java
│           │                       └── StateMachineTest.java
│           └── resources
│               ├── gcs-credentials.json
│               └── s3-credentials.json
├── mesos-commons
│   ├── build.gradle
│   └── src
│       ├── main
│       │   └── java
│       │       └── org
│       │           └── apache
│       │               └── mesos
│       │                   ├── collections
│       │                   │   ├── MapUtil.java
│       │                   │   └── StartsWithPredicate.java
│       │                   ├── file
│       │                   │   └── FileUtils.java
│       │                   ├── process
│       │                   │   ├── FailureUtils.java
│       │                   │   ├── ProcessFailureHandler.java
│       │                   │   ├── ProcessUtil.java
│       │                   │   └── ProcessWatcher.java
│       │                   ├── protobuf
│       │                   │   ├── AttributeUtil.java
│       │                   │   ├── CommandInfoBuilder.java
│       │                   │   ├── EnvironmentBuilder.java
│       │                   │   ├── ExecutorInfoBuilder.java
│       │                   │   ├── FrameworkInfoUtil.java
│       │                   │   ├── LabelBuilder.java
│       │                   │   ├── OfferBuilder.java
│       │                   │   ├── ResourceBuilder.java
│       │                   │   ├── SlaveUtil.java
│       │                   │   ├── TaskInfoBuilder.java
│       │                   │   ├── TaskStatusBuilder.java
│       │                   │   └── TaskUtil.java
│       │                   └── stream
│       │                       ├── StreamRedirect.java
│       │                       └── StreamUtil.java
│       └── test
│           └── java
│               └── org
│                   └── apache
│                       └── mesos
│                           └── collections
│                               ├── MapUtilSpec.groovy
│                               └── StartsWithPredicateSpec.groovy
├── pom.xml
└── settings.gradle
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | hadoop-*
3 | hdfs-mesos-*
4 | native/
5 |
6 | # idea
7 | *.i??
8 | .idea/
9 | out/
10 |
11 | # gradle
12 | .gradle
13 | build/
14 |
15 | # vim swap files
16 | *.swp
17 |
18 | # OS X
19 | .DS_Store
20 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: false
2 | language: java
3 | jdk:
4 | - openjdk7
5 | - oraclejdk7
6 | notifications:
7 | email: false
8 | slack:
9 | secure: KUqJxA9ibJxzB6apYaMu4wsyHD3thaltBLTN97RLv4wU0xCzbra8edf4Xe0nyWG040SLWZwBn/ewou+SzHUGAzcxArnl8yFmI38skFzFdH/wKAXH2RdlifHbjJOHftZQIIAR3d8TA6LmyvvWn7vFi/BQULxuj5mYFFGnoSubZrE=
10 | on_success: change
11 | on_failure: always
12 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # * * THIS REPOSITORY IS DEPRECATED * *
2 |
3 | *Do not use this HDFS framework for production workloads. The current HDFS Universe package for DC/OS is built from a new, experimental Mesosphere private repo that will be open sourced later in 2016. Please submit any bug reports or feature requests for that package to the [DCOS Community JIRA](https://dcosjira.atlassian.net/projects/HDFS).*
4 |
5 |
6 |
7 | HA HDFS on Apache Mesos
8 | ======================
9 | Starts one active NameNode (with a JournalNode and ZKFC), one standby NameNode (also with a JournalNode and ZKFC), one additional JournalNode, and runs DataNodes on all remaining nodes.
10 |
11 | Prerequisites
12 | --------------------------
13 | 1. Install `tar`, `unzip`, and `wget` on your build host. Set a proxy for Maven/Gradle and wget if needed.
14 | 2. Install `curl` on all hosts in the cluster.
15 | 3. `$JAVA_HOME` needs to be set on the host running your HDFS scheduler. This can be set through setting the environment variable on the host, `export JAVA_HOME=/path/to/jre`, or specifying the environment variable in Marathon.
16 |
17 | **NOTE:** The build process currently supports both Maven and Gradle. The Gradle wrapper metadata is included in the project and is self-bootstrapping (meaning Gradle is not a prerequisite install). Maven as the build system is being deprecated.
18 |
19 | Building HDFS-Mesos
20 | --------------------------
21 | 1. Customize configuration in `conf/*-site.xml`. All configuration files updated here will be used by the scheduler and also bundled with the executors.
22 | 2. `./bin/build-hdfs`
23 | 3. Run `./bin/build-hdfs nocompile` to skip the `gradlew clean shadowJar` step and just re-bundle the binaries.
24 | 4. To remove the project build output and downloaded binaries, run `./bin/build-hdfs clean`.
25 |
26 | **NOTE:** The build process builds the artifacts under the `$PROJ_DIR/build` directory. A number of zip and tar files are cached under the `cache` directory for faster subsequent builds. The tarball used for installation is hdfs-mesos-x.x.x.tgz which contains the scheduler and the executor to be distributed.
27 |
28 |
29 | Installing HDFS-Mesos on your Cluster
30 | --------------------------
31 | 1. Upload `hdfs-mesos-*.tgz` to a node in your Mesos cluster (which is built to `$PROJ_DIR/build/hdfs-mesos-x.x.x.tgz`).
32 | 2. Extract it with `tar zxvf hdfs-mesos-*.tgz`.
33 | 3. Optional: Customize any additional configurations that weren't updated at compile time in `hdfs-mesos-*/etc/hadoop/*-site.xml`. Note that if you update `hdfs-site.xml`, it will be used by the scheduler and bundled with the executors. However, `core-site.xml` and `mesos-site.xml` will be used by the scheduler only.
34 | 4. Check that `hostname` on that node resolves to a non-localhost IP; update /etc/hosts if necessary.
35 |
36 |
37 | **NOTE:** Read [Configurations](config.md) for details on how to configure and customize HDFS.
38 |
39 | Starting HDFS-Mesos
40 | --------------------------
41 | 1. `cd hdfs-mesos-*`
42 | 2. `./bin/hdfs-mesos`
43 | 3. Check the Mesos web console and wait until all tasks are RUNNING (you can monitor status in the JournalNode sandboxes).
44 |
45 | Using HDFS
46 | --------------------------
47 | See some of the many HDFS tutorials out there for more details, and explore the web UI at `http://<active NameNode host>:50070`.
48 | Note that you can access the cluster through `hdfs://<framework name>/` (default: `hdfs://hdfs/`).
49 | Also here is a quick sanity check:
50 |
51 | 1. `hadoop fs -ls hdfs://hdfs/` should show nothing for starters
52 | 2. `hadoop fs -put /path/to/src_file hdfs://hdfs/`
53 | 3. `hadoop fs -ls hdfs://hdfs/` should now list src_file
54 |
55 | Resource Reservation Instructions (Optional)
56 | --------------------------
57 |
58 | 1. In `mesos-site.xml`, change `mesos.hdfs.role` to `hdfs`.
59 | 2. On the master, add the role for HDFS by running `echo hdfs > /etc/mesos-master/role` or by setting the `--role=hdfs` flag.
60 | 3. Then restart the master by running `sudo service mesos-master restart`.
61 | 4. On each slave where you want to reserve resources, add specific resource reservations for the HDFS role. Here is one example: `echo "cpus(*):8;cpus(hdfs):4;mem(*):16384;mem(hdfs):8192" > /etc/mesos-slave/resources` or set `--resources="cpus(*):8;cpus(hdfs):4;mem(*):16384;mem(hdfs):8192"`.
62 | 5. On each slave with the new settings, stop the mesos slave by running `sudo service mesos-slave stop`.
63 | 6. On each slave with the new settings, remove the old slave state by running `rm -f /tmp/mesos/meta/slaves/latest`. Note: this will also remove task state, so you will want to manually kill any running tasks as a precaution.
64 | 7. On each slave with the new settings, start the mesos slave by running `sudo service mesos-slave start`.
70 |
71 | Applying mesos slave constraints (Optional)
72 | --------------------------
73 | 1. In mesos-site.xml, add the configuration mesos.hdfs.constraints
74 | 2. Set the value as a ";"-separated set of key:value pairs, with the key and value separated by ":". The key is the attribute name. Depending on the attribute type (text, scalar, set, or range), the value is matched as an exact match, a less-than-or-equal-to comparison, a subset, or a value within the range, respectively. For example:
75 | ```xml
76 | <property>
77 |   <name>mesos.hdfs.constraints</name>
78 |   <value>zone:west,east;cpu:4;quality:optimized-disk;id:4</value>
79 | </property>
80 | ```
81 | "zone" is of type set with members {"west","east"}.
82 | "cpu" is of type scalar.
83 | "quality" is of type text.
84 | "id" may be of type range.
86 |
87 | System Environment for Configurations
88 | --------------------------
89 | Many scheduler configurations can be set via system environment variables. To do this, convert the property name to upper case and replace `.` with `_`.
90 | For example, `mesos.hdfs.data.dir` can be overridden with `MESOS_HDFS_DATA_DIR`.
91 |
92 | Currently this only works for values that are used by the scheduler. Values used by the executor cannot be controlled in this way yet.
93 |
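As a rough, hypothetical illustration (not code from this repo), the naming rule is simply:

```java
// Hypothetical snippet showing the documented property-to-environment-variable naming rule.
public class EnvVarNameExample {
  public static void main(String[] args) {
    String property = "mesos.hdfs.data.dir";
    String envName = property.toUpperCase().replace('.', '_');
    System.out.println(envName); // prints MESOS_HDFS_DATA_DIR
  }
}
```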
94 |
95 | Authentication with CRAM-MD5 (Optional)
96 | --------------------------
97 | 1. In mesos-site.xml add the "mesos.hdfs.principal" and "mesos.hdfs.secret" properties. For example:
98 | ```xml
99 | <property>
100 |   <name>mesos.hdfs.principal</name>
101 |   <value>hdfs</value>
102 | </property>
103 | <property>
104 |   <name>mesos.hdfs.secret</name>
105 |   <value>%ComplexPassword%123</value>
106 | </property>
107 | ```
109 |
110 | 2. Ensure that the Mesos master has access to the same credentials. See the [Mesos configuration documentation](http://mesos.apache.org/documentation/latest/configuration/), in particular the --credentials flag. Authentication defaults to CRAM-MD5 so setting the --authenticators flag is not necessary.
111 |
112 | NameNode backup (Optional)
113 | --------------------------
114 | The framework supports "live" backup of NameNode data. This function is disabled by default.
115 |
116 | In order to enable it, you need to uncomment the `mesos.hdfs.backup.dir` setting in the `mesos-site.xml` file.
117 | This setting should point to a shared (e.g. NFS) directory. Example:
118 | ```xml
119 | <property>
120 |   <name>mesos.hdfs.backup.dir</name>
121 |   <description>Backup dir for HDFS</description>
122 |   <value>/nfs/hadoop</value>
123 | </property>
124 | ```
125 |
126 | With this approach, each NameNode is configured to use two data directories to store its data. Example for namenode1:
127 | ```xml
128 | <property>
129 |   <name>dfs.namenode.name.dir</name>
130 |   <value>file://${dataDir}/name,file://${backupDir}/namenode1</value>
131 | </property>
132 | ```
133 | All NameNode-related data will then be written to both directories.
134 |
135 | Shutdown Instructions (Optional)
136 | --------------------------
137 |
138 | 1. In Marathon (or your other long-running process monitor), stop the hdfs scheduler application.
139 | 2. Shutdown the hdfs framework in Mesos: `curl -d "frameworkId=YOUR_FRAMEWORK_ID" -X POST http://YOUR_MESOS_URL:5050/master/shutdown`
140 | 3. Access your zookeeper instance: `/PATH/TO/zookeeper/bin/zkCli.sh`
141 | 4. Remove hdfs-mesos framework state from zookeeper: `rmr /hdfs-mesos`
142 | 5. (Optional) Clear your data directories as specified in your `mesos-site.xml`. This is necessary to relaunch HDFS in the same directory.
143 |
144 | Developer Notes
145 | --------------------------
146 | The project uses [guice](https://github.com/google/guice), which is a lightweight dependency injection framework. In this project it is used
147 | during application startup initialization. This is accomplished by using the `@Inject` annotation. Guice is aware of all concrete classes
148 | which are annotated with `@Singleton`; however, when it comes to interfaces, guice needs to be "bound" to an implementation. This is accomplished
149 | with the `HdfsSchedulerModule` guice module class and is initialized in the main class with:
150 |
151 | ```java
152 | // this initializes guice with all the singletons + the passed in module
153 | Injector injector = Guice.createInjector(new HdfsSchedulerModule());
154 |
155 | // if this returns successfully, then the object was "wired" correctly.
156 | injector.getInstance(ConfigServer.class);
157 | ```
158 |
159 | If you have a singleton, mark it as such. If you have an interface plus an implementation class, then bind it in the `HdfsSchedulerModule` such as:
160 |
161 | ```java
162 | // bind(<interface>.class).to(<implementation>.class);
163 | bind(IPersistentStateStore.class).to(PersistentStateStore.class);
164 | ```
165 |
166 | In this case, when an `@Inject` is encountered during the initialization of a guice-initialized class, parameters of type `IPersistentStateStore` will have
167 | an instance of the `PersistentStateStore` class passed.
168 |
169 | The advantage of this technique is that the interface can easily have a mock class provided for testing. For more motivation [read guice's motivation page](https://github.com/google/guice/wiki/Motivation)
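
To make the wiring concrete, here is a minimal, self-contained sketch of the same pattern. The `IStateStore`, `ZkStateStore`, `ExampleModule`, and `ExampleScheduler` names are illustrative only, not classes from this repo:

```java
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Singleton;

interface IStateStore {
  void save(String key, byte[] value);
}

@Singleton
class ZkStateStore implements IStateStore {
  public void save(String key, byte[] value) { /* e.g. write to zookeeper */ }
}

class ExampleModule extends AbstractModule {
  @Override
  protected void configure() {
    // interface -> implementation binding, like HdfsSchedulerModule does
    bind(IStateStore.class).to(ZkStateStore.class);
  }
}

@Singleton
class ExampleScheduler {
  private final IStateStore store;

  @Inject
  ExampleScheduler(IStateStore store) {
    // guice passes an instance of the bound ZkStateStore here
    this.store = store;
  }
}

public class GuiceWiringExample {
  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new ExampleModule());
    // if this returns successfully, the object graph was wired correctly
    ExampleScheduler scheduler = injector.getInstance(ExampleScheduler.class);
  }
}
```

In a test, the `IStateStore` interface can simply be bound to an in-memory mock instead.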
170 |
--------------------------------------------------------------------------------
/bin/build-hdfs:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # this should move out to gradle builds
4 | # this will create in the project/build dir the tarball to distribute
5 |
6 | VERSION="0.1.5"
7 | PROJ_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
8 | BUILD_DIR=$PROJ_DIR/build
9 | BUILD_CACHE_DIR=$BUILD_DIR/cache
10 |
11 | HADOOP_VER=2.5.0-cdh5.3.1
12 | HADOOP_DIR=hadoop-$HADOOP_VER
13 | HADOOP_ZIP=$HADOOP_DIR.tar.gz
14 | HADOOP_URL=http://archive.cloudera.com/cdh5/cdh/5/$HADOOP_ZIP
15 | IMPALA_VER=cdh5.3.1-release
16 | IMPALA_ZIP=$IMPALA_VER.zip
17 | IMPALA_URL=https://github.com/cloudera/Impala/archive/$IMPALA_ZIP
18 | NATIVE=native
19 |
20 | # the full distro is in the $DIST dir or $DIST.tgz
21 | DIST=hdfs-mesos-$VERSION
22 | EXECUTOR=hdfs-mesos-executor-$VERSION
23 |
24 | # Remove cached binaries and exit
25 | if [ "$1" == "clean" ]; then
26 | rm -rf $BUILD_DIR
27 | $PROJ_DIR/gradlew clean
28 | exit 0
29 | fi
30 |
31 | # Build and package hdfs-mesos project
32 | if [ "$1" != "nocompile" ]; then
33 | $PROJ_DIR/gradlew clean shadowJar || exit
34 | fi
35 |
36 | # Download hadoop binary
37 | if [ ! -f $BUILD_CACHE_DIR/$HADOOP_ZIP ]; then
38 | echo "Downloading $HADOOP_URL"
39 | wget -P $BUILD_CACHE_DIR $HADOOP_URL || exit
40 | else
41 | echo "($HADOOP_ZIP already exists, skipping dl)"
42 | fi
43 |
44 | # Extract hadoop
45 | if [ ! -d $BUILD_CACHE_DIR/$HADOOP_DIR ]; then
46 | echo $BUILD_CACHE_DIR/$HADOOP_DIR
47 | echo "Extracting $HADOOP_ZIP in $BUILD_CACHE_DIR"
48 | cd $BUILD_CACHE_DIR
49 | tar xf $HADOOP_ZIP
50 | cd -
51 | else
52 | echo "($HADOOP_DIR already exists, skipping extract)"
53 | fi
54 |
55 | # Get native libraries
56 | if [ ! -d $BUILD_CACHE_DIR/$NATIVE ]; then
57 | echo "Downloading and unpacking native libs"
58 | wget -P $BUILD_CACHE_DIR $IMPALA_URL || exit
59 | cd $BUILD_CACHE_DIR
60 | unzip -q $IMPALA_VER.zip
61 | mkdir -p $BUILD_CACHE_DIR/$NATIVE
62 | cp $BUILD_CACHE_DIR/Impala-$IMPALA_VER/thirdparty/$HADOOP_DIR/lib/native/lib* $BUILD_CACHE_DIR/$NATIVE
63 | rm -rf $BUILD_CACHE_DIR/$IMPALA_VER* $BUILD_DIR/Impala*
64 | cd -
65 | else
66 | echo "($BUILD_DIR/$NATIVE libs already exist, skipping dl)"
67 | fi
68 |
69 | # Create dist
70 | if [ ! -d $BUILD_CACHE_DIR/$EXECUTOR ]; then
71 | echo "Creating new $BUILD_CACHE_DIR/$EXECUTOR dist folder"
72 | mkdir -p $BUILD_CACHE_DIR/$EXECUTOR
73 | else
74 | echo "($BUILD_CACHE_DIR/$EXECUTOR already exists, deleting before create)"
75 | rm -rf $BUILD_CACHE_DIR/$EXECUTOR
76 | mkdir -p $BUILD_CACHE_DIR/$EXECUTOR
77 | fi
78 |
79 | # Copy to dist
80 | echo "Copying required hadoop dependencies into $BUILD_DIR/$EXECUTOR"
81 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/bin $BUILD_CACHE_DIR/$EXECUTOR
82 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/etc $BUILD_CACHE_DIR/$EXECUTOR
83 | rm -rf $BUILD_CACHE_DIR/$EXECUTOR/etc/hadoop-mapreduce1 $BUILD_CACHE_DIR/$EXECUTOR/etc/hadoop-mapreduce1-pseudo $BUILD_CACHE_DIR/$EXECUTOR/etc/hadoop-mapreduce1-secure
84 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/libexec $BUILD_CACHE_DIR/$EXECUTOR
85 | mkdir -p $BUILD_CACHE_DIR/$EXECUTOR/share/hadoop/common
86 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/common/hadoop-common-$HADOOP_VER.jar $BUILD_CACHE_DIR/$EXECUTOR/share/hadoop/common
87 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/common/lib $BUILD_CACHE_DIR/$EXECUTOR/share/hadoop/common
88 | mkdir -p $BUILD_CACHE_DIR/$EXECUTOR/share/hadoop/hdfs
89 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/hdfs/hadoop-hdfs-$HADOOP_VER.jar $BUILD_CACHE_DIR/$EXECUTOR/share/hadoop/hdfs
90 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/hdfs/lib $BUILD_CACHE_DIR/$EXECUTOR/share/hadoop/hdfs
91 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/hdfs/webapps $BUILD_CACHE_DIR/$EXECUTOR/share/hadoop/hdfs
92 |
93 | mkdir -p $BUILD_CACHE_DIR/$EXECUTOR/lib/native
94 | cp $BUILD_CACHE_DIR/$NATIVE/* $BUILD_CACHE_DIR/$EXECUTOR/lib/native
95 |
96 | echo "Copying build output into $BUILD_CACHE_DIR/$DIST"
97 | cd $BUILD_CACHE_DIR/$EXECUTOR
98 | cp $PROJ_DIR/bin/* bin/
99 | cp $PROJ_DIR/hdfs-executor/build/libs/*-uber.jar lib/
100 | cp $PROJ_DIR/conf/* etc/hadoop/
101 | cd -
102 |
103 | # Compress tarball
104 | echo "Compressing to $EXECUTOR.tgz"
105 | rm -f $BUILD_CACHE_DIR/$EXECUTOR.tgz
106 | cd $BUILD_CACHE_DIR
107 | tar czf $EXECUTOR.tgz $EXECUTOR
108 | cd -
109 |
110 | ##### Framework / scheduler build
111 |
112 | # Create Framework dir
113 | if [ ! -d $BUILD_DIR/$DIST ]; then
114 | echo "Creating new $BUILD_DIR/$DIST dist folder"
115 | mkdir -p $BUILD_DIR/$DIST
116 | else
117 | echo "($BUILD_DIR/$DIST already exists, deleting before create)"
118 | rm -rf $BUILD_DIR/$DIST
119 | mkdir -p $BUILD_DIR/$DIST
120 | fi
121 |
122 | # scheduler
123 | mkdir -p $BUILD_DIR/$DIST/bin
124 | mkdir -p $BUILD_DIR/$DIST/lib
125 | mkdir -p $BUILD_DIR/$DIST/etc/hadoop
126 |
127 | echo "Copying required hadoop dependencies into $BUILD_DIR/$DIST for the scheduler"
128 | cp $BUILD_CACHE_DIR/$HADOOP_DIR/bin/* $BUILD_DIR/$DIST/bin
129 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/etc/hadoop $BUILD_DIR/$DIST/etc
130 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/libexec $BUILD_DIR/$DIST
131 | mkdir -p $BUILD_DIR/$DIST/share/hadoop/common
132 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/common/hadoop-common-$HADOOP_VER.jar $BUILD_DIR/$DIST/share/hadoop/common
133 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/common/lib $BUILD_DIR/$DIST/share/hadoop/common
134 | mkdir -p $BUILD_DIR/$DIST/share/hadoop/hdfs
135 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/hdfs/hadoop-hdfs-$HADOOP_VER.jar $BUILD_DIR/$DIST/share/hadoop/hdfs
136 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/hdfs/lib $BUILD_DIR/$DIST/share/hadoop/hdfs
137 | cp -R $BUILD_CACHE_DIR/$HADOOP_DIR/share/hadoop/hdfs/webapps $BUILD_DIR/$DIST/share/hadoop/hdfs
138 |
139 | ## hdfs scheduler project needs
140 | cp $PROJ_DIR/bin/hdfs-mesos $BUILD_DIR/$DIST/bin
141 | cp $PROJ_DIR/hdfs-scheduler/build/libs/*-uber.jar $BUILD_DIR/$DIST/lib
142 | cp $BUILD_CACHE_DIR/$EXECUTOR.tgz $BUILD_DIR/$DIST
143 | cp $PROJ_DIR/conf/*.xml $BUILD_DIR/$DIST/etc/hadoop
144 |
145 | echo Adding read permissions to everything in and below $BUILD_DIR
146 | cd $BUILD_DIR
147 | chmod -R a+r .
148 |
149 | echo Creating $DIST.tgz while retaining permissions
150 | tar pczf $DIST.tgz $DIST
151 |
152 | echo "HDFS framework build complete: $BUILD_DIR/$DIST.tgz"
153 |
--------------------------------------------------------------------------------
/bin/hdfs-mesos:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
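# PORT0 is the port Marathon assigns when the scheduler is run as a Marathon app; fall back to 8765 for manual launches.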
3 | if [ -z "$PORT0" ] ; then
4 | PORT0="8765"
5 | fi
6 |
7 | if [ -z "$JAVA_HOME" ]; then
8 | JAVA_CMD=$(readlink -f $(which java))
9 | if [ -z "$JAVA_CMD" ]; then
10 | echo "Error: java not found and JAVA_HOME not set"
11 | exit 1;
12 | fi
13 | else
14 | JAVA_CMD=$JAVA_HOME/bin/java
15 | fi
16 |
17 | exec $JAVA_CMD -cp lib/*.jar -Dmesos.conf.path=etc/hadoop/mesos-site.xml -Dmesos.hdfs.config.server.port=$PORT0 org.apache.mesos.hdfs.scheduler.Main
18 |
--------------------------------------------------------------------------------
/bin/hdfs-mesos-datanode:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 |
5 | if [ -z "$JAVA_HOME" ]; then
6 | JAVA_HOME_DIR=$(dirname $(readlink -f $(which java)))
7 | if [ -f $JAVA_HOME_DIR/../../bin/java ]; then
8 | export JAVA_HOME=$JAVA_HOME_DIR/../..
9 | elif [ -f $JAVA_HOME_DIR/../bin/java ]; then
10 | export JAVA_HOME=$JAVA_HOME_DIR/..
11 | else
12 | echo "Error: Could not determine JAVA_HOME"
13 | exit 1;
14 | fi
15 | fi
16 |
17 | trap "{ $DIR/mesos-killtree "$$" ; exit 0; }" EXIT
18 |
19 | function run_datanode() {
20 | while [ true ] ; do
21 | $DIR/hdfs datanode
22 | done
23 | }
24 |
25 | run_datanode
26 |
--------------------------------------------------------------------------------
/bin/hdfs-mesos-journalnode:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 |
5 | if [ -z "$JAVA_HOME" ]; then
6 | JAVA_HOME_DIR=$(dirname $(readlink -f $(which java)))
7 | if [ -f $JAVA_HOME_DIR/../../bin/java ]; then
8 | export JAVA_HOME=$JAVA_HOME_DIR/../..
9 | elif [ -f $JAVA_HOME_DIR/../bin/java ]; then
10 | export JAVA_HOME=$JAVA_HOME_DIR/..
11 | else
12 | echo "Error: Could not determine JAVA_HOME"
13 | exit 1;
14 | fi
15 | fi
16 |
17 | trap "{ $DIR/hdfs-mesos-killtree "$$" ; exit 0; }" EXIT
18 |
19 | function run_journalnode() {
20 | while [ true ] ; do
21 | $DIR/hdfs journalnode
22 | sleep 10
23 | done
24 | }
25 |
26 | run_journalnode
27 |
--------------------------------------------------------------------------------
/bin/hdfs-mesos-killtree:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | killtree() {
4 | local pid=$1
5 | local signal=$2
6 | if [ "$1" != "$$" ]; then
7 | kill -STOP ${pid}   # pause this process so it cannot spawn new children while we walk the tree
8 | for child in $(ps -o pid --no-headers --ppid ${pid}); do
9 | killtree ${child} ${signal}
10 | done
11 | kill -${signal} ${pid}   # signal the process itself...
12 | kill -CONT ${pid}        # ...and resume it so the signal is delivered
13 | fi
14 | }
15 |
16 | begin() {
17 | local pid=$1
18 | for child in $(ps -o pid --no-headers --ppid ${pid}); do
19 | killtree ${child} "TERM"
20 | done
21 | sleep 5
22 | for child in $(ps -o pid --no-headers --ppid ${pid}); do
23 | killtree ${child} "KILL"
24 | done
25 | }
26 |
27 | begin $@
28 |
29 |
--------------------------------------------------------------------------------
/bin/hdfs-mesos-namenode:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo hdfs-mesos-namenode $1 $2 $3 $4 $5
3 |
4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
5 |
6 | if [ -z "$JAVA_HOME" ]; then
7 | JAVA_HOME_DIR=$(dirname $(readlink -f $(which java)))
8 | if [ -f $JAVA_HOME_DIR/../../bin/java ]; then
9 | export JAVA_HOME=$JAVA_HOME_DIR/../..
10 | elif [ -f $JAVA_HOME_DIR/../bin/java ]; then
11 | export JAVA_HOME=$JAVA_HOME_DIR/..
12 | else
13 | echo "Error: Could not determine JAVA_HOME"
14 | exit 1;
15 | fi
16 | fi
17 |
18 | trap "{ $DIR/hdfs-mesos-killtree "$$" ; exit 0; }" EXIT
19 |
20 | function bootstrap_standby() {
21 | echo "Asked to bootstrap namenode"
22 | $DIR/hdfs zkfc -formatZK -force
23 | exec $DIR/hdfs namenode -bootstrapStandby -force
24 | }
25 |
26 | function format_namenode() {
27 | echo "Asked to format namenode"
28 | $DIR/hdfs zkfc -formatZK -force
29 | $DIR/hdfs namenode -format -force || exit 3
30 | }
31 |
32 | function initialize_shared_edits() {
33 | echo "Asked to initialize shared edits"
34 | exec $DIR/hdfs namenode -initializeSharedEdits -force
35 | }
36 |
37 | function run_namenode() {
38 | echo "Asked to run namenode"
39 | while [ true ] ; do
40 | $DIR/hdfs namenode
41 | sleep 10
42 | done
43 | }
44 |
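# Options passed by the scheduler: -i format the NameNode, -b bootstrap the standby NameNode, -s initialize shared edits; with no option, just run the NameNode.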
45 | while getopts ":ibs" opt; do
46 | case $opt in
47 | i)
48 | format_namenode
49 | $DIR/hdfs-mesos-killtree "$$"
50 | exit 0
51 | ;;
52 | b)
53 | bootstrap_standby
54 | ;;
55 | s)
56 | initialize_shared_edits
57 | ;;
58 | \?)
59 | echo "Invalid option: -$OPTARG"
60 | exit 123
61 | ;;
62 | esac
63 | done
64 |
65 | run_namenode
66 |
--------------------------------------------------------------------------------
/bin/hdfs-mesos-zkfc:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 |
5 | if [ -z "$JAVA_HOME" ]; then
6 | JAVA_HOME_DIR=$(dirname $(readlink -f $(which java)))
7 | if [ -f $JAVA_HOME_DIR/../../bin/java ]; then
8 | export JAVA_HOME=$JAVA_HOME_DIR/../..
9 | elif [ -f $JAVA_HOME_DIR/../bin/java ]; then
10 | export JAVA_HOME=$JAVA_HOME_DIR/..
11 | else
12 | echo "Error: Could not determine JAVA_HOME"
13 | exit 1;
14 | fi
15 | fi
16 |
17 | trap "{ $DIR/hdfs-mesos-killtree "$$" ; exit 0; }" EXIT
18 |
19 | function run_zkfc() {
20 | while [ true ] ; do
21 | $DIR/hdfs zkfc
22 | sleep 10
23 | done
24 | }
25 |
26 | run_zkfc
27 |
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
1 | allprojects {
2 | apply plugin: 'idea'
3 |
4 | group = "com.apache.mesos.hdfs"
5 | version = "0.1.5"
6 | }
7 |
8 | idea {
9 | project {
10 | jdkName = '1.8'
11 | languageLevel = '1.7'
12 | ipr {
13 | withXml { provider ->
14 | provider.node.component
15 | .find { it.@name == 'VcsDirectoryMappings' }
16 | .mapping.@vcs = 'Git'
17 | }
18 | }
19 | }
20 | }
21 |
22 | subprojects {
23 |
24 | apply plugin: 'java'
25 | apply plugin: 'application'
26 | apply plugin: "jacoco"
27 |
28 | apply from: "$rootDir/gradle/quality.gradle"
29 | apply from: "$rootDir/gradle/spock.gradle"
30 |
31 | sourceCompatibility = '1.7'
32 | targetCompatibility = '1.7'
33 |
34 | [compileJava, compileTestJava]*.options*.encoding = 'UTF-8'
35 |
36 | mainClassName = "org.apache.mesos.hdfs.scheduler.Main"
37 |
38 | ext {
39 | curatorVer = "2.9.0"
40 | mesosVer = "0.24.1"
41 | hadoopVer = "2.5.0"
42 | slf4jVer = "1.7.10"
43 | logbackVer = "1.1.2"
44 | guiceVer = "3.0"
45 | junitVer = "4.11"
46 | mockitoVer = "1.9.5"
47 | }
48 |
49 | repositories {
50 | mavenLocal()
51 | mavenCentral()
52 | }
53 |
54 | dependencies {
55 |
56 | compile "org.apache.curator:curator-framework:${curatorVer}"
57 | compile "org.apache.curator:curator-recipes:${curatorVer}"
58 | compile "org.apache.mesos:mesos:${mesosVer}"
59 |
60 | compile "org.slf4j:log4j-over-slf4j:${slf4jVer}"
61 | compile "org.slf4j:jcl-over-slf4j:${slf4jVer}"
62 | compile "ch.qos.logback:logback-classic:${logbackVer}"
63 |
64 | compile("org.apache.hadoop:hadoop-common:${hadoopVer}") {
65 | exclude group: "log4j", module: "log4j"
66 | exclude group: "org.slf4j", module: "slf4j-log4j12"
67 | exclude group: "javax.servlet", module: "servlet-api"
68 | exclude group: "commons-httpclient", module: "commons-httpclient"
69 | exclude group: "net.java.dev.jets3t", module: "jets3t"
70 | }
71 |
72 | compile "com.google.inject:guice:${guiceVer}"
73 |
74 | }
75 |
76 | jacocoTestReport {
77 | reports {
78 | xml.enabled false
79 | csv.enabled false
80 | html.destination "${buildDir}/jacoco"
81 | }
82 | }
83 | }
84 |
--------------------------------------------------------------------------------
/conf/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
16 |
17 |
18 |
19 |
20 |
21 | dfs.ha.automatic-failover.enabled
22 | true
23 |
24 |
25 |
26 | dfs.nameservice.id
27 | ${frameworkName}
28 |
29 |
30 |
31 | dfs.nameservices
32 | ${frameworkName}
33 |
34 |
35 |
36 | dfs.ha.namenodes.${frameworkName}
37 | nn1,nn2
38 |
39 |
40 |
41 | dfs.namenode.rpc-address.${frameworkName}.nn1
42 | ${nn1Hostname}:50071
43 |
44 |
45 |
46 | dfs.namenode.http-address.${frameworkName}.nn1
47 | ${nn1Hostname}:50070
48 |
49 |
50 |
51 | dfs.namenode.rpc-address.${frameworkName}.nn2
52 | ${nn2Hostname}:50071
53 |
54 |
55 |
56 | dfs.namenode.http-address.${frameworkName}.nn2
57 | ${nn2Hostname}:50070
58 |
59 |
60 |
61 | dfs.client.failover.proxy.provider.${frameworkName}
62 | org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
63 |
64 |
65 |
66 | dfs.namenode.shared.edits.dir
67 | qjournal://${journalnodes}/${frameworkName}
68 |
69 |
70 |
71 | ha.zookeeper.quorum
72 | ${haZookeeperQuorum}
73 |
74 |
75 |
76 | dfs.journalnode.edits.dir
77 | ${dataDir}/jn
78 |
79 |
80 |
81 | dfs.namenode.name.dir
82 | ${dataDir}/name${if backupDir},${backupDir}${end}
83 |
84 |
85 |
86 | dfs.datanode.data.dir
87 | file://${dataDir}/data${if secondaryDataDir},file://${secondaryDataDir}/data${end}
88 |
89 |
90 |
91 | dfs.ha.fencing.methods
92 | shell(/bin/true)
93 |
94 |
95 |
96 | dfs.permissions
97 | false
98 |
99 |
100 |
101 | dfs.datanode.du.reserved
102 | 10485760
103 |
104 |
105 |
106 | dfs.datanode.balance.bandwidthPerSec
107 | 41943040
108 |
109 |
110 |
111 | dfs.namenode.safemode.threshold-pct
112 | 0.90
113 |
114 |
115 |
116 | dfs.namenode.heartbeat.recheck-interval
117 |
118 | 60000
119 |
120 |
121 |
122 | dfs.datanode.handler.count
123 | 10
124 |
125 |
126 |
127 | dfs.namenode.handler.count
128 | 20
129 |
130 |
131 |
132 | dfs.image.compress
133 | true
134 |
135 |
136 |
137 | dfs.image.compression.codec
138 | org.apache.hadoop.io.compress.SnappyCodec
139 |
140 |
141 |
142 | dfs.namenode.invalidate.work.pct.per.iteration
143 | 0.35f
144 |
145 |
146 |
147 | dfs.namenode.replication.work.multiplier.per.iteration
148 | 4
149 |
150 |
151 |
152 |
153 | dfs.namenode.datanode.registration.ip-hostname-check
154 | false
155 |
156 |
157 |
158 | dfs.client.read.shortcircuit
159 | true
160 |
161 |
162 |
163 | dfs.client.read.shortcircuit.streams.cache.size
164 | 1000
165 |
166 |
167 |
168 | dfs.client.read.shortcircuit.streams.cache.size.expiry.ms
169 | 1000
170 |
171 |
172 |
173 |
174 | dfs.domain.socket.path
175 | ${domainSocketDir}/dn._PORT
176 |
177 |
178 |
--------------------------------------------------------------------------------
/conf/mesos-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
16 |
17 |
18 |
19 |
20 |
21 | mesos.hdfs.data.dir
22 | The primary data directory in HDFS
23 | /var/lib/hdfs/data
24 |
25 |
26 |
33 |
34 |
35 | mesos.hdfs.domain.socket.dir
36 | The location used for a local socket used by the data nodes
37 | /var/run/hadoop-hdfs
38 |
39 |
40 |
47 |
48 |
49 | mesos.hdfs.native-hadoop-binaries
50 | Mark true if you have hadoop pre-installed on your host machines (otherwise it will be distributed by the scheduler)
51 | false
52 |
53 |
54 |
55 | mesos.hdfs.framework.mnt.path
56 | Mount location (if mesos.hdfs.native-hadoop-binaries is marked false)
57 | /opt/mesosphere
58 |
59 |
60 |
61 | mesos.hdfs.state.zk
62 | Comma-separated hostname-port pairs of zookeeper node locations for HDFS framework state information
63 | localhost:2181
64 |
65 |
66 |
67 | mesos.master.uri
68 | Zookeeper entry for mesos master location
69 | zk://localhost:2181/mesos
70 |
71 |
72 |
73 | mesos.hdfs.zkfc.ha.zookeeper.quorum
74 | Comma-separated list of zookeeper hostname-port pairs for HDFS HA features
75 | localhost:2181
76 |
77 |
78 |
79 | mesos.hdfs.framework.name
80 | Your Mesos framework name and cluster name when accessing files (hdfs://YOUR_NAME)
81 | hdfs
82 |
83 |
84 |
85 | mesos.hdfs.mesosdns
86 | Whether to use Mesos DNS for service discovery within HDFS
87 | false
88 |
89 |
90 |
91 | mesos.hdfs.mesosdns.domain
92 | Root domain name of Mesos DNS (usually 'mesos')
93 | mesos
94 |
95 |
96 |
97 | mesos.native.library
98 | Location of libmesos.so
99 | /usr/local/lib/libmesos.so
100 |
101 |
102 |
103 | mesos.hdfs.journalnode.count
104 | Number of journal nodes (must be odd)
105 | 3
106 |
107 |
108 |
109 |
173 |
174 |
--------------------------------------------------------------------------------
/config.md:
--------------------------------------------------------------------------------
1 | ## Configuration of HDFS framework and HDFS
2 |
3 | The configuration of HDFS and this framework are managed via:
4 |
5 | * hdfs-site.xml
6 | * mesos-site.xml
7 | * system env vars and properties
8 |
9 | The hdfs-site.xml file is used to configure the hdfs cluster. The values must match the configuration of the scheduler. For this
10 | reason the hdfs-site.xml is generally "fetched" or refreshed from the scheduler when a node is started. The normal configuration of
11 | the hdfs-site.xml has variables which are replaced by the scheduler when the xml file is fetched by the node. An example of these
12 | variables is `${frameworkName}`. The scheduler code that does the variable replacement is handled by ConfigServer.java. An
13 | example of this variable replacement is `model.put("frameworkName", hdfsFrameworkConfig.getFrameworkName());`
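
As a rough illustration of that substitution step (a hypothetical sketch, not the actual `ConfigServer` implementation), the idea is:

```java
import java.util.HashMap;
import java.util.Map;

public class SiteXmlTemplateExample {
  // replace each ${name} placeholder in the template with its value from the model
  static String render(String template, Map<String, String> model) {
    String result = template;
    for (Map.Entry<String, String> entry : model.entrySet()) {
      result = result.replace("${" + entry.getKey() + "}", entry.getValue());
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> model = new HashMap<String, String>();
    model.put("frameworkName", "hdfs"); // e.g. hdfsFrameworkConfig.getFrameworkName()
    String template = "<value>hdfs://${frameworkName}</value>";
    System.out.println(render(template, model)); // prints <value>hdfs://hdfs</value>
  }
}
```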
14 |
15 | It is possible to have the HDFS-Mesos framework manage hdfs node instances on slaves that were previously provisioned with hdfs. Under this scenario
16 | there is no way to update the `hdfs-site.xml` file. This is indicated by setting the property `mesos.hdfs.native-hadoop-binaries` to `true` in the `mesos-site.xml` file.
17 | This indicates that binaries already exist on the nodes. Because the values in the `hdfs-site.xml` are not controlled by the HDFS-Mesos framework, it
18 | is important to make sure that all the xml files are consistent and the framework is started with property values which are consistent with the
19 | preexisting cluster.
20 |
21 | The mesos-site.xml file is used to configure the hdfs-mesos framework. We are working to deprecate this file. It generally establishes
22 | values for the scheduler, and in many cases these are passed to the executors. Although the configuration of the scheduler can be handled
23 | via XML configuration, we encourage the use of system environment variables for this purpose.
24 |
25 | ## Configuration Options
26 |
27 | * mesos.hdfs.framework.name - Used to define the framework name. This allows for 1) multi-deployments of hdfs and 2) has an impact on the dns name of the service. The default is "hdfs".
28 | * mesos.hdfs.user - Used to define the user to use for the scheduler and executor processes. The default is root.
29 | * mesos.hdfs.role - Used to determine the mesos role this framework will use. The default is "*".
30 | * mesos.hdfs.mesosdns - true if mesos-dns is used. The default is false.
31 | * mesos.hdfs.mesosdns.domain - When using mesos-dns, this value is the suffix used by mesos-dns. The default is "mesos".
32 | * mesos.native.library - The location of libmesos library. The default is "/usr/local/lib/libmesos.so"
33 | * mesos.hdfs.journalnode.count - The number of journal nodes the scheduler will maintain. The default is 3.
34 | * mesos.hdfs.data.dir - The location to store data on the slaves. The default is "/var/lib/hdfs/data".
35 | * mesos.hdfs.domain.socket.dir - The location used for a local socket used by the data nodes. The default is "/var/run/hadoop-hdfs".
36 | * mesos.hdfs.backup.dir - The location to replicate data to as a backup. The default is blank.
37 | * mesos.hdfs.native-hadoop-binaries - This is true if hdfs is pre-installed on the slaves. This will result in no distribution of binaries to the slaves. It will also mean that no xml configure refresh will be provided to the slaves. The default is false.
38 | * mesos.hdfs.framework.mnt.path - If native-hadoop-binaries == false, this is the location a symlink will be provided to execute hdfs commands on the slave. The default is "/opt/mesosphere"
39 | * mesos.hdfs.state.zk - The zookeeper that the scheduler will use to store state. The default is "localhost:2181"
40 | * mesos.master.uri - The zookeeper or mesos-master url that will be used to discover the mesos-master for scheduler registration. The default is "localhost:2181"
41 | * mesos.hdfs.zkfc.ha.zookeeper.quorum - The zookeeper that HDFS (not the framework) will use for HA mode. The default is "localhost:2181"
42 |
43 | There are additional configurations for executor jvm and resource management of the nodes.
44 |
45 | ## System Environment Variables
46 |
47 | All of the configuration flags previously defined can be overridden with system environment variables. The format used to override a variable is to
48 | upper-case the string and replace dots (".") with underscores ("_"). For example, to override `mesos.hdfs.framework.name`, set `MESOS_HDFS_FRAMEWORK_NAME=unicorn`.
49 | To use this value, export it, then start the scheduler. If a value is overridden by a system environment variable it will be propagated to
50 | the executors.
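
A small hypothetical sketch of this resolution (not actual scheduler code from this repo):

```java
public class EnvOverrideExample {
  public static void main(String[] args) {
    // MESOS_HDFS_FRAMEWORK_NAME overrides mesos.hdfs.framework.name when exported
    String fromEnv = System.getenv("MESOS_HDFS_FRAMEWORK_NAME");
    // prefer the environment value when it is set, otherwise fall back to the XML default ("hdfs")
    String frameworkName = (fromEnv != null) ? fromEnv : "hdfs";
    System.out.println(frameworkName);
  }
}
```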
51 |
52 | ## Custom Configurations
53 |
54 | ### Mesos-DNS custom configuration
55 | You can see an example configuration in the `example-conf/dcos` directory. Since Mesos-DNS provides native bindings for master detection, we can simply use those names in our mesos and hdfs configurations. The example configuration assumes your Mesos masters and your zookeeper nodes are colocated. If they aren't you'll need to specify your zookeeper nodes separately. Also, note that if you are using the example in `example-conf/dcos`, the `mesos.hdfs.native-hadoop-binaries` property needs to be set to `false` if your HDFS binaries are not predistributed.
56 |
57 | ### If you have Hadoop pre-installed in your cluster
58 | If you have Hadoop installed across your cluster, you don't need the Mesos scheduler application to distribute the binaries. You can set the `mesos.hdfs.native-hadoop-binaries` configuration parameter in `mesos-site.xml` if you don't want the binaries distributed.
59 |
60 |
--------------------------------------------------------------------------------
/example-conf/mesosphere-dcos/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
16 |
17 |
18 |
19 |
20 |
21 | fs.default.name
22 | hdfs://hdfs
23 |
24 |
25 | hadoop.proxyuser.hue.hosts
26 | *
27 |
28 |
29 | hadoop.proxyuser.hue.groups
30 | *
31 |
32 |
33 | hadoop.proxyuser.root.hosts
34 | *
35 |
36 |
37 | hadoop.proxyuser.root.groups
38 | *
39 |
40 |
41 | hadoop.proxyuser.httpfs.hosts
42 | *
43 |
44 |
45 | hadoop.proxyuser.httpfs.groups
46 | *
47 |
48 |
49 |
--------------------------------------------------------------------------------
/example-conf/mesosphere-dcos/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
16 |
17 |
18 |
19 |
20 |
21 | dfs.ha.automatic-failover.enabled
22 | true
23 |
24 |
25 |
26 | dfs.nameservice.id
27 | hdfs
28 |
29 |
30 |
31 | dfs.nameservices
32 | hdfs
33 |
34 |
35 |
36 | dfs.ha.namenodes.hdfs
37 | nn1,nn2
38 |
39 |
40 |
41 | dfs.namenode.rpc-address.hdfs.nn1
42 | namenode1.hdfs.mesos:50071
43 |
44 |
45 |
46 | dfs.namenode.http-address.hdfs.nn1
47 | namenode1.hdfs.mesos:50070
48 |
49 |
50 |
51 | dfs.namenode.rpc-address.hdfs.nn2
52 | namenode2.hdfs.mesos:50071
53 |
54 |
55 |
56 | dfs.namenode.http-address.hdfs.nn2
57 | namenode2.hdfs.mesos:50070
58 |
59 |
60 |
61 | dfs.client.failover.proxy.provider.hdfs
62 | org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
63 |
64 |
65 |
66 | dfs.namenode.shared.edits.dir
67 | qjournal://journalnode1.hdfs.mesos:8485;journalnode2.hdfs.mesos:8485;journalnode3.hdfs.mesos:8485/hdfs
68 |
69 |
70 |
71 | ha.zookeeper.quorum
72 | master.mesos:2181
73 |
74 |
75 |
76 | dfs.journalnode.edits.dir
77 | /var/lib/hdfs/data/jn
78 |
79 |
80 |
81 | dfs.namenode.name.dir
82 | file:///var/lib/hdfs/data/name
83 |
84 |
85 |
86 | dfs.datanode.data.dir
87 | file:///var/lib/hdfs/data/data
88 |
89 |
90 |
91 | dfs.ha.fencing.methods
92 | shell(/bin/true)
93 |
94 |
95 |
96 | dfs.permissions
97 | false
98 |
99 |
100 |
101 | dfs.datanode.du.reserved
102 | 10485760
103 |
104 |
105 |
106 | dfs.datanode.balance.bandwidthPerSec
107 | 41943040
108 |
109 |
110 |
111 | dfs.namenode.safemode.threshold-pct
112 | 0.90
113 |
114 |
115 |
116 | dfs.namenode.heartbeat.recheck-interval
117 |
118 | 60000
119 |
120 |
121 |
122 | dfs.datanode.handler.count
123 | 10
124 |
125 |
126 |
127 | dfs.namenode.handler.count
128 | 20
129 |
130 |
131 |
132 | dfs.image.compress
133 | true
134 |
135 |
136 |
137 | dfs.image.compression.codec
138 | org.apache.hadoop.io.compress.SnappyCodec
139 |
140 |
141 |
142 | dfs.namenode.invalidate.work.pct.per.iteration
143 | 0.35f
144 |
145 |
146 |
147 | dfs.namenode.replication.work.multiplier.per.iteration
148 | 4
149 |
150 |
151 |
152 |
153 | dfs.namenode.datanode.registration.ip-hostname-check
154 | false
155 |
156 |
157 |
158 | dfs.client.read.shortcircuit
159 | true
160 |
161 |
162 |
163 | dfs.client.read.shortcircuit.streams.cache.size
164 | 1000
165 |
166 |
167 |
168 | dfs.client.read.shortcircuit.streams.cache.size.expiry.ms
169 | 1000
170 |
171 |
172 |
173 |
174 | dfs.domain.socket.path
175 | /var/run/hadoop-hdfs/dn._PORT
176 |
177 |
178 |
--------------------------------------------------------------------------------
/example-conf/mesosphere-dcos/mesos-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
16 |
17 |
18 |
19 |
20 |
21 |
22 | mesos.hdfs.namenode.cpus
23 | 0.25
24 |
25 |
26 |
27 | mesos.hdfs.datanode.cpus
28 | 0.25
29 |
30 |
31 |
32 | mesos.hdfs.journalnode.cpus
33 | 0.25
34 |
35 |
36 |
37 | mesos.hdfs.executor.cpus
38 | 0.1
39 |
40 |
41 |
42 | mesos.hdfs.data.dir
43 | The primary data directory in HDFS
44 | /var/lib/hdfs/data
45 |
46 |
47 |
54 |
55 |
56 | mesos.hdfs.framework.mnt.path
57 | /opt/mesosphere
58 | This is the default for all DCOS installs
59 |
60 |
61 |
62 | mesos.hdfs.state.zk
63 | master.mesos:2181
64 | See the Mesos DNS config file for explanation for this
65 |
66 |
67 |
68 | mesos.master.uri
69 | zk://master.mesos:2181/mesos
70 | See the Mesos DNS config file for explanation for this
71 |
72 |
73 |
74 | mesos.hdfs.zkfc.ha.zookeeper.quorum
75 | master.mesos:2181
76 | See the Mesos DNS config file for explanation for this
77 |
78 |
79 |
80 | mesos.hdfs.mesosdns
81 | true
82 | All DCOS installs come with mesos DNS to maintain static configurations
83 |
84 |
85 |
86 | mesos.hdfs.native-hadoop-binaries
87 | true
88 | DCOS comes with pre-distributed HDFS binaries in a single-tenant environment
89 |
90 |
91 |
92 | mesos.native.library
93 | /opt/mesosphere/lib/libmesos.so
94 |
95 |
96 |
97 | mesos.hdfs.ld-library-path
98 | /opt/mesosphere/lib
99 |
100 |
101 |
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | org.gradle.parallel=true
2 | org.gradle.jvmargs=-Xmx2048m -XX:MaxPermSize=256m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8
3 |
4 | # faster builds: gradle build -x findBugsM
5 |
--------------------------------------------------------------------------------
/gradle/findbugs/excludeFilter.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/gradle/quality.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'findbugs'
2 | apply plugin: 'checkstyle'
3 | apply plugin: 'pmd'
4 | apply plugin: "jacoco"
5 |
6 |
7 | tasks.withType(FindBugs) {
8 | excludeFilter = file("$rootProject.projectDir/gradle/findbugs/excludeFilter.xml")
9 | maxHeapSize = '1024m'
10 | }
11 |
12 | tasks.withType(GroovyCompile) {
13 | configure(groovyOptions.forkOptions) {
14 | memoryMaximumSize = '1g'
15 | jvmArgs = ['-XX:MaxPermSize=512m', '-Xms512m', '-Xmx1g']
16 | }
17 | }
18 |
19 | checkstyle {
20 | configFile = file("$rootProject.projectDir/gradle/checkstyle/checkstyle.xml")
21 | sourceSets = [sourceSets.main] // disable style checks on tests
22 | }
23 |
24 | pmd {
25 | ruleSets = [
26 | 'java-basic',
27 | 'java-braces',
28 | 'java-clone',
29 | 'java-finalizers',
30 | 'java-imports'
31 | ]
32 | }
33 |
34 | ext {
35 | findbugsAnnotateVer = "1.3.2-201002241900"
36 | junitVer = "4.11"
37 | mockitoVer = "1.9.5"
38 | }
39 |
40 | dependencies {
41 |
42 | compile "com.kenai.nbpwr:edu-umd-cs-findbugs-annotations:${findbugsAnnotateVer}"
43 | testCompile "junit:junit:${junitVer}"
44 | testCompile "org.mockito:mockito-all:${mockitoVer}"
45 | }
46 |
--------------------------------------------------------------------------------
/gradle/spock.gradle:
--------------------------------------------------------------------------------
1 | // used for unit tests
2 | apply plugin: 'groovy'
3 |
4 | def spockVersion = '1.0-groovy-2.4'
5 | def powermockVersion = "1.6.1"
6 |
7 | dependencies {
8 |
9 | testCompile "org.codehaus.groovy:groovy-all:2.4.1"
10 | testCompile "org.spockframework:spock-core:$spockVersion"
11 |
12 | testCompile 'cglib:cglib-nodep:2.2.2' // need to mock classes
13 |
14 | // useful to mock out statics and final classes in Java.
15 | testCompile "org.powermock:powermock-module-junit4:$powermockVersion"
16 | testCompile "org.powermock:powermock-module-junit4-rule:$powermockVersion"
17 | testCompile "org.powermock:powermock-classloading-xstream:$powermockVersion"
18 | testCompile "org.powermock:powermock-api-mockito:$powermockVersion"
19 | }
20 |
21 | // for spock to live in test java tree
22 | sourceSets {
23 | test {
24 | groovy { srcDir 'src/test/java' }
25 | }
26 | }
27 |
28 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mesosphere-backup/hdfs-deprecated/c3e21df3578d9c6f6890a44a1a0e92cebb95f826/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Mon Nov 09 12:37:10 CST 2015
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-2.8-bin.zip
7 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
10 | DEFAULT_JVM_OPTS=""
11 |
12 | APP_NAME="Gradle"
13 | APP_BASE_NAME=`basename "$0"`
14 |
15 | # Use the maximum available, or set MAX_FD != -1 to use that value.
16 | MAX_FD="maximum"
17 |
18 | warn ( ) {
19 | echo "$*"
20 | }
21 |
22 | die ( ) {
23 | echo
24 | echo "$*"
25 | echo
26 | exit 1
27 | }
28 |
29 | # OS specific support (must be 'true' or 'false').
30 | cygwin=false
31 | msys=false
32 | darwin=false
33 | case "`uname`" in
34 | CYGWIN* )
35 | cygwin=true
36 | ;;
37 | Darwin* )
38 | darwin=true
39 | ;;
40 | MINGW* )
41 | msys=true
42 | ;;
43 | esac
44 |
45 | # Attempt to set APP_HOME
46 | # Resolve links: $0 may be a link
47 | PRG="$0"
48 | # Need this for relative symlinks.
49 | while [ -h "$PRG" ] ; do
50 | ls=`ls -ld "$PRG"`
51 | link=`expr "$ls" : '.*-> \(.*\)$'`
52 | if expr "$link" : '/.*' > /dev/null; then
53 | PRG="$link"
54 | else
55 | PRG=`dirname "$PRG"`"/$link"
56 | fi
57 | done
58 | SAVED="`pwd`"
59 | cd "`dirname \"$PRG\"`/" >/dev/null
60 | APP_HOME="`pwd -P`"
61 | cd "$SAVED" >/dev/null
62 |
63 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
64 |
65 | # Determine the Java command to use to start the JVM.
66 | if [ -n "$JAVA_HOME" ] ; then
67 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
68 | # IBM's JDK on AIX uses strange locations for the executables
69 | JAVACMD="$JAVA_HOME/jre/sh/java"
70 | else
71 | JAVACMD="$JAVA_HOME/bin/java"
72 | fi
73 | if [ ! -x "$JAVACMD" ] ; then
74 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
75 |
76 | Please set the JAVA_HOME variable in your environment to match the
77 | location of your Java installation."
78 | fi
79 | else
80 | JAVACMD="java"
81 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
82 |
83 | Please set the JAVA_HOME variable in your environment to match the
84 | location of your Java installation."
85 | fi
86 |
87 | # Increase the maximum file descriptors if we can.
88 | if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
89 | MAX_FD_LIMIT=`ulimit -H -n`
90 | if [ $? -eq 0 ] ; then
91 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
92 | MAX_FD="$MAX_FD_LIMIT"
93 | fi
94 | ulimit -n $MAX_FD
95 | if [ $? -ne 0 ] ; then
96 | warn "Could not set maximum file descriptor limit: $MAX_FD"
97 | fi
98 | else
99 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
100 | fi
101 | fi
102 |
103 | # For Darwin, add options to specify how the application appears in the dock
104 | if $darwin; then
105 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
106 | fi
107 |
108 | # For Cygwin, switch paths to Windows format before running java
109 | if $cygwin ; then
110 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
111 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
112 | JAVACMD=`cygpath --unix "$JAVACMD"`
113 |
114 | # We build the pattern for arguments to be converted via cygpath
115 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
116 | SEP=""
117 | for dir in $ROOTDIRSRAW ; do
118 | ROOTDIRS="$ROOTDIRS$SEP$dir"
119 | SEP="|"
120 | done
121 | OURCYGPATTERN="(^($ROOTDIRS))"
122 | # Add a user-defined pattern to the cygpath arguments
123 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
124 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
125 | fi
126 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
127 | i=0
128 | for arg in "$@" ; do
129 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
130 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
131 |
132 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
133 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
134 | else
135 | eval `echo args$i`="\"$arg\""
136 | fi
137 | i=$((i+1))
138 | done
139 | case $i in
140 | (0) set -- ;;
141 | (1) set -- "$args0" ;;
142 | (2) set -- "$args0" "$args1" ;;
143 | (3) set -- "$args0" "$args1" "$args2" ;;
144 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
145 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
146 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
147 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
148 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
149 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
150 | esac
151 | fi
152 |
153 | # Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
154 | function splitJvmOpts() {
155 | JVM_OPTS=("$@")
156 | }
157 | eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
158 | JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
159 |
160 | exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
161 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
12 | set DEFAULT_JVM_OPTS=
13 |
14 | set DIRNAME=%~dp0
15 | if "%DIRNAME%" == "" set DIRNAME=.
16 | set APP_BASE_NAME=%~n0
17 | set APP_HOME=%DIRNAME%
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windowz variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 | if "%@eval[2+2]" == "4" goto 4NT_args
53 |
54 | :win9xME_args
55 | @rem Slurp the command line arguments.
56 | set CMD_LINE_ARGS=
57 | set _SKIP=2
58 |
59 | :win9xME_args_slurp
60 | if "x%~1" == "x" goto execute
61 |
62 | set CMD_LINE_ARGS=%*
63 | goto execute
64 |
65 | :4NT_args
66 | @rem Get arguments from the 4NT Shell from JP Software
67 | set CMD_LINE_ARGS=%$
68 |
69 | :execute
70 | @rem Setup the command line
71 |
72 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
73 |
74 | @rem Execute Gradle
75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
76 |
77 | :end
78 | @rem End local scope for the variables with windows NT shell
79 | if "%ERRORLEVEL%"=="0" goto mainEnd
80 |
81 | :fail
82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 | rem the _cmd.exe /c_ return code!
84 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
85 | exit /b 1
86 |
87 | :mainEnd
88 | if "%OS%"=="Windows_NT" endlocal
89 |
90 | :omega
91 |
--------------------------------------------------------------------------------
/hdfs-commons/build.gradle:
--------------------------------------------------------------------------------
1 | dependencies {
2 | compile project(':mesos-commons')
3 | }
--------------------------------------------------------------------------------
/hdfs-commons/src/main/java/org/apache/mesos/hdfs/config/ConfigurationException.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.config;
2 |
3 | /**
4 | * Indicates an exception or an invalid request for configuration.
5 | */
6 | public class ConfigurationException extends RuntimeException {
7 |
8 | public ConfigurationException(Throwable cause) {
9 | super(cause);
10 | }
11 |
12 | public ConfigurationException(String message) {
13 | super(message);
14 | }
15 |
16 | public ConfigurationException(String message, Throwable cause) {
17 | super(message, cause);
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/hdfs-commons/src/main/java/org/apache/mesos/hdfs/config/NodeConfig.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.config;
2 |
3 | /** Per-node-type resource configuration (cpus, max heap, and port) for an HDFS node. */
4 |
5 | public class NodeConfig {
6 | private String type;
7 | private int maxHeap;
8 | private double cpus;
9 | private int port;
10 |
11 | public double getCpus() {
12 | return cpus;
13 | }
14 |
15 | public void setCpus(double cpus) {
16 | this.cpus = cpus;
17 | }
18 |
19 | public int getMaxHeap() {
20 | return maxHeap;
21 | }
22 |
23 | public void setMaxHeap(int maxHeap) {
24 | this.maxHeap = maxHeap;
25 | }
26 |
27 | public int getPort() {
28 | return port;
29 | }
30 |
31 | public void setPort(int port) {
32 | this.port = port;
33 | }
34 |
35 | public String getType() {
36 | return type;
37 | }
38 |
39 | public void setType(String type) {
40 | this.type = type;
41 | }
42 |
43 | @Override
44 | public String toString() {
45 | return "NodeConfig{" +
46 | "cpus=" + cpus +
47 | ", type='" + type + '\'' +
48 | ", maxHeap=" + maxHeap +
49 | ", port=" + port +
50 | '}';
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/hdfs-commons/src/main/java/org/apache/mesos/hdfs/util/HDFSConstants.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.util;
2 |
3 | /**
4 | * Constants for HDFS.
5 | */
6 | public final class HDFSConstants {
7 |
8 | private HDFSConstants() {
9 | }
10 |
11 | // Total number of NameNodes
12 | // Note: We do not currently support more or less than 2 NameNodes
13 | public static final Integer TOTAL_NAME_NODES = 2;
14 | public static final Integer MILLIS_FROM_SECONDS = 1000;
15 |
16 | // Messages
17 | public static final String NAME_NODE_INIT_MESSAGE = "-i";
18 | public static final String NAME_NODE_BOOTSTRAP_MESSAGE = "-b";
19 | public static final String JOURNAL_NODE_INIT_MESSAGE = "-s";
20 | public static final String RELOAD_CONFIG = "reload config";
21 |
22 | // NodeIds
23 | public static final String NAME_NODE_ID = "namenode";
24 | public static final String JOURNAL_NODE_ID = "journalnode";
25 | public static final String DATA_NODE_ID = "datanode";
26 | public static final String ZKFC_NODE_ID = "zkfc";
27 |
28 | // NameNode TaskId
29 | public static final String NAME_NODE_TASKID = ".namenode.namenode.";
30 |
31 | // ExecutorsIds
32 | public static final String NODE_EXECUTOR_ID = "NodeExecutor";
33 | public static final String NAME_NODE_EXECUTOR_ID = "NameNodeExecutor";
34 |
35 | // Path to Store HDFS Binary
36 | public static final String HDFS_BINARY_DIR = "hdfs";
37 |
38 | // Current HDFS Binary File Name
39 | public static final String HDFS_BINARY_FILE_NAME = "hdfs-mesos-executor-0.1.5.tgz";
40 |
41 | // HDFS Config File Name
42 | public static final String HDFS_CONFIG_FILE_NAME = "hdfs-site.xml";
43 |
44 | // Listening Ports
45 | public static final Integer DATA_NODE_PORT = 50075;
46 | public static final Integer JOURNAL_NODE_PORT = 8480;
47 | public static final Integer ZKFC_NODE_PORT = 8019;
48 | public static final Integer NAME_NODE_PORT = 50070;
49 |
50 | // Exit codes
51 | public static final Integer PROC_EXIT_CODE = 1;
52 | public static final Integer RELOAD_EXIT_CODE = 2;
53 | public static final Integer NAMENODE_EXIT_CODE = 3;
54 | public static final Integer RECONCILE_EXIT_CODE = 4;
55 |
56 | // NameNode initialization constants
57 | public static final String ZK_FRAMEWORK_ID_KEY = "FrameworkId";
58 | public static final Integer ZK_MUTEX_ACQUIRE_TIMEOUT_SEC = 30;
59 | public static final Integer CURATOR_MAX_RETRIES = 3;
60 |
61 | public static final String NAMENODE_NUM_PARAM = "nn";
62 |
63 | public static final String NN_STATUS_KEY = "status";
64 | public static final String NN_STATUS_INIT_VAL = "initialized";
65 | public static final String NN_STATUS_UNINIT_VAL = "uninitialized";
66 | public static final String NN_STATUS_FORMATTED_VAL = "formatted";
67 | public static final String NN_STATUS_BOOTSTRAPPED_VAL = "bootstrapped";
68 |
69 | public static final String PROPERTY_VAR_PREFIX = "MESOS_HDFS_";
70 |
71 | public static final Integer POLL_DELAY_MS = 1000;
72 | }
73 |
--------------------------------------------------------------------------------
/hdfs-commons/src/main/java/org/apache/mesos/hdfs/util/TaskStatusFactory.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.util;
2 |
3 | import org.apache.mesos.Protos.TaskID;
4 | import org.apache.mesos.Protos.TaskState;
5 | import org.apache.mesos.Protos.TaskStatus;
6 | import org.apache.mesos.protobuf.TaskStatusBuilder;
7 |
8 | /**
9 | * Class to generate TaskStatus messages.
10 | */
11 | public class TaskStatusFactory {
12 | public static TaskStatus createNameNodeStatus(TaskID taskId, boolean initialized) {
13 | String initStatus = getNameNodeInitStatus(initialized);
14 |
15 | return new TaskStatusBuilder()
16 | .setTaskId(taskId)
17 | .setState(TaskState.TASK_RUNNING)
18 | .addLabel(HDFSConstants.NN_STATUS_KEY, initStatus)
19 | .build();
20 | }
21 |
22 | public static TaskStatus createRunningStatus(TaskID taskId) {
23 | return new TaskStatusBuilder()
24 | .setTaskId(taskId)
25 | .setState(TaskState.TASK_RUNNING)
26 | .build();
27 | }
28 |
29 | public static TaskStatus createKilledStatus(TaskID taskId) {
30 | return new TaskStatusBuilder()
31 | .setTaskId(taskId.getValue())
32 | .setState(TaskState.TASK_KILLED)
33 | .build();
34 | }
35 |
36 | private static String getNameNodeInitStatus(boolean initialized) {
37 | if (initialized) {
38 | return HDFSConstants.NN_STATUS_INIT_VAL;
39 | } else {
40 | return HDFSConstants.NN_STATUS_UNINIT_VAL;
41 | }
42 | }
43 | }
44 |
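A minimal usage sketch for the factory above (not part of the repository; the task-id value is invented for illustration):

import org.apache.mesos.Protos.TaskID;
import org.apache.mesos.Protos.TaskStatus;
import org.apache.mesos.hdfs.util.HDFSConstants;
import org.apache.mesos.hdfs.util.TaskStatusFactory;

public class TaskStatusFactorySketch {
  public static void main(String[] args) {
    // Invented task id; real ids are generated by the scheduler.
    TaskID taskId = TaskID.newBuilder().setValue("task.namenode.namenode.NameNodeExecutor.1").build();

    // RUNNING status carrying the {status=initialized} label that the scheduler reads back.
    TaskStatus status = TaskStatusFactory.createNameNodeStatus(taskId, true);
    System.out.println(HDFSConstants.NN_STATUS_KEY + " labels: " + status.getLabels());
  }
}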
--------------------------------------------------------------------------------
/hdfs-executor/build.gradle:
--------------------------------------------------------------------------------
1 | plugins {
2 | id 'com.github.johnrengelman.shadow' version '1.2.2'
3 | }
4 |
5 | dependencies {
6 | compile project(':mesos-commons')
7 | compile project(':hdfs-commons')
8 |
9 | }
10 |
11 |
12 | shadowJar {
13 | classifier = "uber"
14 |
15 | mergeServiceFiles()
16 |
17 | exclude 'META-INF/*.SF'
18 | exclude 'META-INF/*.DSA'
19 | exclude 'META-INF/*.RSA'
20 |
21 | dependencies {
22 | exclude(dependency("commons-logging:commons-logging"))
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/hdfs-executor/src/main/java/org/apache/mesos/hdfs/executor/ExecutorException.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.executor;
2 |
3 | /**
4 |  * An invalid condition exists within the executor.
5 | */
6 | public class ExecutorException extends RuntimeException {
7 |
8 | public ExecutorException(String message) {
9 | super(message);
10 | }
11 |
12 | public ExecutorException(Throwable cause) {
13 | super(cause);
14 | }
15 |
16 | public ExecutorException(String message, Throwable cause) {
17 | super(message, cause);
18 | }
19 |
20 | }
21 |
--------------------------------------------------------------------------------
/hdfs-executor/src/main/java/org/apache/mesos/hdfs/executor/HdfsProcessExitHandler.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.executor;
2 |
3 | import org.apache.mesos.process.FailureUtils;
4 | import org.apache.mesos.hdfs.util.HDFSConstants;
5 | import org.apache.mesos.process.ProcessFailureHandler;
6 |
7 | /**
8 | * When a process fails this handler will exit the JVM.
9 | */
10 | public class HdfsProcessExitHandler implements ProcessFailureHandler {
11 | public void handle() {
12 | FailureUtils.exit("Task Process Failed", HDFSConstants.PROC_EXIT_CODE);
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/hdfs-executor/src/main/java/org/apache/mesos/hdfs/executor/NodeExecutor.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.executor;
2 |
3 | import com.google.inject.Guice;
4 | import com.google.inject.Inject;
5 | import com.google.inject.Injector;
6 | import org.apache.commons.logging.Log;
7 | import org.apache.commons.logging.LogFactory;
8 | import org.apache.mesos.ExecutorDriver;
9 | import org.apache.mesos.MesosExecutorDriver;
10 | import org.apache.mesos.Protos.Status;
11 | import org.apache.mesos.Protos.TaskID;
12 | import org.apache.mesos.Protos.TaskInfo;
13 | import org.apache.mesos.Protos.TaskState;
14 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
15 | import org.apache.mesos.process.FailureUtils;
16 | import org.apache.mesos.protobuf.TaskStatusBuilder;
17 |
18 | /**
19 | * The executor for a Basic Node (either a Journal Node or Data Node).
20 | */
21 | public class NodeExecutor extends AbstractNodeExecutor {
22 | private final Log log = LogFactory.getLog(NodeExecutor.class);
23 | private Task task;
24 |
25 | /**
26 | * The constructor for the node which saves the configuration.
27 | */
28 | @Inject
29 | NodeExecutor(HdfsFrameworkConfig config) {
30 | super(config);
31 | }
32 |
33 | /**
34 | * Main method for executor, which injects the configuration and state and starts the driver.
35 | */
36 | public static void main(String[] args) {
37 | Injector injector = Guice.createInjector();
38 |
39 | final NodeExecutor executor = injector.getInstance(NodeExecutor.class);
40 | MesosExecutorDriver driver = new MesosExecutorDriver(executor);
41 | Runtime.getRuntime().addShutdownHook(new Thread(new TaskShutdownHook(executor, driver)));
42 | FailureUtils.exit("mesos driver exited", driver.run() == Status.DRIVER_STOPPED ? 0 : 1);
43 | }
44 |
45 | /**
46 | * Add tasks to the task list and then start the tasks.
47 | */
48 | @Override
49 | public void launchTask(final ExecutorDriver driver, final TaskInfo taskInfo) {
50 | executorInfo = taskInfo.getExecutor();
51 | task = new Task(taskInfo);
52 | startProcess(driver, task);
53 | driver.sendStatusUpdate(TaskStatusBuilder.newBuilder()
54 | .setTaskId(taskInfo.getTaskId())
55 | .setState(TaskState.TASK_RUNNING)
56 | .setData(taskInfo.getData()).build());
57 | }
58 |
59 | @Override
60 | public void killTask(ExecutorDriver driver, TaskID taskId) {
61 | log.info("Killing task : " + taskId.getValue());
62 | if (task.getProcess() != null && taskId.equals(task.getTaskInfo().getTaskId())) {
63 | task.getProcess().destroy();
64 | task.setProcess(null);
65 | }
66 | driver.sendStatusUpdate(TaskStatusBuilder.newBuilder()
67 | .setTaskId(taskId)
68 | .setState(TaskState.TASK_KILLED)
69 | .build());
70 | }
71 |
72 | @Override
73 | public void shutdown(ExecutorDriver d) {
74 | // TODO(elingg) let's shut down the driver more gracefully
75 | log.info("Executor asked to shutdown");
76 | if (task != null) {
77 | killTask(d, task.getTaskInfo().getTaskId());
78 | }
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/hdfs-executor/src/main/java/org/apache/mesos/hdfs/executor/NodeHealthChecker.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.executor;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.hdfs.util.HDFSConstants;
6 | import org.apache.mesos.stream.StreamUtil;
7 |
8 | import java.io.IOException;
9 | import java.net.InetAddress;
10 | import java.net.InetSocketAddress;
11 | import java.net.Socket;
12 |
13 | /**
14 |  * Checks the health of node tasks running within the executor.
15 | */
16 | public class NodeHealthChecker {
17 |
18 | private final Log log = LogFactory.getLog(NodeHealthChecker.class);
19 |
20 | public NodeHealthChecker() {
21 | }
22 |
23 | public boolean runHealthCheckForTask(Task task) {
24 | String taskIdStr = task.getTaskInfo().getTaskId().getValue();
25 | int healthCheckPort = getHealthCheckPort(taskIdStr);
26 | boolean taskHealthy = false;
27 |
28 | if (healthCheckPort != -1) {
29 | String healthCheckErrStr = "Error in node health check: ";
30 | String addressInUseStr = "Address already in use";
31 | Socket socket = null;
32 | try {
33 | // TODO (elingg) with better process supervision, check which process is
34 | // bound to the port.
35 | // Also, possibly do a http check for the name node UI as an additional
36 | // health check.
37 | String localhostAddress = InetAddress.getLocalHost().getHostAddress();
38 | socket = new Socket();
39 | socket.bind(new InetSocketAddress(localhostAddress, healthCheckPort));
40 | } catch (IOException e) {
41 | if (e.getMessage().contains(addressInUseStr)) {
42 | taskHealthy = true;
43 | log.info("Could not bind to port " + healthCheckPort + ", port is in use as expected.");
44 | } else {
45 | log.error(healthCheckErrStr, e);
46 | }
47 | } catch (SecurityException | IllegalArgumentException e) {
48 | log.error(healthCheckErrStr, e);
49 | }
50 | StreamUtil.closeQuietly(socket);
51 | }
52 |
53 | return taskHealthy;
54 | }
55 |
56 | private int getHealthCheckPort(String taskIdStr) {
57 | int healthCheckPort = -1;
58 |
59 | if (taskIdStr.contains(HDFSConstants.DATA_NODE_ID)) {
60 | healthCheckPort = HDFSConstants.DATA_NODE_PORT;
61 | } else if (taskIdStr.contains(HDFSConstants.JOURNAL_NODE_ID)) {
62 | healthCheckPort = HDFSConstants.JOURNAL_NODE_PORT;
63 | } else if (taskIdStr.contains(HDFSConstants.ZKFC_NODE_ID)) {
64 | healthCheckPort = HDFSConstants.ZKFC_NODE_PORT;
65 | } else if (taskIdStr.contains(HDFSConstants.NAME_NODE_ID)) {
66 | healthCheckPort = HDFSConstants.NAME_NODE_PORT;
67 | } else {
68 | log.error("Task unknown: " + taskIdStr);
69 | }
70 |
71 | return healthCheckPort;
72 | }
73 |
74 | }
75 |
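The health check above relies on a bind probe: the node's well-known port should already be held by the HDFS daemon, so a failed bind with "Address already in use" is treated as healthy. A standalone sketch of that idea, with the port value hard-coded for illustration:

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;

public class PortProbeSketch {
  // Returns true when binding fails because another process (the HDFS daemon) holds the port.
  public static boolean portLooksOccupied(int port) {
    try (Socket socket = new Socket()) {
      socket.bind(new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), port));
      return false; // bind succeeded, so nothing is listening on the port
    } catch (IOException e) {
      return e.getMessage() != null && e.getMessage().contains("Address already in use");
    }
  }

  public static void main(String[] args) {
    System.out.println("datanode port occupied: " + portLooksOccupied(50075));
  }
}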
--------------------------------------------------------------------------------
/hdfs-executor/src/main/java/org/apache/mesos/hdfs/executor/Task.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.executor;
2 |
3 | import org.apache.mesos.Protos;
4 | import org.apache.mesos.hdfs.util.HDFSConstants;
5 |
6 | /**
7 | * The Task class for use within the executor.
8 | */
9 | public class Task {
10 |
11 | private Protos.TaskInfo taskInfo;
12 | private String cmd;
13 | private Process process;
14 | private String type;
15 |
16 | public Task(Protos.TaskInfo taskInfo) {
17 | this.taskInfo = taskInfo;
18 | this.cmd = taskInfo.getData().toStringUtf8();
19 | setType(taskInfo.getTaskId().getValue());
20 | }
21 |
22 | public String getCmd() {
23 | return cmd;
24 | }
25 |
26 | public void setCmd(String cmd) {
27 | this.cmd = cmd;
28 | }
29 |
30 | public Process getProcess() {
31 | return process;
32 | }
33 |
34 | public void setProcess(Process process) {
35 | this.process = process;
36 | }
37 |
38 | public Protos.TaskInfo getTaskInfo() {
39 | return taskInfo;
40 | }
41 |
42 | public void setTaskInfo(Protos.TaskInfo taskInfo) {
43 | this.taskInfo = taskInfo;
44 | }
45 |
46 | public String getType() {
47 | return type;
48 | }
49 |
50 | private void setType(String taskId) {
51 | type = "";
52 | if (taskId.contains("task." + HDFSConstants.JOURNAL_NODE_ID)) {
53 | type = HDFSConstants.JOURNAL_NODE_ID;
54 | } else if (taskId.contains("task." + HDFSConstants.NAME_NODE_ID)) {
55 | type = HDFSConstants.NAME_NODE_ID;
56 | } else if (taskId.contains("task." + HDFSConstants.ZKFC_NODE_ID)) {
57 | type = HDFSConstants.ZKFC_NODE_ID;
58 | } else if (taskId.contains("task." + HDFSConstants.DATA_NODE_ID)) {
59 | type = HDFSConstants.DATA_NODE_ID;
60 | }
61 | }
62 |
63 | @Override
64 | public String toString() {
65 | return "Task{" +
66 | "cmd='" + cmd + '\'' +
67 | ", taskInfo=" + taskInfo +
68 | ", type='" + type + '\'' +
69 | '}';
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/hdfs-executor/src/main/java/org/apache/mesos/hdfs/executor/TaskShutdownHook.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.executor;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Executor;
6 | import org.apache.mesos.ExecutorDriver;
7 |
8 | /** Shutdown hook that asks the executor to shut down its running task when the JVM exits. */
9 |
10 | public class TaskShutdownHook implements Runnable {
11 |
12 | private final Log log = LogFactory.getLog(TaskShutdownHook.class);
13 |
14 | private Executor executor;
15 | private ExecutorDriver driver;
16 |
17 | public TaskShutdownHook(Executor executor, ExecutorDriver driver) {
18 | this.executor = executor;
19 | this.driver = driver;
20 | }
21 |
22 | @Override
23 | public void run() {
24 | log.info("shutdown hook shutting down tasks");
25 | executor.shutdown(this.driver);
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/hdfs-executor/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
4 |     <encoder>
5 |       <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
6 |     </encoder>
7 |   </appender>
8 |
9 |   <root level="INFO">
10 |     <appender-ref ref="STDOUT" />
11 |   </root>
12 | </configuration>
13 |
--------------------------------------------------------------------------------
/hdfs-executor/src/test/java/org/apache/mesos/hdfs/executor/TaskSpec.groovy:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.executor
2 |
3 | import org.apache.mesos.hdfs.util.HDFSConstants
4 | import org.apache.mesos.protobuf.TaskInfoBuilder
5 | import spock.lang.Specification
6 |
7 | /**
8 |  * Spock specification verifying Task type detection from the task id.
9 |  */
10 | class TaskSpec extends Specification {
11 |
12 | def "task type detection"() {
13 |
14 | expect:
15 | new Task(new TaskInfoBuilder(taskId, "name", "slaveID").build()).type == type
16 |
17 | where:
18 | taskId | type
19 | "task.$HDFSConstants.JOURNAL_NODE_ID" | HDFSConstants.JOURNAL_NODE_ID
20 | "task.$HDFSConstants.NAME_NODE_ID" | HDFSConstants.NAME_NODE_ID
21 | "task.$HDFSConstants.DATA_NODE_ID" | HDFSConstants.DATA_NODE_ID
22 | "task.$HDFSConstants.ZKFC_NODE_ID" | HDFSConstants.ZKFC_NODE_ID
23 | "" | ""
24 | "junk" | ""
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/hdfs-scheduler/build.gradle:
--------------------------------------------------------------------------------
1 | plugins {
2 | id 'com.github.johnrengelman.shadow' version '1.2.2'
3 | }
4 |
5 | ext {
6 | jettyVer = "9.2.2.v20140723"
7 | jmteVer = "3.0"
8 | }
9 |
10 |
11 | dependencies {
12 | compile project(':mesos-commons')
13 | compile project(':hdfs-commons')
14 | compile "com.floreysoft:jmte:${jmteVer}"
15 | compile "org.eclipse.jetty:jetty-server:${jettyVer}"
16 | }
17 |
18 |
19 | shadowJar {
20 | classifier = "uber"
21 |
22 | mergeServiceFiles()
23 |
24 | exclude 'META-INF/*.SF'
25 | exclude 'META-INF/*.DSA'
26 | exclude 'META-INF/*.RSA'
27 |
28 | dependencies {
29 | exclude(dependency("commons-logging:commons-logging"))
30 | }
31 |
32 | doLast {
33 | setTeamcityParameters()
34 | }
35 | }
36 |
37 | build << {
38 | setTeamcityParameters()
39 | }
40 |
41 | def setTeamcityParameters() {
42 | println "##teamcity[setParameter name='env.gradle_PROJECT_VERSION' value='$version']"
43 | println "##teamcity[setParameter name='system.gradle.PROJECT_VERSION' value='$version']"
44 | }
45 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/config/ConfigServer.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.config;
2 |
3 | import com.floreysoft.jmte.Engine;
4 | import com.google.inject.Inject;
5 | import org.apache.commons.logging.Log;
6 | import org.apache.commons.logging.LogFactory;
7 | import org.apache.mesos.hdfs.scheduler.Task;
8 | import org.apache.mesos.hdfs.state.HdfsState;
9 | import org.apache.mesos.hdfs.util.HDFSConstants;
10 | import org.eclipse.jetty.server.Handler;
11 | import org.eclipse.jetty.server.Request;
12 | import org.eclipse.jetty.server.Server;
13 | import org.eclipse.jetty.server.handler.AbstractHandler;
14 | import org.eclipse.jetty.server.handler.HandlerList;
15 | import org.eclipse.jetty.server.handler.ResourceHandler;
16 |
17 | import javax.servlet.http.HttpServletRequest;
18 | import javax.servlet.http.HttpServletResponse;
19 | import java.io.File;
20 | import java.io.FileNotFoundException;
21 | import java.io.IOException;
22 | import java.nio.charset.Charset;
23 | import java.nio.file.Files;
24 | import java.nio.file.Paths;
25 | import java.util.ArrayList;
26 | import java.util.HashMap;
27 | import java.util.Iterator;
28 | import java.util.List;
29 | import java.util.Map;
30 |
31 | /**
32 | * This is the HTTP service which allows executors to fetch the configuration for hdfs-site.xml.
33 | */
34 | public class ConfigServer {
35 | private final Log log = LogFactory.getLog(ConfigServer.class);
36 |
37 | private Server server;
38 | private Engine engine;
39 | private HdfsFrameworkConfig hdfsFrameworkConfig;
40 | private HdfsState state;
41 |
42 | @Inject
43 | public ConfigServer(HdfsFrameworkConfig hdfsFrameworkConfig, HdfsState state) {
44 | this.hdfsFrameworkConfig = hdfsFrameworkConfig;
45 | this.state = state;
46 | engine = new Engine();
47 | server = new Server(hdfsFrameworkConfig.getConfigServerPort());
48 | ResourceHandler resourceHandler = new ResourceHandler();
49 | resourceHandler.setResourceBase(hdfsFrameworkConfig.getExecutorPath());
50 | HandlerList handlers = new HandlerList();
51 | handlers.setHandlers(new Handler[]{
52 | resourceHandler, new ServeHdfsConfigHandler()});
53 | server.setHandler(handlers);
54 |
55 | try {
56 | server.start();
57 |
58 | } catch (Exception e) {
59 | final String msg = "Unable to start jetty server";
60 | log.error(msg, e);
61 | throw new ConfigServerException(msg, e);
62 | }
63 | }
64 |
65 | public void stop() throws ConfigServerException {
66 | try {
67 | server.stop();
68 | } catch (Exception e) {
69 | final String msg = "Unable to stop the jetty service";
70 | log.error(msg, e);
71 | throw new ConfigServerException(msg, e);
72 | }
73 | }
74 |
75 | private List<String> getHostNames(List<Task> tasks) {
76 | List<String> names = new ArrayList<>();
77 |
78 | for (Task task : tasks) {
79 | names.add(task.getHostname());
80 | }
81 |
82 | return names;
83 | }
84 |
85 | private class ServeHdfsConfigHandler extends AbstractHandler {
86 | public synchronized void handle(
87 | String target,
88 | Request baseRequest,
89 | HttpServletRequest request,
90 | HttpServletResponse response)
91 | throws IOException {
92 |
93 | File confFile = new File(hdfsFrameworkConfig.getConfigPath());
94 |
95 | if (!confFile.exists()) {
96 | throw new FileNotFoundException("Couldn't find config file: " + confFile.getPath()
97 | + ". Please make sure it exists.");
98 | }
99 |
100 | String content = new String(Files.readAllBytes(Paths.get(confFile.getPath())), Charset.defaultCharset());
101 |
102 | List<String> nameNodes = null;
103 | List<String> journalNodes = null;
104 | try {
105 | nameNodes = getHostNames(state.getNameNodeTasks());
106 | journalNodes = getHostNames(state.getJournalNodeTasks());
107 | } catch (Exception ex) {
108 | throw new IOException(ex);
109 | }
110 |
111 | Map<String, Object> model = new HashMap<>();
112 | Iterator<String> iter = nameNodes.iterator();
113 | if (iter.hasNext()) {
114 | model.put("nn1Hostname", iter.next());
115 | }
116 | if (iter.hasNext()) {
117 | model.put("nn2Hostname", iter.next());
118 | }
119 |
120 | String journalNodeString = getJournalNodes(journalNodes);
121 |
122 | model.put("journalnodes", journalNodeString);
123 | model.put("frameworkName", hdfsFrameworkConfig.getFrameworkName());
124 | model.put("dataDir", hdfsFrameworkConfig.getDataDir());
125 | model.put("secondaryDataDir", hdfsFrameworkConfig.getSecondaryDataDir());
126 | model.put("haZookeeperQuorum", hdfsFrameworkConfig.getHaZookeeperQuorum());
127 | model.put("domainSocketDir", hdfsFrameworkConfig.getDomainSocketDir());
128 |
129 | String nnNum = request.getParameter(HDFSConstants.NAMENODE_NUM_PARAM);
130 | if (hdfsFrameworkConfig.getBackupDir() != null && nnNum != null) {
131 | model.put("backupDir", hdfsFrameworkConfig.getBackupDir() + "/" + nnNum);
132 | }
133 |
134 | content = engine.transform(content, model);
135 |
136 | response.setContentType("application/octet-stream;charset=utf-8");
137 | response.setHeader("Content-Disposition", "attachment; filename=\"" +
138 | HDFSConstants.HDFS_CONFIG_FILE_NAME + "\" ");
139 | response.setHeader("Content-Transfer-Encoding", "binary");
140 | response.setHeader("Content-Length", Integer.toString(content.length()));
141 |
142 | response.setStatus(HttpServletResponse.SC_OK);
143 | baseRequest.setHandled(true);
144 | response.getWriter().println(content);
145 | }
146 |
147 | private String getJournalNodes(List<String> journalNodes) {
148 | StringBuilder journalNodeStringBuilder = new StringBuilder("");
149 | for (String jn : journalNodes) {
150 | journalNodeStringBuilder.append(jn).append(":8485;");
151 | }
152 | String journalNodeString = journalNodeStringBuilder.toString();
153 |
154 | if (!journalNodeString.isEmpty()) {
155 | // Chop the trailing ';'
156 | journalNodeString = journalNodeString.substring(0, journalNodeString.length() - 1);
157 | }
158 | return journalNodeString;
159 | }
160 | }
161 | }
162 |
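The handler above feeds its model into JMTE's Engine.transform to fill placeholders in the served hdfs-site.xml. A self-contained sketch of that substitution, using an invented XML fragment and hostname in place of the real template:

import com.floreysoft.jmte.Engine;

import java.util.HashMap;
import java.util.Map;

public class HdfsSiteTemplateSketch {
  public static void main(String[] args) {
    // Invented fragment standing in for the real conf/hdfs-site.xml template.
    String template =
        "<property>\n"
      + "  <name>dfs.namenode.http-address.hdfs.nn1</name>\n"
      + "  <value>${nn1Hostname}:50070</value>\n"
      + "</property>";

    Map<String, Object> model = new HashMap<>();
    model.put("nn1Hostname", "worker-1.example.com");

    // Engine.transform replaces ${...} placeholders with values from the model.
    System.out.println(new Engine().transform(template, model));
  }
}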
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/config/ConfigServerException.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.config;
2 |
3 | /**
4 |  * Indicates a failure to start up the config service, most likely a Jetty failure.
5 | */
6 | public class ConfigServerException extends RuntimeException {
7 |
8 | public ConfigServerException(Throwable cause) {
9 | super(cause);
10 | }
11 |
12 | public ConfigServerException(String message) {
13 | super(message);
14 | }
15 |
16 | public ConfigServerException(String message, Throwable cause) {
17 | super(message, cause);
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/DataNode.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Protos.Offer;
6 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
7 | import org.apache.mesos.hdfs.config.NodeConfig;
8 | import org.apache.mesos.hdfs.state.HdfsState;
9 | import org.apache.mesos.hdfs.util.HDFSConstants;
10 |
11 | import java.util.Arrays;
12 | import java.util.List;
13 |
14 | /**
15 | * DataNode.
16 | */
17 | public class DataNode extends HdfsNode {
18 | private final Log log = LogFactory.getLog(DataNode.class);
19 |
20 | public DataNode(
21 | HdfsState state,
22 | HdfsFrameworkConfig config) {
23 | super(state, config, HDFSConstants.DATA_NODE_ID);
24 | }
25 |
26 | public boolean evaluate(Offer offer) {
27 | boolean accept = false;
28 | NodeConfig dataNodeConfig = config.getNodeConfig(HDFSConstants.DATA_NODE_ID);
29 |
30 | if (!enoughResources(offer, dataNodeConfig.getCpus(), dataNodeConfig.getMaxHeap())) {
31 | log.info("Offer does not have enough resources");
32 | } else if (state.hostOccupied(offer.getHostname(), HDFSConstants.DATA_NODE_ID)) {
33 | log.info(String.format("Already running DataNode on %s", offer.getHostname()));
34 | } else if (violatesExclusivityConstraint(offer)) {
35 | log.info(String.format("Already running NameNode or JournalNode on %s", offer.getHostname()));
36 | } else {
37 | accept = true;
38 | }
39 |
40 | return accept;
41 | }
42 |
43 | protected String getExecutorName() {
44 | return HDFSConstants.NODE_EXECUTOR_ID;
45 | }
46 |
47 | protected List<String> getTaskTypes() {
48 | return Arrays.asList(HDFSConstants.DATA_NODE_ID);
49 | }
50 |
51 | private boolean violatesExclusivityConstraint(Offer offer) {
52 | return config.getRunDatanodeExclusively() &&
53 | (state.hostOccupied(offer.getHostname(), HDFSConstants.NAME_NODE_ID)
54 | || state.hostOccupied(offer.getHostname(), HDFSConstants.JOURNAL_NODE_ID));
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/HdfsMesosConstraints.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Protos.Attribute;
6 | import org.apache.mesos.Protos.Offer;
7 | import org.apache.mesos.Protos.Value.Range;
8 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
9 |
10 | import java.util.List;
11 | import java.util.Map;
12 | import java.util.Set;
13 |
14 | /**
15 |  * Checks Mesos offer attributes against the configured HDFS placement constraints.
16 | */
17 | public class HdfsMesosConstraints {
18 |
19 | private final Log log = LogFactory.getLog(HdfsMesosConstraints.class);
20 | private final HdfsFrameworkConfig config;
21 |
22 | public HdfsMesosConstraints(HdfsFrameworkConfig config) {
23 | this.config = config;
24 | }
25 |
26 | public boolean constraintsAllow(Offer offer) {
27 | List<Attribute> attributes = offer.getAttributesList();
28 |
29 | Map<String, String> constraints = config.getMesosSlaveConstraints();
30 | Set<Map.Entry<String, String>> constraintSet = constraints.entrySet();
31 |
32 | for (Map.Entry<String, String> constraintEntry : constraintSet) {
33 | boolean found = false;
34 | String constraintName = constraintEntry.getKey();
35 | String constraintValue = constraintEntry.getValue();
36 |
37 | for (Attribute attribute : attributes) {
38 | if (attribute.getName().equals(constraintName)) {
39 | switch (attribute.getType()) {
40 | case RANGES:
41 | if (attribute.hasRanges()) {
42 | try {
43 | Long range = Long.parseLong(constraintValue);
44 | for (Range r : attribute.getRanges().getRangeList()) {
45 | if ((!r.hasBegin() || range >= r.getBegin())
46 | && (!r.hasEnd() || range <= r.getEnd())) {
47 | found = true;
48 | break;
49 | }
50 | }
51 | } catch (NumberFormatException e) {
52 | // Offer attribute value is not castable to a number.
53 | String msg = "Constraint value " + constraintValue +
54 | " is not of type range for offer attribute " + constraintName;
55 | log.warn(msg, e);
56 | }
57 | }
58 | break;
59 | case SCALAR:
60 | if (attribute.hasScalar()) {
61 | try {
62 | if (attribute.getScalar().getValue() >= Double
63 | .parseDouble(constraintValue)) {
64 | found = true;
65 | }
66 | } catch (NumberFormatException e) {
67 | // Offer attribute value is not castable to a scalar.
68 | String msg = "Constraint value \"" + constraintValue +
69 | "\" is not of type scalar for offer attribute " + constraintName;
70 | log.warn(msg, e);
71 | }
72 | }
73 | break;
74 | case SET:
75 | if (attribute.hasSet()) {
76 | boolean isSubset = true;
77 | List<String> attributeSetValues = attribute.getSet().getItemList();
78 | String[] constraintSetValues = constraintValue.split(",");
79 | for (String element : constraintSetValues) {
80 | if (!attributeSetValues.contains(element)) {
81 | isSubset = false;
82 | break;
83 | }
84 | }
85 | found = isSubset;
86 | }
87 | break;
88 | case TEXT:
89 | if (attribute.hasText()
90 | && (!attribute.getText().hasValue() || attribute.getText()
91 | .getValue().equals(constraintValue))) {
92 | found = true;
93 | break;
94 | }
95 | break;
96 | default:
97 | break;
98 | }
99 | }
100 |
101 | if (found) {
102 | break;
103 | }
104 | }
105 |
106 | if (!found) {
107 | return false;
108 | }
109 | }
110 |
111 | return true;
112 | }
113 | }
114 |
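A small sketch of how a single TEXT constraint (for example zone=us-east-1a) matches an offer attribute, mirroring the TEXT branch above; the attribute name and value are invented for illustration:

import org.apache.mesos.Protos.Attribute;
import org.apache.mesos.Protos.Value;

public class TextConstraintSketch {
  public static void main(String[] args) {
    // Invented offer attribute of type TEXT.
    Attribute zone = Attribute.newBuilder()
        .setName("zone")
        .setType(Value.Type.TEXT)
        .setText(Value.Text.newBuilder().setValue("us-east-1a"))
        .build();

    // Configured constraint: zone=us-east-1a
    String constraintName = "zone";
    String constraintValue = "us-east-1a";

    // Mirrors the TEXT branch of constraintsAllow: names match and text values are equal.
    boolean found = zone.getName().equals(constraintName)
        && zone.hasText()
        && zone.getText().getValue().equals(constraintValue);

    System.out.println("constraint satisfied: " + found); // true
  }
}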
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/HdfsNode.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Protos.CommandInfo;
6 | import org.apache.mesos.Protos.Environment;
7 | import org.apache.mesos.Protos.ExecutorInfo;
8 | import org.apache.mesos.Protos.Offer;
9 | import org.apache.mesos.Protos.Resource;
10 | import org.apache.mesos.Protos.TaskInfo;
11 | import org.apache.mesos.SchedulerDriver;
12 | import org.apache.mesos.collections.MapUtil;
13 | import org.apache.mesos.collections.StartsWithPredicate;
14 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
15 | import org.apache.mesos.hdfs.config.NodeConfig;
16 | import org.apache.mesos.hdfs.state.HdfsState;
17 | import org.apache.mesos.hdfs.util.HDFSConstants;
18 | import org.apache.mesos.protobuf.CommandInfoBuilder;
19 | import org.apache.mesos.protobuf.EnvironmentBuilder;
20 | import org.apache.mesos.protobuf.ExecutorInfoBuilder;
21 | import org.apache.mesos.protobuf.ResourceBuilder;
22 |
23 | import java.io.IOException;
24 | import java.util.ArrayList;
25 | import java.util.Arrays;
26 | import java.util.Collection;
27 | import java.util.List;
28 | import java.util.concurrent.ExecutionException;
29 |
30 | /**
31 | * HdfsNode base class.
32 | */
33 | public abstract class HdfsNode implements IOfferEvaluator, ILauncher {
34 | private final Log log = LogFactory.getLog(HdfsNode.class);
35 | private final ResourceBuilder resourceBuilder;
36 |
37 | protected final HdfsFrameworkConfig config;
38 | protected final HdfsState state;
39 | protected final String name;
40 |
41 | public HdfsNode(HdfsState state, HdfsFrameworkConfig config, String name) {
42 | this.state = state;
43 | this.config = config;
44 | this.name = name;
45 | this.resourceBuilder = new ResourceBuilder(config.getHdfsRole());
46 | }
47 |
48 | public String getName() {
49 | return name;
50 | }
51 |
52 | protected abstract String getExecutorName();
53 |
54 | protected abstract List<String> getTaskTypes();
55 |
56 | public void launch(SchedulerDriver driver, Offer offer)
57 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
58 | List<Task> tasks = createTasks(offer);
59 | List<TaskInfo> taskInfos = getTaskInfos(tasks);
60 |
61 | // Recording the Tasks is what can throw the exceptions declared above, which guarantees
62 | // that we never launch Tasks we have not first recorded.
63 | recordTasks(tasks);
64 | driver.launchTasks(Arrays.asList(offer.getId()), taskInfos);
65 | }
66 |
67 | private List<TaskInfo> getTaskInfos(List<Task> tasks) {
68 | List<TaskInfo> taskInfos = new ArrayList<>();
69 |
70 | for (Task task : tasks) {
71 | taskInfos.add(task.getInfo());
72 | }
73 |
74 | return taskInfos;
75 | }
76 |
77 | private void recordTasks(List<Task> tasks)
78 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
79 | for (Task task : tasks) {
80 | state.recordTask(task);
81 | }
82 | }
83 |
84 | private ExecutorInfo createExecutor(String taskIdName, String nodeName, String nnNum, String executorName) {
85 |
86 | String cmd = "export JAVA_HOME=$MESOS_DIRECTORY/" + config.getJreVersion()
87 | + " && env ; cd hdfs-mesos-* && "
88 | + "exec `if [ -z \"$JAVA_HOME\" ]; then echo java; "
89 | + "else echo $JAVA_HOME/bin/java; fi` "
90 | + "$HADOOP_OPTS "
91 | + "$EXECUTOR_OPTS "
92 | + "-cp lib/*.jar org.apache.mesos.hdfs.executor." + executorName;
93 |
94 | return ExecutorInfoBuilder.createExecutorInfoBuilder()
95 | .setName(nodeName + " executor")
96 | .setExecutorId(ExecutorInfoBuilder.createExecutorId("executor." + taskIdName))
97 | .addAllResources(getExecutorResources())
98 | .setCommand(CommandInfoBuilder.createCmdInfo(cmd, getCmdUriList(nnNum), getExecutorEnvironment()))
99 | .build();
100 | }
101 |
102 | private List<CommandInfo.URI> getCmdUriList(String nnNum) {
103 | int confServerPort = config.getConfigServerPort();
104 |
105 | String url = String.format("http://%s:%d/%s", config.getFrameworkHostAddress(),
106 | confServerPort, HDFSConstants.HDFS_CONFIG_FILE_NAME);
107 | if (nnNum != null) {
108 | url += "?" + HDFSConstants.NAMENODE_NUM_PARAM + "=" + nnNum;
109 | }
110 |
111 | return Arrays.asList(
112 | CommandInfoBuilder.createCmdInfoUri(String.format("http://%s:%d/%s", config.getFrameworkHostAddress(),
113 | confServerPort,
114 | HDFSConstants.HDFS_BINARY_FILE_NAME)),
115 | CommandInfoBuilder.createCmdInfoUri(url),
116 | CommandInfoBuilder.createCmdInfoUri(config.getJreUrl()));
117 | }
118 |
119 | protected List<Environment.Variable> getExecutorEnvironment() {
120 | List<Environment.Variable> env = EnvironmentBuilder.
121 | createEnvironment(MapUtil.propertyMapFilter(System.getProperties(),
122 | new StartsWithPredicate(HDFSConstants.PROPERTY_VAR_PREFIX)));
123 | env.add(EnvironmentBuilder.createEnvironment("LD_LIBRARY_PATH", config.getLdLibraryPath()));
124 | env.add(EnvironmentBuilder.createEnvironment("EXECUTOR_OPTS", "-Xmx"
125 | + config.getExecutorHeap() + "m -Xms" + config.getExecutorHeap() + "m"));
126 | log.info(env);
127 | return env;
128 | }
129 |
130 | private List<String> getTaskNames(String taskType) {
131 | List<String> names = new ArrayList<>();
132 |
133 | try {
134 | List<Task> tasks = state.getTasks();
135 | for (Task task : tasks) {
136 | if (task.getType().equals(taskType)) {
137 | names.add(task.getName());
138 | }
139 | }
140 | } catch (Exception ex) {
141 | log.error("Failed to retrieve task names, with exception: " + ex);
142 | }
143 |
144 | return names;
145 | }
146 |
147 | private int getTaskTargetCount(String taskType) throws SchedulerException {
148 | switch (taskType) {
149 | case HDFSConstants.NAME_NODE_ID:
150 | return HDFSConstants.TOTAL_NAME_NODES;
151 | case HDFSConstants.JOURNAL_NODE_ID:
152 | return config.getJournalNodeCount();
153 | default:
154 | return 0;
155 | }
156 | }
157 |
158 | private List<Resource> getTaskResources(String taskType) {
159 | NodeConfig nodeConfig = config.getNodeConfig(taskType);
160 | double cpu = nodeConfig.getCpus();
161 | double mem = nodeConfig.getMaxHeap() * config.getJvmOverhead();
162 |
163 | List<Resource> resources = new ArrayList<>();
164 | resources.add(resourceBuilder.createCpuResource(cpu));
165 | resources.add(resourceBuilder.createMemResource(mem));
166 |
167 | return resources;
168 | }
169 |
170 |
171 | private String getNextTaskName(String taskType) {
172 | int targetCount = getTaskTargetCount(taskType);
173 | for (int i = 1; i <= targetCount; i++) {
174 | Collection<String> nameNodeTaskNames = getTaskNames(taskType);
175 | String nextName = taskType + i;
176 | if (!nameNodeTaskNames.contains(nextName)) {
177 | return nextName;
178 | }
179 | }
180 |
181 | // If we are attempting to find a name for a node type that
182 | // expects more than 1 instance (e.g. namenode1, namenode2, etc.)
183 | // we should not reach here.
184 | if (targetCount > 0) {
185 | String errorStr = "Task name requested when no more names are available for Task type: " + taskType;
186 | log.error(errorStr);
187 | throw new SchedulerException(errorStr);
188 | }
189 |
190 | return taskType;
191 | }
192 |
193 | private List<Resource> getExecutorResources() {
194 | double cpu = config.getExecutorCpus();
195 | double mem = config.getExecutorHeap() * config.getJvmOverhead();
196 |
197 | return Arrays.asList(
198 | resourceBuilder.createCpuResource(cpu),
199 | resourceBuilder.createMemResource(mem));
200 | }
201 |
202 | protected boolean enoughResources(Offer offer, double cpus, int mem) {
203 | for (Resource offerResource : offer.getResourcesList()) {
204 | if (offerResource.getName().equals("cpus") &&
205 | cpus + config.getExecutorCpus() > offerResource.getScalar().getValue()) {
206 | return false;
207 | }
208 |
209 | if (offerResource.getName().equals("mem") &&
210 | (mem * config.getJvmOverhead())
211 | + (config.getExecutorHeap() * config.getJvmOverhead())
212 | > offerResource.getScalar().getValue()) {
213 | return false;
214 | }
215 | }
216 |
217 | return true;
218 | }
219 |
220 | private List<Task> createTasks(Offer offer) {
221 | String executorName = getExecutorName();
222 | String taskIdName = String.format("%s.%s.%d", name, executorName, System.currentTimeMillis());
223 | List tasks = new ArrayList<>();
224 |
225 | String nnNum = getTaskTypes().contains(HDFSConstants.NAME_NODE_ID)
226 | ? getNextTaskName(HDFSConstants.NAME_NODE_ID)
227 | : null;
228 |
229 | for (String type : getTaskTypes()) {
230 | String taskName = getNextTaskName(type);
231 |
232 | List resources = getTaskResources(type);
233 | ExecutorInfo execInfo = createExecutor(taskIdName, name, nnNum, executorName);
234 |
235 | tasks.add(new Task(resources, execInfo, offer, taskName, type, taskIdName));
236 | }
237 |
238 | return tasks;
239 | }
240 | }
241 |
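A back-of-the-envelope sketch of the arithmetic in enoughResources(): an offer must cover the node's cpus plus the executor's cpus, and (node heap + executor heap) scaled by the JVM overhead factor. The numbers below are illustrative, not the framework defaults:

public class ResourceMathSketch {
  public static void main(String[] args) {
    // Illustrative numbers only, not the framework defaults.
    double taskCpus = 1.0, executorCpus = 0.5;
    int taskHeapMb = 1024, executorHeapMb = 256;
    double jvmOverhead = 1.35;

    double cpusNeeded = taskCpus + executorCpus;                                     // 1.5 cpus
    double memNeeded = (taskHeapMb * jvmOverhead) + (executorHeapMb * jvmOverhead);  // 1728 MB

    double offerCpus = 2.0, offerMemMb = 2048.0;
    boolean accept = cpusNeeded <= offerCpus && memNeeded <= offerMemMb;
    System.out.printf("need %.1f cpus / %.0f MB -> accept=%b%n", cpusNeeded, memNeeded, accept);
  }
}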
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/HdfsSchedulerModule.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import com.google.inject.AbstractModule;
4 |
5 | /**
6 |  * Guice module that binds interfaces to their implementations for the HDFS Scheduler.
7 | */
8 | public class HdfsSchedulerModule extends AbstractModule {
9 |
10 | @Override
11 | protected void configure() {
12 | bind(StateFactory.class).to(ZKStateFactory.class);
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/ILauncher.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.mesos.Protos.Offer;
4 | import org.apache.mesos.SchedulerDriver;
5 |
6 | import java.io.IOException;
7 | import java.util.concurrent.ExecutionException;
8 |
9 | /**
10 | * ILauncher interface.
11 | */
12 | public interface ILauncher {
13 | public void launch(SchedulerDriver driver, Offer offer)
14 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException;
15 | }
16 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/IOfferEvaluator.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.mesos.Protos.Offer;
4 |
5 | /**
6 | * IOfferEvaluator interface.
7 | */
8 | public interface IOfferEvaluator {
9 | public boolean evaluate(Offer offer);
10 | }
11 |
12 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/JournalNode.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Protos.Offer;
6 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
7 | import org.apache.mesos.hdfs.config.NodeConfig;
8 | import org.apache.mesos.hdfs.state.HdfsState;
9 | import org.apache.mesos.hdfs.util.HDFSConstants;
10 |
11 | import java.util.Arrays;
12 | import java.util.List;
13 |
14 | /**
15 | * JournalNode.
16 | */
17 | public class JournalNode extends HdfsNode {
18 | private final Log log = LogFactory.getLog(JournalNode.class);
19 |
20 | public JournalNode(
21 | HdfsState state,
22 | HdfsFrameworkConfig config) {
23 | super(state, config, HDFSConstants.JOURNAL_NODE_ID);
24 | }
25 |
26 | public boolean evaluate(Offer offer) {
27 | boolean accept = false;
28 |
29 | NodeConfig journalNodeConfig = config.getNodeConfig(HDFSConstants.JOURNAL_NODE_ID);
30 |
31 | int journalCount = 0;
32 | try {
33 | journalCount = state.getJournalCount();
34 | } catch (Exception ex) {
35 | log.error("Failed to retrieve Journal count with exception: " + ex);
36 | return false;
37 | }
38 |
39 | if (!enoughResources(offer, journalNodeConfig.getCpus(), journalNodeConfig.getMaxHeap())) {
40 | log.info("Offer does not have enough resources");
41 | } else if (journalCount >= config.getJournalNodeCount()) {
42 | log.info(String.format("Already running %s journalnodes", config.getJournalNodeCount()));
43 | } else if (state.hostOccupied(offer.getHostname(), HDFSConstants.JOURNAL_NODE_ID)) {
44 | log.info(String.format("Already running journalnode on %s", offer.getHostname()));
45 | } else if (config.getRunDatanodeExclusively()
46 | && state.hostOccupied(offer.getHostname(), HDFSConstants.DATA_NODE_ID)) {
47 | log.info(String.format("Cannot colocate journalnode and datanode on %s", offer.getHostname()));
48 | } else {
49 | accept = true;
50 | }
51 |
52 | return accept;
53 | }
54 |
55 | protected String getExecutorName() {
56 | return HDFSConstants.NODE_EXECUTOR_ID;
57 | }
58 |
59 | protected List<String> getTaskTypes() {
60 | return Arrays.asList(HDFSConstants.JOURNAL_NODE_ID);
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/Main.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import com.google.inject.Guice;
4 | import com.google.inject.Injector;
5 | import org.apache.commons.logging.Log;
6 | import org.apache.commons.logging.LogFactory;
7 | import org.apache.mesos.hdfs.config.ConfigServer;
8 | import org.apache.mesos.process.FailureUtils;
9 |
10 | /**
11 | * Main entry point for the Scheduler.
12 | */
13 | public final class Main {
14 |
15 | private final Log log = LogFactory.getLog(Main.class);
16 |
17 | public static void main(String[] args) {
18 | new Main().start();
19 | }
20 |
21 | private void start() {
22 | Injector injector = Guice.createInjector(new HdfsSchedulerModule());
23 | getSchedulerThread(injector).start();
24 | injector.getInstance(ConfigServer.class);
25 | }
26 |
27 | private Thread getSchedulerThread(Injector injector) {
28 | Thread scheduler = new Thread(injector.getInstance(HdfsScheduler.class));
29 | scheduler.setName("HdfsScheduler");
30 | scheduler.setUncaughtExceptionHandler(getUncaughtExceptionHandler());
31 | return scheduler;
32 | }
33 |
34 | private Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
35 |
36 | return new Thread.UncaughtExceptionHandler() {
37 | @Override
38 | public void uncaughtException(Thread t, Throwable e) {
39 | final String message = "Scheduler exiting due to uncaught exception";
40 | log.error(message, e);
41 | FailureUtils.exit(message, 2);
42 | }
43 | };
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/NameNode.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Protos.Offer;
6 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
7 | import org.apache.mesos.hdfs.config.NodeConfig;
8 | import org.apache.mesos.hdfs.state.HdfsState;
9 | import org.apache.mesos.hdfs.util.DnsResolver;
10 | import org.apache.mesos.hdfs.util.HDFSConstants;
11 |
12 | import java.util.Arrays;
13 | import java.util.List;
14 |
15 | /**
16 | * Namenode.
17 | */
18 | public class NameNode extends HdfsNode {
19 | private final Log log = LogFactory.getLog(NameNode.class);
20 | private String executorName = HDFSConstants.NAME_NODE_EXECUTOR_ID;
21 | private DnsResolver dnsResolver;
22 |
23 | public NameNode(
24 | HdfsState state,
25 | DnsResolver dnsResolver,
26 | HdfsFrameworkConfig config) {
27 | super(state, config, HDFSConstants.NAME_NODE_ID);
28 | this.dnsResolver = dnsResolver;
29 | }
30 |
31 | public boolean evaluate(Offer offer) {
32 | boolean accept = false;
33 | String hostname = offer.getHostname();
34 |
35 | if (dnsResolver.journalNodesResolvable()) {
36 | NodeConfig nameNodeConfig = config.getNodeConfig(HDFSConstants.NAME_NODE_ID);
37 | NodeConfig zkfcNodeConfig = config.getNodeConfig(HDFSConstants.ZKFC_NODE_ID);
38 |
39 | int nameCount = 0;
40 | try {
41 | nameCount = state.getNameCount();
42 | } catch (Exception ex) {
43 | log.error("Failed to retrieve NameNode count with exception: " + ex);
44 | return false;
45 | }
46 |
47 | if (!enoughResources(offer,
48 | (nameNodeConfig.getCpus() + zkfcNodeConfig.getCpus()),
49 | (nameNodeConfig.getMaxHeap() + zkfcNodeConfig.getMaxHeap()))) {
50 | log.info("Offer does not have enough resources");
51 | } else if (nameCount >= HDFSConstants.TOTAL_NAME_NODES) {
52 | log.info(String.format("Already running %s namenodes", HDFSConstants.TOTAL_NAME_NODES));
53 | } else if (state.hostOccupied(hostname, HDFSConstants.NAME_NODE_ID)) {
54 | log.info(String.format("Already running namenode on %s", offer.getHostname()));
55 | } else if (config.getRunDatanodeExclusively()
56 | && state.hostOccupied(hostname, HDFSConstants.DATA_NODE_ID)) {
57 | log.info(String.format("Cannot colocate namenode and datanode on %s", offer.getHostname()));
58 | } else if (!state.hostOccupied(hostname, HDFSConstants.JOURNAL_NODE_ID)) {
59 | log.info(String.format("We need to colocate the namenode with a journalnode and there is "
60 | + "no journalnode running on this host. %s", offer.getHostname()));
61 | } else {
62 | accept = true;
63 | }
64 | }
65 |
66 | return accept;
67 | }
68 |
69 | protected String getExecutorName() {
70 | return HDFSConstants.NAME_NODE_EXECUTOR_ID;
71 | }
72 |
73 | protected List<String> getTaskTypes() {
74 | return Arrays.asList(HDFSConstants.NAME_NODE_ID, HDFSConstants.ZKFC_NODE_ID);
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/NodeLauncher.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Protos.Offer;
6 | import org.apache.mesos.Protos.OfferID;
7 | import org.apache.mesos.SchedulerDriver;
8 |
9 | import java.io.IOException;
10 | import java.util.concurrent.ExecutionException;
11 |
12 | /**
13 | * Attempts to launch HDFS nodes, after determining whether an offer is appropriate.
14 | */
15 | public class NodeLauncher {
16 | private static final Log log = LogFactory.getLog(NodeLauncher.class);
17 |
18 | public boolean tryLaunch(SchedulerDriver driver, Offer offer, HdfsNode node)
19 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
20 |
21 | String nodeName = node.getName();
22 | OfferID offerId = offer.getId();
23 |
24 | log.info(String.format("Node: %s, evaluating offer: %s", nodeName, offerId));
25 | boolean acceptOffer = node.evaluate(offer);
26 |
27 | if (acceptOffer) {
28 | log.info(String.format("Node: %s, accepting offer: %s", nodeName, offerId));
29 | node.launch(driver, offer);
30 | } else {
31 | log.info(String.format("Node: %s, declining offer: %s", nodeName, offerId));
32 | driver.declineOffer(offerId);
33 | }
34 |
35 | return acceptOffer;
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/Reconciler.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import com.google.inject.Inject;
4 | import org.apache.commons.logging.Log;
5 | import org.apache.commons.logging.LogFactory;
6 | import org.apache.mesos.Protos;
7 | import org.apache.mesos.Protos.TaskStatus;
8 | import org.apache.mesos.SchedulerDriver;
9 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
10 | import org.apache.mesos.hdfs.state.HdfsState;
11 | import org.apache.mesos.hdfs.util.HDFSConstants;
12 | import org.apache.mesos.hdfs.util.TaskStatusFactory;
13 | import org.apache.mesos.protobuf.TaskUtil;
14 |
15 | import java.util.ArrayList;
16 | import java.util.Collections;
17 | import java.util.HashSet;
18 | import java.util.List;
19 | import java.util.Observable;
20 | import java.util.Observer;
21 | import java.util.Set;
22 | import java.util.concurrent.ExecutionException;
23 |
24 | /**
25 |  * Reconciles the scheduler's view of task state with Mesos via explicit and implicit reconciliation.
26 | */
27 | public class Reconciler implements Observer {
28 | private final Log log = LogFactory.getLog(Reconciler.class);
29 |
30 | private HdfsFrameworkConfig config;
31 | private HdfsState state;
32 | private Set<String> pendingTasks;
33 |
34 | @Inject
35 | public Reconciler(HdfsFrameworkConfig config, HdfsState state) {
36 | this.config = config;
37 | this.state = state;
38 | this.pendingTasks = new HashSet<>();
39 | }
40 |
41 | public void reconcile(SchedulerDriver driver) throws InterruptedException, ExecutionException {
42 | pendingTasks = state.getTaskIds();
43 | (new ReconcileThread(this, driver)).start();
44 | }
45 |
46 | private void reconcileInternal(SchedulerDriver driver) {
47 | if (pendingTasks != null) {
48 | logPendingTasks();
49 | explicitlyReconcileTasks(driver);
50 | } else {
51 | log.warn("IPersistentStateStore returned null list of TaskIds");
52 | }
53 |
54 | implicitlyReconcileTasks(driver);
55 | }
56 |
57 | public void update(Observable obs, Object obj) {
58 | TaskStatus status = (TaskStatus) obj;
59 |
60 | String taskId = status.getTaskId().getValue();
61 | log.info("Received task update for: " + taskId);
62 |
63 | if (!complete()) {
64 | log.info("Reconciliation is NOT complete");
65 |
66 | if (taskIsPending(taskId)) {
67 | log.info(String.format("Reconciling Task '%s'.", taskId));
68 | pendingTasks.remove(taskId);
69 | } else {
70 | log.info(String.format("Task %s has already been reconciled or is unknown.", taskId));
71 | }
72 |
73 | logPendingTasks();
74 |
75 | if (complete()) {
76 | log.info("Reconciliation is complete");
77 | }
78 | }
79 | }
80 |
81 | private boolean taskIsPending(String taskId) {
82 | for (String t : pendingTasks) {
83 | if (t.equals(taskId)) {
84 | return true;
85 | }
86 | }
87 |
88 | return false;
89 | }
90 |
91 | public boolean complete() {
92 | if (pendingTasks.size() > 0) {
93 | return false;
94 | }
95 |
96 | return true;
97 | }
98 |
99 | private void logPendingTasks() {
100 | log.info("=========================================");
101 | log.info("pendingTasks size: " + pendingTasks.size());
102 | for (String t : pendingTasks) {
103 | log.info(t);
104 | }
105 | log.info("=========================================");
106 | }
107 |
108 | private void implicitlyReconcileTasks(SchedulerDriver driver) {
109 | log.info("Implicitly Reconciling Tasks");
110 | driver.reconcileTasks(Collections.emptyList());
111 | }
112 |
113 | private void explicitlyReconcileTasks(SchedulerDriver driver) {
114 | log.info("Explicitly Reconciling Tasks");
115 | List<TaskStatus> tasks = new ArrayList<>();
116 |
117 | for (String id : pendingTasks) {
118 | if (id == null) {
119 | log.warn("NULL TaskID encountered during Explicit Reconciliation.");
120 | } else {
121 | Protos.TaskID taskId = TaskUtil.createTaskId(id);
122 |
123 | TaskStatus taskStatus = TaskStatusFactory.createRunningStatus(taskId);
124 | tasks.add(taskStatus);
125 | }
126 | }
127 |
128 | driver.reconcileTasks(tasks);
129 | }
130 |
131 | private class ReconcileThread extends Thread {
132 | private static final int BACKOFF_MULTIPLIER = 2;
133 |
134 | private Reconciler reconciler;
135 | private SchedulerDriver driver;
136 |
137 | public ReconcileThread(Reconciler reconciler, SchedulerDriver driver) {
138 | this.reconciler = reconciler;
139 | this.driver = driver;
140 | }
141 |
142 | public void run() {
143 | int currDelay = reconciler.config.getReconciliationTimeout();
144 |
145 | while (!reconciler.complete()) {
146 | reconciler.reconcileInternal(driver);
147 | int sleepDuration = currDelay * HDFSConstants.MILLIS_FROM_SECONDS;
148 |
149 | log.info(String.format("Sleeping for %sms before retrying reconciliation.", sleepDuration));
150 | try {
151 | Thread.sleep(sleepDuration);
152 | } catch (InterruptedException ex) {
153 | log.warn(String.format("Reconciliation thread sleep was interrupted with exception: %s", ex));
154 | }
155 |
156 | currDelay = getDelay(currDelay);
157 | }
158 | }
159 |
160 | private int getDelay(int currDelay) {
161 | int tempDelay = currDelay * BACKOFF_MULTIPLIER;
162 | int maxDelay = reconciler.config.getMaxReconciliationTimeout();
163 |
164 | return Math.min(tempDelay, maxDelay);
165 | }
166 | }
167 | }
168 |
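A sketch of the retry backoff used by ReconcileThread above: the delay starts at the configured reconciliation timeout, doubles after every pass, and is capped at the configured maximum. The 10s/120s values are assumptions for illustration, not the framework defaults:

public class ReconcileBackoffSketch {
  public static void main(String[] args) {
    int delaySec = 10;      // assumed reconciliationTimeout
    int maxDelaySec = 120;  // assumed maxReconciliationTimeout

    for (int pass = 1; pass <= 6; pass++) {
      System.out.printf("pass %d: sleep %d s%n", pass, delaySec);
      delaySec = Math.min(delaySec * 2, maxDelaySec); // BACKOFF_MULTIPLIER = 2, capped at the max
    }
    // Prints 10, 20, 40, 80, 120, 120.
  }
}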
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/SchedulerException.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | /**
4 | * Exceptions in the scheduler which likely result in the scheduler being shut down.
5 | */
6 | public class SchedulerException extends RuntimeException {
7 |
8 | public SchedulerException(Throwable cause) {
9 | super(cause);
10 | }
11 |
12 | public SchedulerException(String message) {
13 | super(message);
14 | }
15 |
16 | public SchedulerException(String message, Throwable cause) {
17 | super(message, cause);
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/StateFactory.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
4 | import org.apache.mesos.state.State;
5 |
6 | /**
7 | * StateFactory interface.
8 | */
9 | public interface StateFactory {
10 | public State create(String path, HdfsFrameworkConfig config);
11 | }
12 |
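A hedged sketch of how an injected StateFactory might be consumed. The field injection and the "/hdfs-mesos" ZooKeeper path are illustrative assumptions, not taken from the scheduler sources:

    import com.google.inject.Inject;
    import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
    import org.apache.mesos.state.State;

    public class StateFactoryUsageSketch {
      @Inject private StateFactory stateFactory;
      @Inject private HdfsFrameworkConfig config;

      public State createFrameworkState() {
        // "/hdfs-mesos" is a made-up path used only for this example.
        return stateFactory.create("/hdfs-mesos", config);
      }
    }

In production the binding presumably points at ZKStateFactory, while the tests swap in an in-memory implementation (see SchedulerModuleTest later in this dump).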
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/Task.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import org.apache.mesos.Protos.ExecutorInfo;
4 | import org.apache.mesos.Protos.Offer;
5 | import org.apache.mesos.Protos.Resource;
6 | import org.apache.mesos.Protos.TaskID;
7 | import org.apache.mesos.Protos.TaskInfo;
8 | import org.apache.mesos.Protos.TaskStatus;
9 | import org.apache.mesos.protobuf.TaskInfoBuilder;
10 |
11 | import java.io.IOException;
12 | import java.io.ObjectStreamException;
13 | import java.io.Serializable;
14 | import java.util.List;
15 |
16 | /**
17 | * Task class encapsulates TaskInfo and metadata necessary for recording State when appropriate.
18 | */
19 | public class Task implements Serializable {
20 | private TaskInfo info;
21 | private TaskStatus status;
22 | private Offer offer;
23 | private String type;
24 | private String name;
25 |
26 | public Task(
27 | List<Resource> resources,
28 | ExecutorInfo execInfo,
29 | Offer offer,
30 | String name,
31 | String type,
32 | String idName) {
33 |
34 | this.info = new TaskInfoBuilder(String.format("task.%s.%s", type, idName), name, offer.getSlaveId().getValue())
35 | .setExecutorInfo(execInfo)
36 | .addAllResources(resources)
37 | .setData(String.format("bin/hdfs-mesos-%s", type))
38 | .build();
39 |
40 | setStatus(null);
41 | this.offer = offer;
42 | this.type = type;
43 | this.name = name;
44 | }
45 |
46 | public TaskID getId() {
47 | return getInfo().getTaskId();
48 | }
49 |
50 | public TaskInfo getInfo() {
51 | return info;
52 | }
53 |
54 | public TaskStatus getStatus() {
55 | return status;
56 | }
57 |
58 | public Offer getOffer() {
59 | return offer;
60 | }
61 |
62 | public String getType() {
63 | return type;
64 | }
65 |
66 | public String getName() {
67 | return name;
68 | }
69 |
70 | public String getHostname() {
71 | return offer.getHostname();
72 | }
73 |
74 | public void setStatus(TaskStatus status) {
75 | this.status = status;
76 | }
77 |
78 | private void writeObject(java.io.ObjectOutputStream out) throws IOException {
79 | out.defaultWriteObject();
80 | }
81 |
82 | private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
83 | in.defaultReadObject();
84 | }
85 |
86 | private static class TaskDeserializationException extends ObjectStreamException {
87 | }
88 |
89 | private void readObjectNoData() throws ObjectStreamException {
90 | throw new TaskDeserializationException();
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/scheduler/ZKStateFactory.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import com.google.inject.Inject;
4 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
5 | import org.apache.mesos.state.State;
6 | import org.apache.mesos.state.ZooKeeperState;
7 |
8 | import java.util.concurrent.TimeUnit;
9 |
10 | /**
11 | * Generates Zookeeper Mesos State abstractions.
12 | */
13 | public class ZKStateFactory implements StateFactory {
14 |
15 | @Inject
16 | public State create(String path, HdfsFrameworkConfig config) {
17 | return new ZooKeeperState(
18 | config.getStateZkServers(),
19 | config.getStateZkTimeout(),
20 | TimeUnit.MILLISECONDS,
21 | path);
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/state/AcquisitionPhase.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.state;
2 |
3 | /**
4 | * Defines the phases the scheduler moves through while acquiring cluster nodes.
5 | */
6 | public enum AcquisitionPhase {
7 |
8 | /**
9 | * Waits here for the timeout on (re)registration.
10 | */
11 | RECONCILING_TASKS,
12 |
13 | /**
14 | * Launches and waits for all journalnodes to start.
15 | */
16 | JOURNAL_NODES,
17 |
18 | /**
19 | * Launches both namenodes.
20 | */
21 | NAME_NODES,
22 |
23 | /**
24 | * If everything is healthy the scheduler stays here and tries to launch
25 | * datanodes on any slave that doesn't have an hdfs task running on it.
26 | */
27 | DATA_NODES
28 | }
29 |
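The scheduler keys its offer handling off this phase (the HdfsSchedulerSpec later in this dump exercises exactly that). A hedged sketch of the reconciliation guard, not the actual resourceOffers body; imports of SchedulerDriver, Protos, and the scheduler's StateMachine are assumed:

    // Illustrative only: while tasks are still being reconciled, incoming offers are declined.
    void handleOffer(SchedulerDriver driver, Protos.Offer offer, StateMachine stateMachine) {
      if (stateMachine.getCurrentPhase() == AcquisitionPhase.RECONCILING_TASKS) {
        driver.declineOffer(offer.getId());
      }
    }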
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/state/Serializer.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.state;
2 |
3 | import java.io.ByteArrayInputStream;
4 | import java.io.ByteArrayOutputStream;
5 | import java.io.IOException;
6 | import java.io.ObjectInputStream;
7 | import java.io.ObjectOutputStream;
8 |
9 | /**
10 | * Serializes objects to and from byte arrays. Adapted from a Stack Overflow example:
11 | * http://stackoverflow.com/questions/5837698/converting-any-object-to-a-byte-array-in-java#
12 | *
13 | * Serialization failures surface as IOException from both methods.
14 | */
15 | public class Serializer {
16 | public static byte[] serialize(Object obj) throws IOException {
17 | ByteArrayOutputStream b = new ByteArrayOutputStream();
18 | ObjectOutputStream o = new ObjectOutputStream(b);
19 | o.writeObject(obj);
20 | return b.toByteArray();
21 | }
22 |
23 | public static Object deserialize(byte[] bytes) throws IOException, ClassNotFoundException {
24 | ByteArrayInputStream b = new ByteArrayInputStream(bytes);
25 | ObjectInputStream o = new ObjectInputStream(b);
26 | return o.readObject();
27 | }
28 | }
29 |
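A quick round-trip example for the helper above; Task (defined earlier in this scheduler package) is just an example of a Serializable payload, used here for illustration:

    // Minimal round-trip sketch; exceptions propagate to the caller.
    static Task copyViaSerialization(Task original) throws IOException, ClassNotFoundException {
      byte[] bytes = Serializer.serialize(original);
      return (Task) Serializer.deserialize(bytes);
    }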
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/state/StateMachine.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.state;
2 |
3 | import com.google.inject.Inject;
4 | import org.apache.commons.logging.Log;
5 | import org.apache.commons.logging.LogFactory;
6 | import org.apache.mesos.SchedulerDriver;
7 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
8 | import org.apache.mesos.hdfs.scheduler.Reconciler;
9 | import org.apache.mesos.process.FailureUtils;
10 | import org.apache.mesos.hdfs.util.HDFSConstants;
11 |
12 | /**
13 | * The Scheduler state machine.
14 | */
15 | public class StateMachine {
16 | private final HdfsState state;
17 | private final HdfsFrameworkConfig config;
18 | private final Log log = LogFactory.getLog(StateMachine.class);
19 | private final Reconciler reconciler;
20 | private AcquisitionPhase currPhase;
21 |
22 | @Inject
23 | public StateMachine(
24 | HdfsState state,
25 | HdfsFrameworkConfig config,
26 | Reconciler reconciler) {
27 | this.state = state;
28 | this.config = config;
29 | this.currPhase = AcquisitionPhase.RECONCILING_TASKS;
30 | this.reconciler = reconciler;
31 | }
32 |
33 | public Reconciler getReconciler() {
34 | return reconciler;
35 | }
36 |
37 | public AcquisitionPhase getCurrentPhase() {
38 | return currPhase;
39 | }
40 |
41 | public AcquisitionPhase correctPhase() {
42 | int currJournalCount = 0;
43 | int currNameCount = 0;
44 |
45 | try {
46 | currJournalCount = state.getJournalCount();
47 | currNameCount = state.getNameCount();
48 | } catch (Exception ex) {
49 | // We will not change phase here if we cannot determine our state
50 | log.error("Failed to fetch node counts with exception: " + ex);
51 | return currPhase;
52 | }
53 |
54 | int targetJournalCount = config.getJournalNodeCount();
55 | int targetNameCount = HDFSConstants.TOTAL_NAME_NODES;
56 |
57 | log.info(String.format("Correcting phase with journal counts: %s/%s and name counts: %s/%s",
58 | currJournalCount,
59 | targetJournalCount,
60 | currNameCount,
61 | targetNameCount));
62 |
63 | if (!reconciler.complete()) {
64 | transitionTo(AcquisitionPhase.RECONCILING_TASKS);
65 | } else if (currJournalCount < targetJournalCount) {
66 | transitionTo(AcquisitionPhase.JOURNAL_NODES);
67 | } else if (currNameCount < targetNameCount || !state.nameNodesInitialized()) {
68 | transitionTo(AcquisitionPhase.NAME_NODES);
69 | } else {
70 | transitionTo(AcquisitionPhase.DATA_NODES);
71 | }
72 |
73 | log.info("Current phase is now: " + currPhase);
74 | return currPhase;
75 | }
76 |
77 | private void transitionTo(AcquisitionPhase nextPhase) {
78 | if (currPhase.equals(nextPhase)) {
79 | log.info(String.format("Acquisition phase is already '%s'", currPhase));
80 | } else {
81 | log.info(String.format("Transitioning from acquisition phase '%s' to '%s'", currPhase, nextPhase));
82 | currPhase = nextPhase;
83 | }
84 | }
85 |
86 | public void reconcile(SchedulerDriver driver) {
87 | try {
88 | transitionTo(AcquisitionPhase.RECONCILING_TASKS);
89 | reconciler.reconcile(driver);
90 | } catch (Exception ex) {
91 | FailureUtils.exit("Failed to conduct Reconciliation with exception: " + ex, HDFSConstants.RECONCILE_EXIT_CODE);
92 | }
93 | }
94 | }
95 |
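A sketch of the intended call sequence, assuming the scheduler starts reconciliation at registration and re-derives the phase on later offer cycles; the surrounding wiring is simplified:

    // On (re)registration: enter RECONCILING_TASKS and kick off the Reconciler.
    stateMachine.reconcile(driver);

    // On a later offer cycle: recompute the phase from the recorded node counts.
    AcquisitionPhase phase = stateMachine.correctPhase();
    if (phase == AcquisitionPhase.DATA_NODES) {
      // journal and name nodes are up and initialized; offers can go to datanodes
    }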
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/util/DnsResolver.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.util;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.mesos.Protos;
6 | import org.apache.mesos.SchedulerDriver;
7 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
8 | import org.apache.mesos.hdfs.scheduler.HdfsScheduler;
9 |
10 | import java.io.IOException;
11 | import java.net.InetAddress;
12 | import java.util.HashSet;
13 | import java.util.Set;
14 | import java.util.Timer;
15 |
16 | /**
17 | * Provides DNS resolution specific to HDFS.
18 | */
19 | public class DnsResolver {
20 | private final Log log = LogFactory.getLog(DnsResolver.class);
21 |
22 | static final int NN_TIMER_PERIOD = 10000;
23 |
24 | private final HdfsScheduler scheduler;
25 | private final HdfsFrameworkConfig hdfsFrameworkConfig;
26 |
27 | public DnsResolver(HdfsScheduler scheduler, HdfsFrameworkConfig hdfsFrameworkConfig) {
28 | this.scheduler = scheduler;
29 | this.hdfsFrameworkConfig = hdfsFrameworkConfig;
30 | }
31 |
32 | public boolean journalNodesResolvable() {
33 | if (!hdfsFrameworkConfig.usingMesosDns()) {
34 | return true;
35 | } //short circuit since Mesos handles this otherwise
36 | Set<String> hosts = new HashSet<>();
37 | for (int i = 1; i <= hdfsFrameworkConfig.getJournalNodeCount(); i++) {
38 | hosts.add(HDFSConstants.JOURNAL_NODE_ID + i + "." + hdfsFrameworkConfig.getFrameworkName() +
39 | "." + hdfsFrameworkConfig.getMesosDnsDomain());
40 | }
41 | boolean success = true;
42 | for (String host : hosts) {
43 | log.info("Resolving DNS for " + host);
44 | try {
45 | InetAddress.getByName(host);
46 | log.info("Successfully found " + host);
47 | } catch (SecurityException | IOException e) {
48 | log.warn("Couldn't resolve host " + host);
49 | success = false;
50 | break;
51 | }
52 | }
53 | return success;
54 | }
55 |
56 | public boolean nameNodesResolvable() {
57 | if (!hdfsFrameworkConfig.usingMesosDns()) {
58 | return true;
59 | } //short circuit since Mesos handles this otherwise
60 | Set<String> hosts = new HashSet<>();
61 | for (int i = 1; i <= HDFSConstants.TOTAL_NAME_NODES; i++) {
62 | hosts.add(HDFSConstants.NAME_NODE_ID + i + "." + hdfsFrameworkConfig.getFrameworkName() +
63 | "." + hdfsFrameworkConfig.getMesosDnsDomain());
64 | }
65 | boolean success = true;
66 | for (String host : hosts) {
67 | log.info("Resolving DNS for " + host);
68 | try {
69 | InetAddress.getByName(host);
70 | log.info("Successfully found " + host);
71 | } catch (SecurityException | IOException e) {
72 | log.warn("Couldn't resolve host " + host);
73 | success = false;
74 | break;
75 | }
76 | }
77 | return success;
78 | }
79 |
80 | public void sendMessageAfterNNResolvable(final SchedulerDriver driver,
81 | final Protos.TaskID taskId, final Protos.SlaveID slaveID, final String message) {
82 | if (!hdfsFrameworkConfig.usingMesosDns()) {
83 | // short circuit since Mesos handles this otherwise
84 | scheduler.sendMessageTo(driver, taskId, slaveID, message);
85 | return;
86 | }
87 | Timer timer = new Timer();
88 | PreNNInitTask task = new PreNNInitTask(this, scheduler, driver, taskId, slaveID, message);
89 | timer.scheduleAtFixedRate(task, 0, NN_TIMER_PERIOD);
90 | }
91 | }
92 |
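A hedged usage sketch: when Mesos-DNS is enabled, delivery of the NameNode init message is deferred (via a Timer running PreNNInitTask every 10 seconds) until the namenode hostnames resolve. The message literal below is a placeholder, not the framework's actual constant:

    // Sends immediately when Mesos-DNS is not in use, otherwise retries until DNS resolves.
    dnsResolver.sendMessageAfterNNResolvable(driver, taskId, slaveId, "INIT_NAMENODE");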
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/util/NodeTypes.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.util;
2 |
3 | /**
4 | * List of node types.
5 | */
6 | public final class NodeTypes {
7 |
8 | public static final String NAMENODES_KEY = "nameNodes";
9 | public static final String JOURNALNODES_KEY = "journalNodes";
10 | public static final String DATANODES_KEY = "dataNodes";
11 |
12 |
13 | public NodeTypes() {
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/java/org/apache/mesos/hdfs/util/PreNNInitTask.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.util;
2 |
3 | import org.apache.mesos.Protos;
4 | import org.apache.mesos.SchedulerDriver;
5 | import org.apache.mesos.hdfs.scheduler.HdfsScheduler;
6 |
7 | import java.util.TimerTask;
8 |
9 | /**
10 | * Timer task that waits for the NameNode DNS entries to resolve before sending the init message.
11 | */
12 | public class PreNNInitTask extends TimerTask {
13 |
14 | private final DnsResolver dnsResolver;
15 | private final HdfsScheduler scheduler;
16 | private final SchedulerDriver driver;
17 | private final Protos.TaskID taskId;
18 | private final Protos.SlaveID slaveID;
19 | private final String message;
20 |
21 | public PreNNInitTask(DnsResolver dnsResolver, HdfsScheduler scheduler, SchedulerDriver driver, Protos.TaskID taskId,
22 | Protos.SlaveID slaveID, String message) {
23 | this.dnsResolver = dnsResolver;
24 | this.scheduler = scheduler;
25 | this.driver = driver;
26 | this.taskId = taskId;
27 | this.slaveID = slaveID;
28 | this.message = message;
29 | }
30 |
31 | @Override
32 | public void run() {
33 | if (dnsResolver.nameNodesResolvable()) {
34 | scheduler.sendMessageTo(driver, taskId, slaveID, message);
35 | this.cancel();
36 | }
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
4 |     <encoder>
5 |       <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
6 |     </encoder>
7 |   </appender>
8 |
9 |   <root level="INFO">
10 |     <appender-ref ref="STDOUT"/>
11 |   </root>
12 | </configuration>
13 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/SchedulerModuleTest.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs;
2 |
3 | import com.google.inject.AbstractModule;
4 | import org.apache.mesos.hdfs.scheduler.InMemoryStateFactory;
5 | import org.apache.mesos.hdfs.scheduler.StateFactory;
6 |
7 | /**
8 | * Guice module that binds interfaces to test implementations for the HDFS Scheduler.
9 | */
10 | public class SchedulerModuleTest extends AbstractModule {
11 |
12 | @Override
13 | protected void configure() {
14 | bind(StateFactory.class).to(InMemoryStateFactory.class);
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/config/HdfsFrameworkConfigSpec.groovy:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.config
2 |
3 | import org.junit.Rule
4 | import org.junit.rules.TemporaryFolder
5 | import spock.lang.Specification
6 | import spock.util.environment.RestoreSystemProperties
7 |
8 | /**
9 | * Spock specification covering HdfsFrameworkConfig system-property overrides.
10 | */
11 | class HdfsFrameworkConfigSpec extends Specification {
12 |
13 | @Rule
14 | final TemporaryFolder temporaryFolder = new TemporaryFolder()
15 | File xmlFile
16 |
17 | def setup() {
18 | temporaryFolder.create()
19 | xmlFile = file("mesos-site.xml")
20 | System.setProperty("mesos.conf.path", xmlFile.absolutePath)
21 | }
22 |
23 | @RestoreSystemProperties
24 | def "system property override"() {
25 | given:
26 | createXML()
27 |
28 | when:
29 | def config = new HdfsFrameworkConfig()
30 | def datadir = config.dataDir
31 | def fwName = config.frameworkName
32 |
33 | then:
34 | datadir == "/var/lib/hdfs/data"
35 | fwName == "hdfs"
36 |
37 | when:
38 | System.setProperty("MESOS_HDFS_DATA_DIR", "spacetime")
39 | System.setProperty("MESOS_HDFS_FRAMEWORK_NAME", "einstein")
40 | config = new HdfsFrameworkConfig()
41 | datadir = config.dataDir
42 | fwName = config.frameworkName
43 |
44 | then:
45 | datadir == "spacetime"
46 | fwName == "einstein"
47 | }
48 |
49 | def createXML() {
50 | xmlFile << """
51 | <configuration>
52 |   <property>
53 |     <name>mesos.hdfs.data.dir</name>
54 |     <description>The primary data directory in HDFS</description>
55 |     <value>/var/lib/hdfs/data</value>
56 |   </property>
57 |   <property>
58 |     <name>mesos.hdfs.framework.name</name>
59 |     <description>Your Mesos framework name and cluster name when accessing files (hdfs://YOUR_NAME)</description>
60 |     <value>hdfs</value>
61 |   </property>
62 | </configuration>
63 | """
64 | }
65 |
66 | File file(String name) {
67 | def file = new File(temporaryFolder.root, name)
68 | file.parentFile.mkdirs()
69 | return file
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/scheduler/HdfsNodeSpec.groovy:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler
2 |
3 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig
4 | import spock.lang.Specification
5 | import spock.util.environment.RestoreSystemProperties
6 |
7 | /**
8 | * Spock specification covering the HdfsNode executor environment.
9 | */
10 | class HdfsNodeSpec extends Specification {
11 |
12 | HdfsNode hdfsNode
13 | def config = Mock(HdfsFrameworkConfig)
14 |
15 | def setup() {
16 | config.getHdfsRole() >> "*"
17 | hdfsNode = new DataNode(null, config)
18 | }
19 |
20 | @RestoreSystemProperties
21 | def "executor environment with system properties"() {
22 |
23 | when:
24 | config.getLdLibraryPath() >> "path"
25 | config.getExecutorHeap() >> 512
26 |
27 | then:
28 | hdfsNode.getExecutorEnvironment().size() == 2
29 |
30 | when:
31 | System.properties.put("MESOS_HDFS_NEW_PROP", "value")
32 |
33 | then:
34 | hdfsNode.getExecutorEnvironment().size() == 3
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/scheduler/HdfsSchedulerSpec.groovy:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler
2 |
3 | import com.google.inject.Guice
4 | import org.apache.mesos.SchedulerDriver
5 | import org.apache.mesos.hdfs.SchedulerModuleTest
6 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig
7 | import org.apache.mesos.hdfs.state.AcquisitionPhase
8 | import org.apache.mesos.hdfs.state.HdfsState
9 | import org.apache.mesos.hdfs.state.StateMachine
10 | import org.apache.mesos.protobuf.FrameworkInfoUtil
11 | import org.apache.mesos.protobuf.OfferBuilder
12 | import spock.lang.Shared
13 | import spock.lang.Specification
14 |
15 | /**
16 | * Spock specification covering HdfsScheduler offer handling.
17 | */
18 | class HdfsSchedulerSpec extends Specification {
19 |
20 | def injector = Guice.createInjector(new SchedulerModuleTest())
21 | def reconciler = Mock(Reconciler)
22 | def stateMachine = Mock(StateMachine)
23 | def driver = Mock(SchedulerDriver)
24 | def state = injector.getInstance(HdfsState.class)
25 | def scheduler
26 | def config = Mock(HdfsFrameworkConfig)
27 |
28 | @Shared
29 | int offerCount = 1
30 |
31 | def setup() {
32 | stateMachine.reconciler >> reconciler
33 | config.hdfsRole >> "*"
34 | scheduler = new HdfsScheduler(config, state, stateMachine)
35 | }
36 |
37 | def "resource offers - reconciling"() {
38 | given:
39 | def constraints = Mock(HdfsMesosConstraints)
40 | scheduler.hdfsMesosConstraints = constraints
41 |
42 | def offers = []
43 | offers << createOffer()
44 |
45 | when:
46 | scheduler.resourceOffers(driver, offers)
47 |
48 | then:
49 | 1 * stateMachine.currentPhase >> AcquisitionPhase.RECONCILING_TASKS
50 |
51 |
52 | then:
53 | 1 * stateMachine.correctPhase()
54 | 1 * driver.declineOffer(_)
55 | }
56 |
57 | def "resource offers - all node types"() {
58 | given:
59 | def constraints = Mock(HdfsMesosConstraints)
60 | def launcher = Mock(NodeLauncher)
61 | scheduler.hdfsMesosConstraints = constraints
62 | scheduler.launcher = launcher
63 |
64 | def offers = []
65 | offers << createOffer()
66 |
67 | when:
68 | scheduler.resourceOffers(driver, offers)
69 |
70 | then:
71 | stateMachine.currentPhase >> phase
72 | 0 * stateMachine.correctPhase()
73 | constraints.constraintsAllow(_) >> true
74 |
75 | then:
76 | 1 * launcher.tryLaunch(_, _, { nodeType.call(it) })
77 |
78 | where:
79 | phase | nodeType
80 | AcquisitionPhase.JOURNAL_NODES | { node -> node instanceof JournalNode }
81 | AcquisitionPhase.DATA_NODES | { node -> node instanceof DataNode }
82 | AcquisitionPhase.NAME_NODES | { node -> node instanceof NameNode }
83 |
84 | }
85 |
86 | def "resource offers - exception on launch"() {
87 | given:
88 | def constraints = Mock(HdfsMesosConstraints)
89 | scheduler.hdfsMesosConstraints = constraints
90 | def launcher = Mock(NodeLauncher)
91 | scheduler.launcher = launcher
92 |
93 | def offers = []
94 | offers << createOffer()
95 |
96 | when:
97 | scheduler.resourceOffers(driver, offers)
98 |
99 | then:
100 | stateMachine.currentPhase >> AcquisitionPhase.JOURNAL_NODES
101 | constraints.constraintsAllow(_) >> true
102 |
103 | then:
104 | 1 * launcher.tryLaunch(*_) >> { throw new Exception("houston, we have a problem") }
105 |
106 | then:
107 | 1 * driver.declineOffer(_)
108 | }
109 |
110 | def "registered"() {
111 | given:
112 | def frameworkID = FrameworkInfoUtil.createFrameworkId("frameworkId")
113 |
114 | when:
115 | scheduler.registered(driver, frameworkID, null)
116 |
117 | then:
118 | state.frameworkId == frameworkID
119 | 1 * stateMachine.reconcile(driver)
120 | }
121 |
122 | def "declines offers it doesn't need"() {
123 | def constraints = Mock(HdfsMesosConstraints)
124 | scheduler.hdfsMesosConstraints = constraints
125 | def launcher = Mock(NodeLauncher)
126 | scheduler.launcher = launcher
127 |
128 | def offers = []
129 | 4.times {
130 | offers << createOffer()
131 | }
132 |
133 | when:
134 | scheduler.resourceOffers(driver, offers)
135 |
136 | then:
137 | constraints.constraintsAllow(_) >> true
138 | stateMachine.currentPhase >> AcquisitionPhase.DATA_NODES
139 | 1 * launcher.tryLaunch(*_) >> true
140 | 3 * driver.declineOffer(*_)
141 | }
142 |
143 | def createOffer() {
144 | return OfferBuilder.createOffer("framework", offerCount++ as String, "slave", "host")
145 | }
146 |
147 | def createTestOfferId(int instanceNumber) {
148 | return OfferBuilder.createOfferID("offer" + instanceNumber)
149 | }
150 | }
151 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/scheduler/InMemoryStateFactory.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import com.google.inject.Inject;
4 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
5 | import org.apache.mesos.state.InMemoryState;
6 | import org.apache.mesos.state.State;
7 |
8 | /**
9 | * Generates in-memory Mesos State abstractions.
10 | */
11 | public class InMemoryStateFactory implements StateFactory {
12 |
13 | @Inject
14 | public State create(String path, HdfsFrameworkConfig config) {
15 | return new InMemoryState();
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/scheduler/SchedulerConstraintsTest.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import com.google.common.collect.Lists;
4 | import com.google.inject.Guice;
5 | import com.google.inject.Injector;
6 | import org.apache.hadoop.conf.Configuration;
7 | import org.apache.mesos.Protos.Offer;
8 | import org.apache.mesos.Protos.TaskInfo;
9 | import org.apache.mesos.SchedulerDriver;
10 | import org.apache.mesos.hdfs.SchedulerModuleTest;
11 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
12 | import org.apache.mesos.hdfs.state.AcquisitionPhase;
13 | import org.apache.mesos.hdfs.state.HdfsState;
14 | import org.apache.mesos.hdfs.state.StateMachine;
15 | import org.apache.mesos.protobuf.AttributeUtil;
16 | import org.apache.mesos.protobuf.OfferBuilder;
17 | import org.apache.mesos.protobuf.ResourceBuilder;
18 | import org.junit.Before;
19 | import org.junit.Test;
20 | import org.mockito.ArgumentCaptor;
21 | import org.mockito.Captor;
22 | import org.mockito.Mock;
23 | import org.mockito.MockitoAnnotations;
24 |
25 | import java.util.Collection;
26 |
27 | import static org.mockito.Mockito.*;
28 |
29 | @SuppressWarnings("unchecked")
30 | public class SchedulerConstraintsTest {
31 | private final Injector injector = Guice.createInjector(new SchedulerModuleTest());
32 | private Configuration config = new Configuration();
33 | private HdfsFrameworkConfig hdfsConfig = new HdfsFrameworkConfig(config);
34 | private HdfsState state = injector.getInstance(HdfsState.class);
35 | private StateMachine stateMachine = createMockStateMachine(AcquisitionPhase.DATA_NODES);
36 |
37 | @Mock
38 | SchedulerDriver driver;
39 |
40 | @Captor
41 | ArgumentCaptor<Collection<TaskInfo>> taskInfosCapture;
42 |
43 | @Before
44 | public void setup() {
45 | MockitoAnnotations.initMocks(this);
46 | }
47 |
48 | @Test
49 | public void acceptOffersWithConstraintMatch() {
50 | config.set("mesos.hdfs.constraints", "zone:east");
51 | HdfsScheduler scheduler = createDefaultScheduler();
52 |
53 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
54 | .addAttribute(AttributeUtil.createTextAttribute("zone", "east")).build();
55 |
56 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
57 | verify(driver, times(1)).launchTasks(anyList(), taskInfosCapture.capture());
58 | }
59 |
60 | @Test
61 | public void declineOffersWithNoConstraintMatch() {
62 | config.set("mesos.hdfs.constraints", "zone:east");
63 | HdfsScheduler scheduler = createDefaultScheduler();
64 |
65 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
66 | .addAttribute(AttributeUtil.createTextAttribute("zone", "west")).build();
67 |
68 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
69 | verify(driver, times(1)).declineOffer(offer.getId());
70 | }
71 |
72 | @Test
73 | public void acceptOffersWithConstraintMatchSet() {
74 | config.set("mesos.hdfs.constraints", "zone:east");
75 | HdfsScheduler scheduler = createDefaultScheduler();
76 |
77 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
78 | .addAttribute(AttributeUtil.createTextAttributeSet("zone", "east")).build();
79 |
80 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
81 | verify(driver, times(1)).launchTasks(anyList(), taskInfosCapture.capture());
82 | }
83 |
84 | @Test
85 | public void acceptOffersWithConstraintMatchScalar() {
86 | config.set("mesos.hdfs.constraints", "CPU:3");
87 | HdfsScheduler scheduler = createDefaultScheduler();
88 |
89 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
90 | .addAttribute(AttributeUtil.createScalarAttribute("CPU", 3.5))
91 | .build();
92 |
93 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
94 | verify(driver, times(1)).launchTasks(anyList(), taskInfosCapture.capture());
95 | }
96 |
97 | @Test
98 | public void acceptOffersWithConstraintMatchMultiple() {
99 | config.set("mesos.hdfs.constraints", "CPU:2;ZONE:west");
100 | HdfsScheduler scheduler = createDefaultScheduler();
101 |
102 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
103 | .addAttribute(AttributeUtil.createTextAttributeSet("ZONE", "west,east,north"))
104 | .addAttribute(AttributeUtil.createTextAttribute("TYPE", "hi-end"))
105 | .addAttribute(AttributeUtil.createScalarAttribute("CPU", 3.5))
106 | .build();
107 |
108 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
109 | verify(driver, times(1)).launchTasks(anyList(), taskInfosCapture.capture());
110 | }
111 |
112 | @Test
113 | public void declineOffersWithNoConstraintMatchMultiple() {
114 | config.set("mesos.hdfs.constraints", "TYPE:low-end;ZONE:north");
115 | HdfsScheduler scheduler = createDefaultScheduler();
116 |
117 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
118 | .addAttribute(AttributeUtil.createTextAttributeSet("ZONE", "west,east,north"))
119 | .addAttribute(AttributeUtil.createTextAttribute("TYPE", "hi-end"))
120 | .addAttribute(AttributeUtil.createScalarAttribute("CPU", 3.5))
121 | .build();
122 |
123 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
124 | verify(driver, times(1)).declineOffer(offer.getId());
125 | }
126 |
127 | private HdfsScheduler createDefaultScheduler() {
128 | Reconciler reconciler = mock(Reconciler.class);
129 | when(stateMachine.getReconciler()).thenReturn(reconciler);
130 | return new HdfsScheduler(hdfsConfig, state, stateMachine);
131 | }
132 |
133 | @Test
134 | public void acceptOffersWithRangeConstraintSpecified() {
135 | config.set("mesos.hdfs.constraints", "DISKSIZE:500");
136 | HdfsScheduler scheduler = createDefaultScheduler();
137 |
138 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
139 | .addAttribute(AttributeUtil.createRangeAttribute("DISKSIZE", 100, 1000))
140 | .build();
141 |
142 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
143 | verify(driver, times(1)).launchTasks(anyList(), taskInfosCapture.capture());
144 | }
145 |
146 | @Test
147 | public void acceptOffersWithNoConstraintSpecified() {
148 | config.set("mesos.hdfs.constraints", "");
149 | HdfsScheduler scheduler = createDefaultScheduler();
150 |
151 | Offer offer = createTestOfferBuilderWithResources(4, 5, 64 * 1024)
152 | .addAttribute(AttributeUtil.createTextAttribute("zone", "east")).build();
153 |
154 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
155 | verify(driver, times(1)).launchTasks(anyList(), taskInfosCapture.capture());
156 | }
157 |
158 | private StateMachine createMockStateMachine(AcquisitionPhase phase) {
159 | StateMachine stateMachine = mock(StateMachine.class);
160 | when(stateMachine.getCurrentPhase()).thenReturn(phase);
161 | return stateMachine;
162 | }
163 |
164 | private OfferBuilder createTestOfferBuilderWithResources(
165 | int instanceNumber,
166 | double cpus,
167 | int mem) {
168 |
169 | ResourceBuilder resourceBuilder = new ResourceBuilder("*");
170 | return new OfferBuilder("offer" + instanceNumber, "framework1", "slave" + instanceNumber, "host" + instanceNumber)
171 | .addResource(resourceBuilder.createCpuResource(cpus))
172 | .addResource(resourceBuilder.createMemResource(mem));
173 | }
174 | }
175 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/scheduler/SchedulerTest.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.scheduler;
2 |
3 | import com.google.common.collect.Lists;
4 | import com.google.inject.Guice;
5 | import com.google.inject.Injector;
6 | import org.apache.mesos.Protos.Offer;
7 | import org.apache.mesos.Protos.TaskInfo;
8 | import org.apache.mesos.SchedulerDriver;
9 | import org.apache.mesos.hdfs.SchedulerModuleTest;
10 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
11 | import org.apache.mesos.hdfs.state.AcquisitionPhase;
12 | import org.apache.mesos.hdfs.state.HdfsState;
13 | import org.apache.mesos.hdfs.state.StateMachine;
14 | import org.apache.mesos.hdfs.util.HDFSConstants;
15 | import org.apache.mesos.protobuf.OfferBuilder;
16 | import org.apache.mesos.protobuf.ResourceBuilder;
17 | import org.junit.Before;
18 | import org.junit.Test;
19 | import org.mockito.ArgumentCaptor;
20 | import org.mockito.Captor;
21 | import org.mockito.Mock;
22 | import org.mockito.MockitoAnnotations;
23 |
24 | import java.io.IOException;
25 | import java.util.Collection;
26 | import java.util.Iterator;
27 | import java.util.concurrent.ExecutionException;
28 |
29 | import static org.junit.Assert.assertTrue;
30 | import static org.mockito.Mockito.*;
31 |
32 | @SuppressWarnings("unchecked")
33 | public class SchedulerTest {
34 | private final Injector injector = Guice.createInjector(new SchedulerModuleTest());
35 | private HdfsFrameworkConfig config = injector.getInstance(HdfsFrameworkConfig.class);
36 | private final int TARGET_JOURNAL_COUNT = config.getJournalNodeCount();
37 |
38 | @Mock
39 | SchedulerDriver driver;
40 |
41 | @Captor
42 | ArgumentCaptor<Collection<TaskInfo>> taskInfosCapture;
43 |
44 | @Before
45 | public void init() {
46 | MockitoAnnotations.initMocks(this);
47 | }
48 |
49 | @Test
50 | public void launchesOnlyNeededNumberOfJournalNodes()
51 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
52 | StateMachine stateMachine = createMockStateMachine(AcquisitionPhase.JOURNAL_NODES);
53 | HdfsState state = mock(HdfsState.class);
54 | when(state.getJournalCount()).thenReturn(TARGET_JOURNAL_COUNT);
55 |
56 | HdfsScheduler scheduler = new HdfsScheduler(config, state, stateMachine);
57 | scheduler.resourceOffers(driver, Lists.newArrayList(createTestOffer(0)));
58 | verify(driver, never()).launchTasks(anyList(), anyList());
59 | }
60 |
61 | @Test
62 | public void launchesNamenodes() {
63 | StateMachine stateMachine = createMockStateMachine(AcquisitionPhase.NAME_NODES);
64 | HdfsState state = mock(HdfsState.class);
65 | when(state.hostOccupied(any(String.class), matches(HDFSConstants.JOURNAL_NODE_ID))).thenReturn(true);
66 |
67 | HdfsScheduler scheduler = new HdfsScheduler(config, state, stateMachine);
68 | scheduler.resourceOffers(driver, Lists.newArrayList(createTestOffer(0)));
69 |
70 | verify(driver, times(1)).launchTasks(anyList(), taskInfosCapture.capture());
71 | assertTrue(taskInfosCapture.getValue().size() == 2);
72 |
73 | Iterator<TaskInfo> taskInfoIterator = taskInfosCapture.getValue().iterator();
74 | String firstTask = taskInfoIterator.next().getName();
75 | assertTrue(firstTask.contains(HDFSConstants.NAME_NODE_ID)
76 | || firstTask.contains(HDFSConstants.ZKFC_NODE_ID));
77 |
78 | String secondTask = taskInfoIterator.next().getName();
79 | assertTrue(secondTask.contains(HDFSConstants.NAME_NODE_ID)
80 | || secondTask.contains(HDFSConstants.ZKFC_NODE_ID));
81 | }
82 |
83 | @Test
84 | public void declinesOffersWithNotEnoughResources() {
85 | StateMachine stateMachine = createMockStateMachine(AcquisitionPhase.DATA_NODES);
86 | HdfsState state = injector.getInstance(HdfsState.class);
87 | HdfsScheduler scheduler = new HdfsScheduler(config, state, stateMachine);
88 |
89 | Offer offer = createTestOfferWithResources(0, 0.1, 64);
90 | scheduler.resourceOffers(driver, Lists.newArrayList(offer));
91 | verify(driver, times(1)).declineOffer(offer.getId());
92 | }
93 |
94 | private StateMachine createMockStateMachine(AcquisitionPhase phase) {
95 | Reconciler reconciler = mock(Reconciler.class);
96 | StateMachine stateMachine = mock(StateMachine.class);
97 | when(stateMachine.getCurrentPhase()).thenReturn(phase);
98 | when(stateMachine.getReconciler()).thenReturn(reconciler);
99 | return stateMachine;
100 | }
101 |
102 | private Offer createTestOfferWithResources(int instanceNumber, double cpus, int mem) {
103 | ResourceBuilder resourceBuilder = new ResourceBuilder("*");
104 | return new OfferBuilder("offer" + instanceNumber, "framework1", "slave" + instanceNumber, "host" + instanceNumber)
105 | .addResource(resourceBuilder.createCpuResource(cpus))
106 | .addResource(resourceBuilder.createMemResource(mem))
107 | .build();
108 | }
109 |
110 | private Offer createTestOffer(int instanceNumber) {
111 | return new OfferBuilder("offer" + instanceNumber, "framework1", "slave" + instanceNumber, "host" + instanceNumber).build();
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/state/HdfsStateSpec.groovy:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.state
2 |
3 | import com.google.inject.Guice
4 | import org.apache.mesos.Protos
5 | import org.apache.mesos.hdfs.SchedulerModuleTest
6 | import org.apache.mesos.hdfs.scheduler.Task
7 | import org.apache.mesos.hdfs.util.HDFSConstants
8 | import org.apache.mesos.protobuf.ExecutorInfoBuilder
9 | import org.apache.mesos.protobuf.FrameworkInfoUtil
10 | import org.apache.mesos.protobuf.OfferBuilder
11 | import spock.lang.Shared
12 | import spock.lang.Specification
13 |
14 | import java.security.SecureRandom
15 |
16 | /**
17 | * Spock specification covering HdfsState task recording and status updates.
18 | *
19 | */
20 | class HdfsStateSpec extends Specification {
21 | def injector = Guice.createInjector(new SchedulerModuleTest())
22 | static final String TEST_HOST = "host"
23 | static final String TEST_TYPE = "type"
24 | static final String TEST_NAME = "name"
25 | static final String testIdName = "framework"
26 |
27 | @Shared
28 | SecureRandom random = new SecureRandom()
29 |
30 | def "setting frameworkID"() {
31 | given:
32 | def state = injector.getInstance(HdfsState.class)
33 | def expectedId = createFrameworkId()
34 | state.setFrameworkId(expectedId)
35 |
36 | expect:
37 | expectedId == state.getFrameworkId()
38 | }
39 |
40 | def "retrieves the same record from storage"() {
41 | given:
42 | def state = injector.getInstance(HdfsState.class);
43 | def tasks = state.getTasks();
44 |
45 | expect:
46 | tasks.size() == 0
47 |
48 | when:
49 | def inTask = createTask(TEST_NAME);
50 | state.recordTask(inTask);
51 | tasks = state.getTasks();
52 |
53 | then:
54 | tasks.size() == 1
55 | with(tasks.get(0)) { task ->
56 | task.status == inTask.status
57 | task.offer == inTask.offer
58 | task.type == inTask.type
59 | task.name == inTask.name
60 | }
61 | }
62 |
63 | def "update label none to label task"() {
64 | given:
65 | HdfsState state = injector.getInstance(HdfsState.class)
66 | Task task1 = createTask(TEST_NAME)
67 |
68 | def status = createTaskStatusWithLabel(task1.id, Protos.TaskState.TASK_RUNNING, "value")
69 | task1.status = status
70 | state.recordTask(task1)
71 |
72 | when:
73 | state.update(null, status)
74 |
75 | then:
76 | state.getTasks().get(0).status == status
77 | }
78 |
79 |
80 | def "update label to label task"() {
81 | given:
82 | HdfsState state = injector.getInstance(HdfsState.class)
83 | Task task1 = createTask(TEST_NAME)
84 |
85 | def status1 = createTaskStatusWithLabel(task1.getId(), Protos.TaskState.TASK_RUNNING, "value1")
86 | def status2 = createTaskStatusWithLabel(task1.getId(), Protos.TaskState.TASK_RUNNING, "value2")
87 | task1.status = status1
88 | state.recordTask(task1)
89 |
90 | when:
91 | state.update(null, status2)
92 |
93 | then:
94 | Task outTask = state.getTasks().get(0)
95 | outTask.status == status2
96 | }
97 |
98 | def "update label tasks"() {
99 | given:
100 | HdfsState state = injector.getInstance(HdfsState.class)
101 | Task task1 = createTask(TEST_NAME)
102 |
103 | def status1 = createTaskStatusWithLabel(task1.getId(), Protos.TaskState.TASK_RUNNING, value1)
104 | def status2 = createTaskStatusWithLabel(task1.getId(), Protos.TaskState.TASK_RUNNING, value2)
105 | task1.status = status1
106 | state.recordTask(task1)
107 |
108 | when:
109 | state.update(null, status2)
110 |
111 | then:
112 | Task outTask = state.getTasks().get(0)
113 | outTask.status == (status1Valid ? status1 : status2)
114 |
115 | where:
116 | value1 | value2 | status1Valid
117 | "value1" | "value2" | false
118 | "value1" | null | true
119 | }
120 |
121 |
122 | def createTaskStatusWithLabel(Protos.TaskID taskID, Protos.TaskState state, String value) {
123 | def status = createTaskStatus(taskID, state)
124 | if (!value) {
125 | return status
126 | }
127 | def builder = Protos.TaskStatus.newBuilder(status)
128 | return builder.setLabels(Protos.Labels.newBuilder()
129 | .addLabels(Protos.Label.newBuilder()
130 | .setKey(HDFSConstants.NN_STATUS_KEY)
131 | .setValue(value)))
132 | .build()
133 | }
134 |
135 | def createTaskStatus(Protos.TaskID taskID, Protos.TaskState state) {
136 | return Protos.TaskStatus.newBuilder()
137 | .setTaskId(taskID)
138 | .setState(state)
139 | .setSlaveId(Protos.SlaveID.newBuilder().setValue("slave").build())
140 | .setMessage("From Test")
141 | .build()
142 | }
143 |
144 | def createTask(String name) {
145 | def resources = createResourceList()
146 | def execInfo = createExecutorInfo()
147 | def offer = createOffer()
148 | def taskIdName = createTaskIdName()
149 | return new Task(resources, execInfo, offer, name, TEST_TYPE, taskIdName)
150 | }
151 |
152 | def createResourceList() {
153 | def r = Protos.Resource.newBuilder()
154 | .setName("name")
155 | .setType(Protos.Value.Type.SCALAR)
156 | .setScalar(Protos.Value.Scalar.newBuilder()
157 | .setValue(1).build())
158 | .setRole("role")
159 | .build()
160 |
161 | def resources = new ArrayList()
162 | resources.add(r)
163 | return resources
164 | }
165 |
166 | def createExecutorInfo() {
167 | return Protos.ExecutorInfo
168 | .newBuilder()
169 | .setExecutorId(ExecutorInfoBuilder.createExecutorId("executor"))
170 | .setCommand(
171 | Protos.CommandInfo
172 | .newBuilder()
173 | .addAllUris(
174 | Arrays.asList(
175 | Protos.CommandInfo.URI
176 | .newBuilder()
177 | .setValue("http://test_url/")
178 | .build())))
179 | .build()
180 | }
181 |
182 | def createOffer() {
183 | return OfferBuilder.createOffer("framework", "offer", "slave", TEST_HOST);
184 | }
185 |
186 | def createTestOfferId(int instanceNumber) {
187 | return OfferBuilder.createOfferID("offer" + instanceNumber);
188 | }
189 |
190 | def createTaskIdName() {
191 | return "taskIdName_" + new BigInteger(130, random).toString(32)
192 | }
193 |
194 | def createFrameworkId() {
195 | return FrameworkInfoUtil.createFrameworkId(testIdName)
196 | }
197 |
198 | }
199 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/state/HdfsStateTest.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.state;
2 |
3 | import com.google.inject.Guice;
4 | import com.google.inject.Injector;
5 | import org.apache.mesos.Protos.ExecutorInfo;
6 | import org.apache.mesos.Protos.Offer;
7 | import org.apache.mesos.Protos.Resource;
8 | import org.apache.mesos.Protos.TaskState;
9 | import org.apache.mesos.Protos.TaskStatus;
10 | import org.apache.mesos.hdfs.SchedulerModuleTest;
11 | import org.apache.mesos.hdfs.scheduler.Task;
12 | import org.apache.mesos.hdfs.util.HDFSConstants;
13 | import org.apache.mesos.hdfs.util.TaskStatusFactory;
14 | import org.apache.mesos.protobuf.CommandInfoBuilder;
15 | import org.apache.mesos.protobuf.ExecutorInfoBuilder;
16 | import org.apache.mesos.protobuf.OfferBuilder;
17 | import org.apache.mesos.protobuf.ResourceBuilder;
18 | import org.apache.mesos.protobuf.TaskStatusBuilder;
19 | import org.junit.Test;
20 |
21 | import java.io.IOException;
22 | import java.math.BigInteger;
23 | import java.security.SecureRandom;
24 | import java.util.ArrayList;
25 | import java.util.List;
26 | import java.util.concurrent.ExecutionException;
27 |
28 | import static org.junit.Assert.*;
29 |
30 | public class HdfsStateTest {
31 | private final Injector injector = Guice.createInjector(new SchedulerModuleTest());
32 | private SecureRandom random = new SecureRandom();
33 | private static final String testIdName = "framework";
34 | private static final String TEST_HOST = "host";
35 | private static final String TEST_TYPE = "type";
36 | private static final String TEST_NAME = "name";
37 |
38 |
39 | @Test
40 | public void testTerminalStatusUpdate()
41 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
42 | HdfsState state = injector.getInstance(HdfsState.class);
43 | Task inTask = createTask();
44 | state.recordTask(inTask);
45 |
46 | TaskStatus status = createTaskStatus(inTask.getId().getValue(), TaskState.TASK_FAILED);
47 | state.update(null, status);
48 | List<Task> tasks = state.getTasks();
49 | assertEquals(0, tasks.size());
50 | }
51 |
52 | @Test
53 | public void testNonTerminalStatusUpdate()
54 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
55 | HdfsState state = injector.getInstance(HdfsState.class);
56 | Task inTask = createTask();
57 | state.recordTask(inTask);
58 |
59 | TaskStatus status = createTaskStatus(inTask.getId().getValue(), TaskState.TASK_RUNNING);
60 | state.update(null, status);
61 | List<Task> tasks = state.getTasks();
62 | assertEquals(1, tasks.size());
63 |
64 | Task outTask = tasks.get(0);
65 | assertEquals(status, outTask.getStatus());
66 | }
67 |
68 | @Test
69 | public void testHostOccupied()
70 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
71 | HdfsState state = createDefaultState();
72 | assertFalse(state.hostOccupied("wrong_host", TEST_TYPE));
73 | assertFalse(state.hostOccupied(TEST_HOST, "wrong_type"));
74 | assertFalse(state.hostOccupied("wrong_host", "wrong_type"));
75 | assertTrue(state.hostOccupied(TEST_HOST, TEST_TYPE));
76 | }
77 |
78 | @Test
79 | public void testGetNameNodeTasks()
80 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
81 | HdfsState state = injector.getInstance(HdfsState.class);
82 | Task inTask = createNameNodeTask();
83 | state.recordTask(inTask);
84 |
85 | List<Task> nameTasks = state.getNameNodeTasks();
86 | assertEquals(1, nameTasks.size());
87 |
88 | List<Task> journalTasks = state.getJournalNodeTasks();
89 | assertEquals(0, journalTasks.size());
90 | }
91 |
92 | @Test
93 | public void testGetJournalNodeTasks()
94 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
95 | HdfsState state = injector.getInstance(HdfsState.class);
96 | Task inTask = createJournalNodeTask();
97 | state.recordTask(inTask);
98 |
99 | List<Task> journalTasks = state.getJournalNodeTasks();
100 | assertEquals(1, journalTasks.size());
101 |
102 | List<Task> nameTasks = state.getNameNodeTasks();
103 | assertEquals(0, nameTasks.size());
104 | }
105 |
106 | @Test
107 | public void testNameNodesInitialized()
108 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
109 | HdfsState state = injector.getInstance(HdfsState.class);
110 | assertFalse(state.nameNodesInitialized());
111 |
112 | Task namenode1Task = createNameNodeTask();
113 | Task namenode2Task = createNameNodeTask();
114 | state.recordTask(namenode1Task);
115 | state.recordTask(namenode2Task);
116 |
117 | TaskStatus status1 = TaskStatusFactory.createNameNodeStatus(namenode1Task.getId(), true);
118 | TaskStatus status2 = TaskStatusFactory.createNameNodeStatus(namenode2Task.getId(), true);
119 |
120 | state.update(null, status1);
121 | assertFalse(state.nameNodesInitialized());
122 |
123 | state.update(null, status2);
124 | assertTrue(state.nameNodesInitialized());
125 | }
126 |
127 | private HdfsState createDefaultState()
128 | throws ClassNotFoundException, IOException, InterruptedException, ExecutionException {
129 | HdfsState state = injector.getInstance(HdfsState.class);
130 | Task inTask = createTask();
131 | state.recordTask(inTask);
132 | return state;
133 | }
134 |
135 | private Task createTask() {
136 | return createTask(TEST_NAME);
137 | }
138 |
139 | private Task createNameNodeTask() {
140 | return createTask(HDFSConstants.NAME_NODE_ID);
141 | }
142 |
143 | private Task createJournalNodeTask() {
144 | return createTask(HDFSConstants.JOURNAL_NODE_ID);
145 | }
146 |
147 | private Task createTask(String name) {
148 | List<Resource> resources = createResourceList();
149 | ExecutorInfo execInfo = createExecutorInfo();
150 | Offer offer = createOffer();
151 | String taskIdName = createTaskIdName();
152 | return new Task(resources, execInfo, offer, name, TEST_TYPE, taskIdName);
153 | }
154 |
155 | public String createTaskIdName() {
156 | return "taskIdName_" + new BigInteger(130, random).toString(32);
157 | }
158 |
159 | private List<Resource> createResourceList() {
160 | Resource r = ResourceBuilder.createScalarResource("name", 1, "role");
161 | List<Resource> resources = new ArrayList<Resource>();
162 | resources.add(r);
163 | return resources;
164 | }
165 |
166 | private TaskStatus createTaskStatus(String taskId, TaskState state) {
167 | return TaskStatusBuilder.createTaskStatus(taskId, "slave", state, "From Test");
168 | }
169 |
170 |
171 | private ExecutorInfo createExecutorInfo() {
172 |
173 | ExecutorInfoBuilder builder = new ExecutorInfoBuilder("executor", "executor");
174 | builder.addCommandInfo(new CommandInfoBuilder()
175 | .addUri("http://test_url/")
176 | .build());
177 | return builder.build();
178 | }
179 |
180 | private Offer createOffer() {
181 | return new OfferBuilder("offer1", "framework", "slave", TEST_HOST).build();
182 | }
183 | }
184 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/java/org/apache/mesos/hdfs/state/StateMachineTest.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.hdfs.state;
2 |
3 | import com.google.inject.Guice;
4 | import com.google.inject.Injector;
5 | import org.apache.mesos.hdfs.SchedulerModuleTest;
6 | import org.apache.mesos.hdfs.config.HdfsFrameworkConfig;
7 | import org.apache.mesos.hdfs.scheduler.Reconciler;
8 | import org.apache.mesos.hdfs.util.HDFSConstants;
9 | import org.junit.Test;
10 |
11 | import java.io.IOException;
12 | import java.util.concurrent.ExecutionException;
13 |
14 | import static org.junit.Assert.assertEquals;
15 | import static org.mockito.Mockito.mock;
16 | import static org.mockito.Mockito.when;
17 |
18 | public class StateMachineTest {
19 | private final Injector injector = Guice.createInjector(new SchedulerModuleTest());
20 | private final HdfsFrameworkConfig config = injector.getInstance(HdfsFrameworkConfig.class);
21 |
22 | private final int TARGET_JOURNAL_COUNT = config.getJournalNodeCount();
23 | private final int TARGET_NAME_COUNT = HDFSConstants.TOTAL_NAME_NODES;
24 | private final Reconciler completeReconciler = createMockReconciler(true);
25 | private final Reconciler incompleteReconciler = createMockReconciler(false);
26 |
27 | @Test
28 | public void testInitialCorrectPhase() {
29 | StateMachine sm = createStateMachine(incompleteReconciler);
30 | assertEquals(AcquisitionPhase.RECONCILING_TASKS, sm.getCurrentPhase());
31 | }
32 |
33 | @Test
34 | public void testStayInReconciliationIfIncomplete() {
35 | StateMachine sm = createStateMachine(incompleteReconciler);
36 | sm.correctPhase();
37 | assertEquals(AcquisitionPhase.RECONCILING_TASKS, sm.getCurrentPhase());
38 | }
39 |
40 | @Test
41 | public void testTransitionFromReconcilingToJournal() {
42 | StateMachine sm = createStateMachine(completeReconciler);
43 | sm.correctPhase();
44 | assertEquals(AcquisitionPhase.JOURNAL_NODES, sm.getCurrentPhase());
45 | }
46 |
47 | @Test
48 | public void testStayInJournalIfTooFew()
49 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
50 | HdfsState state = createMockState(0, 0, false);
51 | StateMachine sm = createStateMachine(state, completeReconciler);
52 | sm.correctPhase();
53 | assertEquals(AcquisitionPhase.JOURNAL_NODES, sm.getCurrentPhase());
54 |
55 | setMockState(state, TARGET_JOURNAL_COUNT - 1, 0, false);
56 | sm.correctPhase();
57 | assertEquals(AcquisitionPhase.JOURNAL_NODES, sm.getCurrentPhase());
58 | }
59 |
60 | @Test
61 | public void testTransitionFromJournalToName()
62 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
63 | HdfsState state = createMockState(0, 0, false);
64 | StateMachine sm = createStateMachine(state, completeReconciler);
65 | sm.correctPhase();
66 | assertEquals(AcquisitionPhase.JOURNAL_NODES, sm.getCurrentPhase());
67 |
68 | setMockState(state, TARGET_JOURNAL_COUNT, 0, false);
69 | sm.correctPhase();
70 | assertEquals(AcquisitionPhase.NAME_NODES, sm.getCurrentPhase());
71 | }
72 |
73 | @Test
74 | public void testStayInNameIfTooFew()
75 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
76 | HdfsState state = createMockState(TARGET_JOURNAL_COUNT, TARGET_NAME_COUNT - 1, false);
77 | StateMachine sm = createStateMachine(state, completeReconciler);
78 | sm.correctPhase();
79 | assertEquals(AcquisitionPhase.NAME_NODES, sm.getCurrentPhase());
80 | }
81 |
82 | @Test
83 | public void testStayInNameIfNotInitialized()
84 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
85 | HdfsState state = createMockState(TARGET_JOURNAL_COUNT, TARGET_NAME_COUNT, false);
86 | StateMachine sm = createStateMachine(state, completeReconciler);
87 | sm.correctPhase();
88 | assertEquals(AcquisitionPhase.NAME_NODES, sm.getCurrentPhase());
89 | }
90 |
91 | @Test
92 | public void testTransitionToData()
93 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
94 | HdfsState state = createMockState(TARGET_JOURNAL_COUNT, TARGET_NAME_COUNT, true);
95 | StateMachine sm = createStateMachine(state, completeReconciler);
96 | sm.correctPhase();
97 | assertEquals(AcquisitionPhase.DATA_NODES, sm.getCurrentPhase());
98 | }
99 |
100 | @Test
101 | public void testTransitionFromDataToReconciling()
102 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
103 | HdfsState state = createMockState(TARGET_JOURNAL_COUNT, TARGET_NAME_COUNT, true);
104 | Reconciler reconciler = createMockReconciler(true);
105 | StateMachine sm = createStateMachine(state, reconciler);
106 | sm.correctPhase();
107 | assertEquals(AcquisitionPhase.DATA_NODES, sm.getCurrentPhase());
108 |
109 | setMockReconciler(reconciler, false);
110 | sm.correctPhase();
111 | assertEquals(AcquisitionPhase.RECONCILING_TASKS, sm.getCurrentPhase());
112 | }
113 |
114 | private StateMachine createStateMachine(Reconciler reconciler) {
115 | return createStateMachine(
116 | injector.getInstance(HdfsState.class),
117 | reconciler);
118 | }
119 |
120 | private StateMachine createStateMachine(HdfsState state, Reconciler reconciler) {
121 | return new StateMachine(state, config, reconciler);
122 | }
123 |
124 | private HdfsState createMockState(int journalCount, int nameCount, boolean nameInitialized)
125 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
126 | HdfsState state = mock(HdfsState.class);
127 | return setMockState(state, journalCount, nameCount, nameInitialized);
128 | }
129 |
130 | private HdfsState setMockState(
131 | HdfsState state,
132 | int journalCount,
133 | int nameCount,
134 | boolean nameInitialized)
135 | throws ClassNotFoundException, InterruptedException, ExecutionException, IOException {
136 | when(state.getJournalCount()).thenReturn(journalCount);
137 | when(state.getNameCount()).thenReturn(nameCount);
138 | when(state.nameNodesInitialized()).thenReturn(nameInitialized);
139 | return state;
140 | }
141 |
142 | private Reconciler createMockReconciler(boolean complete) {
143 | Reconciler reconciler = mock(Reconciler.class);
144 | return setMockReconciler(reconciler, complete);
145 | }
146 |
147 | private Reconciler setMockReconciler(Reconciler reconciler, boolean complete) {
148 | when(reconciler.complete()).thenReturn(complete);
149 | return reconciler;
150 | }
151 | }
152 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/resources/gcs-credentials.json:
--------------------------------------------------------------------------------
1 | {
2 | "private_key_id": "blablahblah",
3 | "private_key": "-----BEGIN PRIVATE KEY-----\nMIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMDVA29zEbjXxz1z\njtdKSJ8PP9olmBd5jGUCyqqUI+vWx9KnScApdaoAHQdxP12dJo24NSjNYuqua+Wl\nYuQQfBKphmBdlxm3YhKrPohMRw3/ApgZotDotnP2i+K/JBHJpHoOQu1OheOb5L5L\n0ypT+l48ZSUMAzYqahyiVQU0/zhFAgMBAAECgYEAo91gifjy+m0DdwkYPYN2qxQ+\nYpbH5Er6L2xr5QD2dZeTP0PBvHZ+8vQdtxFZk6fT92Kuafn/MOFbUb91hfEsPQSH\ngmooOFJbz/6zbtt9S6y55Yt4KNyD43tTiuxE9Hu83Y1MdNlr9/7bPtccMHgXqSwJ\nQZdayFDL+0r4OZ5/FBECQQDzBrygnlNwWghxAR714jZ3GMGS6fwqMscZdb4nY8+c\naXjZIWjlk0cWH1ruce4XLVe6QO+HZor1z1KK2R1FVXEXAkEAyyBPBbdl3QfYYzoO\n0vIX7kQIjPKCUBkRf5gJH6rHemTUL8VoXIOyCmFlvNuevAmH3Hb/Xa9xeQVN0eRb\n47VjAwJAJPmKa1mLUlWwYRkNj9Vp+fa/RM3qurTdC+eZFb8e0CpP46Esp3kf4KLG\nn+6fjdEtPr4wc0ZLsBhp84wS4wCb4wJALhA/m15BvWQgEDCEWBYKkz/eaIg+QQfO\nTg8eUo4Z+omPDN5JkmFTKMN5nOB5GM9YfgiGVKqKoxUu1qBgrjzeHQJAAUMU4psi\nDs/wClupcOYcKfPEMvxHNXRIKqB3ochxQ8KEs3TuHTMXuq69opPfhVcUP6rnGICM\nu73nK92VI2KjXg\u003d\u003d\n-----END PRIVATE KEY-----\n",
4 | "client_email": "lolollo@lolol.com",
5 | "client_id": "herpderp.apps.googleusercontent.com",
6 | "type": "service_account"
7 | }
8 |
--------------------------------------------------------------------------------
/hdfs-scheduler/src/test/resources/s3-credentials.json:
--------------------------------------------------------------------------------
1 | {
2 | "accessKey": "lol",
3 | "secretKey": "lolololol"
4 | }
5 |
--------------------------------------------------------------------------------
/mesos-commons/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: "jacoco"
2 |
3 | jacocoTestReport {
4 | reports {
5 | xml.enabled false
6 | csv.enabled false
7 | html.destination "${buildDir}/jacoco"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/collections/MapUtil.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.collections;
2 |
3 | import com.google.common.base.Predicate;
4 | import com.google.common.collect.Maps;
5 |
6 | import java.util.HashMap;
7 | import java.util.Map;
8 | import java.util.Properties;
9 |
10 | /**
11 | * Utility methods for filtering {@link java.util.Properties} into maps. */
12 | public class MapUtil {
13 |
14 | public static Map<String, String> propertyMapFilter(Properties properties, Predicate<String> predicate) {
15 | if (properties == null) {
16 | return new HashMap<>();
17 | }
18 | if (predicate == null) {
19 | return Maps.fromProperties(properties);
20 | }
21 |
22 | return Maps.filterKeys(Maps.fromProperties(properties), predicate);
23 | }
24 | }
25 |
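A minimal usage sketch (illustrative only, not a file in this repository) showing how MapUtil combines with StartsWithPredicate to filter environment-style properties; the keys and values below are hypothetical:

package org.apache.mesos.collections;

import java.util.Map;
import java.util.Properties;

public class MapUtilExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("MESOS_NATIVE_LIBRARY", "/usr/lib/libmesos.so"); // hypothetical entries
    props.put("JAVA_HOME", "/usr/lib/jvm/default");

    // Keep only the entries whose keys start with "MESOS".
    Map<String, String> mesosOnly =
        MapUtil.propertyMapFilter(props, new StartsWithPredicate("MESOS"));
    System.out.println(mesosOnly); // {MESOS_NATIVE_LIBRARY=/usr/lib/libmesos.so}
  }
}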
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/collections/StartsWithPredicate.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.collections;
2 |
3 | import com.google.common.base.Predicate;
4 |
5 | /**
6 | * A Predicate for filtering a collection based on what a string starts with.
7 | */
8 | public class StartsWithPredicate implements Predicate<String> {
9 |
10 | private String startWithString;
11 |
12 | public StartsWithPredicate(String startWithString) {
13 | this.startWithString = startWithString;
14 | }
15 |
16 | @Override
17 | public boolean apply(String s) {
18 | return s.startsWith(startWithString);
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/file/FileUtils.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.file;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | import java.io.File;
7 | import java.io.IOException;
8 |
9 | /**
10 | * Used for hdfs file system operations.
11 | */
12 | public final class FileUtils {
13 |
14 | private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class);
15 |
16 | private FileUtils() {
17 | }
18 |
19 | public static void createDir(File dataDir) {
20 | if (dataDir.exists()) {
21 | LOG.info("data dir exists: " + dataDir);
22 | } else if (!dataDir.mkdirs()) {
23 | LOG.error("unable to create dir: " + dataDir);
24 | }
25 | }
26 |
27 | /**
28 | * Delete a file or directory.
29 | */
30 | public static boolean deleteDirectory(File fileToDelete) {
31 | boolean deleted = false;
32 |
33 | try {
34 | if (fileToDelete.isDirectory()) {
35 | org.apache.commons.io.FileUtils.deleteDirectory(fileToDelete);
36 | deleted = true;
37 | } else {
38 | LOG.error("File is not a directory: " + fileToDelete);
39 | }
40 | } catch (IOException e) {
41 | LOG.error("Unable to delete directory: " + fileToDelete);
42 | }
43 |
44 | return deleted;
45 | }
46 | }
47 |
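A brief usage sketch (illustrative only, not a file in this repository); the directory path is hypothetical:

package org.apache.mesos.file;

import java.io.File;

public class FileUtilsExample {
  public static void main(String[] args) {
    File dataDir = new File("/tmp/hdfs-data"); // hypothetical path
    FileUtils.createDir(dataDir);              // creates the directory if it does not exist
    boolean removed = FileUtils.deleteDirectory(dataDir); // recursive delete; false on failure
    System.out.println("removed: " + removed);
  }
}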
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/process/FailureUtils.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.process;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 |
6 | /**
7 | * Failure utilities.
8 | */
9 | public class FailureUtils {
10 | private static final Log log = LogFactory.getLog(FailureUtils.class);
11 |
12 | @edu.umd.cs.findbugs.annotations.SuppressWarnings(
13 | value = "DM_EXIT",
14 | justification = "Framework components should fail fast sometimes.")
15 | public static void exit(String msg, Integer exitCode) {
16 | log.fatal(msg);
17 | System.exit(exitCode);
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/process/ProcessFailureHandler.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.process;
2 |
3 | /**
4 | * Process failure handler interface.
5 | */
6 | public interface ProcessFailureHandler {
7 | public void handle();
8 | }
9 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/process/ProcessUtil.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.process;
2 |
3 | import org.apache.commons.collections.CollectionUtils;
4 | import org.apache.mesos.stream.StreamUtil;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.io.IOException;
9 | import java.util.Arrays;
10 | import java.util.Map;
11 |
12 | /**
13 | * Helps to create processes for command lines commonly used in mesos.
14 | */
15 | public class ProcessUtil {
16 |
17 | private static final Logger LOG = LoggerFactory.getLogger(ProcessUtil.class);
18 |
19 | public static Process startCmd(String cmd) throws IOException {
20 | LOG.info(String.format("Starting process: %s", cmd));
21 | return startCmd("sh", "-c", cmd);
22 | }
23 |
24 | public static Process startCmd(String... cmd) throws IOException {
25 | LOG.info(String.format("Starting process: %s", Arrays.asList(cmd)));
26 | return startCmd(null, cmd);
27 | }
28 |
29 | public static Process startCmd(Map<String, String> envMap, String... cmd) throws IOException {
30 | LOG.info(String.format("Starting process: %s", Arrays.asList(cmd)));
31 | ProcessBuilder processBuilder = new ProcessBuilder(cmd);
32 | setEnvironment(envMap, processBuilder);
33 | Process process = processBuilder.start();
34 | StreamUtil.redirectProcess(process);
35 | return process;
36 | }
37 |
38 | private static void setEnvironment(Map<String, String> envMap, ProcessBuilder processBuilder) {
39 | if (envMap != null && CollectionUtils.isNotEmpty(envMap.keySet())) {
40 | for (Map.Entry<String, String> env : envMap.entrySet()) {
41 | processBuilder.environment().put(env.getKey(), env.getValue());
42 | }
43 | }
44 | }
45 | }
46 |
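An illustrative sketch (not a file in this repository) of starting a command with an extra environment variable; the command and variable are hypothetical:

package org.apache.mesos.process;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class ProcessUtilExample {
  public static void main(String[] args) throws IOException {
    Map<String, String> env = new HashMap<>();
    env.put("HADOOP_HEAPSIZE", "512"); // hypothetical variable

    // Logs the command, applies the environment and redirects stdout/stderr via StreamUtil.
    Process process = ProcessUtil.startCmd(env, "sh", "-c", "echo started");
    System.out.println("started: " + process);
  }
}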
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/process/ProcessWatcher.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.process;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 |
6 | /**
7 | * Invokes the specified handler on process exit.
8 | */
9 | public class ProcessWatcher {
10 | private final Log log = LogFactory.getLog(ProcessWatcher.class);
11 | private ProcessFailureHandler handler;
12 |
13 | public ProcessWatcher(ProcessFailureHandler handler) {
14 | this.handler = handler;
15 | }
16 |
17 | public void watch(final Process proc) {
18 | log.info("Watching process: " + proc);
19 |
20 | Runnable r = new Runnable() {
21 | public void run() {
22 | try {
23 | proc.waitFor();
24 | } catch (Exception ex) {
25 | log.error("Process exited with exception: " + ex);
26 | }
27 |
28 | log.error("Handling failure of process: " + proc);
29 | handler.handle();
30 | }
31 | };
32 |
33 | new Thread(r).start();
34 | }
35 | }
36 |
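An illustrative sketch (not a file in this repository) wiring ProcessWatcher to a handler that fails fast; the command and exit code are hypothetical:

package org.apache.mesos.process;

import java.io.IOException;

public class ProcessWatcherExample {
  public static void main(String[] args) throws IOException {
    ProcessFailureHandler failFast = new ProcessFailureHandler() {
      @Override
      public void handle() {
        FailureUtils.exit("Managed process exited unexpectedly", 2); // hypothetical exit code
      }
    };
    // The watcher thread blocks in waitFor() and invokes the handler once the process exits.
    new ProcessWatcher(failFast).watch(ProcessUtil.startCmd("sleep 5"));
  }
}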
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/AttributeUtil.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | import java.util.ArrayList;
6 | import java.util.Arrays;
7 |
8 | /**
9 | * Utility class for creating attributes. This class reduces the overhead of protobuf and makes code
10 | * easier to read.
11 | */
12 | public class AttributeUtil {
13 |
14 | public static Protos.Attribute createScalarAttribute(String name, double value) {
15 | return Protos.Attribute.newBuilder()
16 | .setName(name)
17 | .setType(Protos.Value.Type.SCALAR)
18 | .setScalar(Protos.Value.Scalar.newBuilder().setValue(value).build())
19 | .build();
20 | }
21 |
22 | public static Protos.Attribute createRangeAttribute(String name, long begin, long end) {
23 | Protos.Value.Range range = Protos.Value.Range.newBuilder().setBegin(begin).setEnd(end).build();
24 | return Protos.Attribute.newBuilder()
25 | .setName(name)
26 | .setType(Protos.Value.Type.RANGES)
27 | .setRanges(Protos.Value.Ranges.newBuilder().addRange(range))
28 | .build();
29 | }
30 |
31 | public static Protos.Attribute createTextAttribute(String name, String value) {
32 | return Protos.Attribute.newBuilder()
33 | .setName(name)
34 | .setType(Protos.Value.Type.TEXT)
35 | .setText(Protos.Value.Text.newBuilder().setValue(value).build())
36 | .build();
37 | }
38 |
39 | public static Protos.Attribute createTextAttributeSet(String name, String values) {
40 | return Protos.Attribute.newBuilder()
41 | .setName(name)
42 | .setType(Protos.Value.Type.SET)
43 | .setSet(Protos.Value.Set.newBuilder().addAllItem(new ArrayList<String>(Arrays.asList(values.split(",")))))
44 | .build();
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/CommandInfoBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | import java.util.List;
6 | import java.util.Map;
7 |
8 | /**
9 | * Builder class for working with protobufs. It includes 2 different approaches;
10 | * 1) static functions useful for developers that want helpful protobuf functions for CommandInfo.
11 | * 2) builder class
12 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
13 | * helpful functions.
14 | *
15 | * This builds CommandInfo objects.
16 | */
17 | public class CommandInfoBuilder {
18 |
19 | private Protos.CommandInfo.Builder builder = Protos.CommandInfo.newBuilder();
20 | private EnvironmentBuilder environmentBuilder = new EnvironmentBuilder();
21 |
22 | public CommandInfoBuilder addUri(String uri) {
23 | builder.addUris(createCmdInfoUri(uri));
24 | return this;
25 | }
26 |
27 | public CommandInfoBuilder addEnvironmentVar(String key, String value) {
28 | environmentBuilder.addVariable(key, value);
29 | builder.setEnvironment(environmentBuilder.build());
30 | return this;
31 | }
32 |
33 | public CommandInfoBuilder addEnvironmentMap(Map<String, String> envMap) {
34 | environmentBuilder.addVariable(envMap);
35 | builder.setEnvironment(environmentBuilder.build());
36 | return this;
37 | }
38 |
39 | public CommandInfoBuilder addUriList(List<Protos.CommandInfo.URI> uriList) {
40 | builder.addAllUris(uriList);
41 | return this;
42 | }
43 |
44 | public CommandInfoBuilder setCommand(String cmd) {
45 | builder.setValue(cmd);
46 | return this;
47 | }
48 |
49 | public Protos.CommandInfo build() {
50 | return builder.build();
51 | }
52 |
53 | public Protos.CommandInfo.Builder builder() {
54 | return builder;
55 | }
56 |
57 | public static Protos.CommandInfo.Builder createCommandInfoBuilder() {
58 | return Protos.CommandInfo.newBuilder();
59 | }
60 |
61 | public static Protos.CommandInfo createCmdInfo(String cmd,
62 | List<Protos.CommandInfo.URI> uriList,
63 | List<Protos.Environment.Variable> executorEnvironment) {
64 | return createCommandInfoBuilder()
65 | .addAllUris(uriList)
66 | .setEnvironment(EnvironmentBuilder.createEnvironmentBuilder()
67 | .addAllVariables(executorEnvironment))
68 | .setValue(cmd)
69 | .build();
70 | }
71 |
72 | public static Protos.CommandInfo.URI createCmdInfoUri(String uri) {
73 | return Protos.CommandInfo.URI.newBuilder().setValue(uri).build();
74 | }
75 | }
76 |
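An illustrative sketch (not a file in this repository) of the two approaches described in the class comment; the URI, variable and command are hypothetical:

package org.apache.mesos.protobuf;

import org.apache.mesos.Protos;

public class CommandInfoBuilderExample {
  public static void main(String[] args) {
    // 1) Fluent builder.
    Protos.CommandInfo cmdInfo = new CommandInfoBuilder()
        .addUri("http://example.com/hdfs-mesos-executor.tgz") // hypothetical URI
        .addEnvironmentVar("HADOOP_OPTS", "-Xmx256m")          // hypothetical variable
        .setCommand("./bin/hdfs-mesos-datanode")               // hypothetical command
        .build();

    // 2) Static helpers for one-off protobuf pieces.
    Protos.CommandInfo.URI uri = CommandInfoBuilder.createCmdInfoUri("http://example.com/conf.tgz");
    System.out.println(cmdInfo.getValue() + " / " + uri.getValue());
  }
}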
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/EnvironmentBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | import java.util.ArrayList;
6 | import java.util.List;
7 | import java.util.Map;
8 |
9 | /**
10 | * Builder class for working with protobufs. It includes 2 different approaches;
11 | * 1) static functions useful for developers that want helpful protobuf functions for Environment.Builder.
12 | * 2) builder class
13 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
14 | * helpful functions.
15 | *
16 | * This builds Environment objects usually used with ExecutorInfo.
17 | */
18 | public class EnvironmentBuilder {
19 |
20 | Protos.Environment.Builder builder = Protos.Environment.newBuilder();
21 |
22 | public EnvironmentBuilder addVariable(String key, String value) {
23 | builder.addVariables(createEnvironment(key, value));
24 | return this;
25 | }
26 |
27 | public EnvironmentBuilder addVariable(Map<String, String> envMap) {
28 | builder.addAllVariables(createEnvironment(envMap));
29 | return this;
30 | }
31 |
32 |
33 | public Protos.Environment build() {
34 | return builder.build();
35 | }
36 |
37 | public Protos.Environment.Builder builder() {
38 | return builder;
39 | }
40 |
41 | public static Protos.Environment.Variable createEnvironment(String key, String value) {
42 | return Protos.Environment.Variable.newBuilder().setName(key).setValue(value).build();
43 | }
44 |
45 | public static List<Protos.Environment.Variable> createEnvironment(Map<String, String> envMap) {
46 | List<Protos.Environment.Variable> list = new ArrayList<>(envMap.size());
47 | for (Map.Entry<String, String> var : envMap.entrySet()) {
48 | list.add(createEnvironment(var.getKey(), var.getValue()));
49 | }
50 | return list;
51 | }
52 |
53 | public static Protos.Environment.Builder createEnvironmentBuilder() {
54 | return Protos.Environment.newBuilder();
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/ExecutorInfoBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | import java.util.List;
6 |
7 | /**
8 | * Builder class for working with protobufs. It includes 2 different approaches;
9 | * 1) static functions useful for developers that want helpful protobuf functions for ExecutorInfo.
10 | * 2) builder class
11 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
12 | * helpful functions.
13 | *
14 | * This builds ExecutorInfo objects.
15 | */
16 | public class ExecutorInfoBuilder {
17 |
18 | private Protos.ExecutorInfo.Builder builder = Protos.ExecutorInfo.newBuilder();
19 |
20 | public ExecutorInfoBuilder(String executorId) {
21 | setExecutorId(executorId);
22 | }
23 |
24 | public ExecutorInfoBuilder(String executorId, String name) {
25 | this(executorId);
26 | setName(name);
27 | }
28 |
29 | public ExecutorInfoBuilder setExecutorId(String executorId) {
30 | builder.setExecutorId(createExecutorId(executorId));
31 | return this;
32 | }
33 |
34 | public ExecutorInfoBuilder setName(String name) {
35 | builder.setName(name);
36 | return this;
37 | }
38 |
39 | public ExecutorInfoBuilder addAllResources(List<Protos.Resource> resourceList) {
40 | builder.addAllResources(resourceList);
41 | return this;
42 | }
43 |
44 | public ExecutorInfoBuilder addResource(Protos.Resource resource) {
45 | builder.addResources(resource);
46 | return this;
47 | }
48 |
49 | public ExecutorInfoBuilder addCommandInfo(Protos.CommandInfo commandInfo) {
50 | builder.setCommand(commandInfo);
51 | return this;
52 | }
53 |
54 | public Protos.ExecutorInfo build() {
55 | return builder.build();
56 | }
57 |
58 | public Protos.ExecutorInfo.Builder builder() {
59 | return builder;
60 | }
61 |
62 | public static Protos.ExecutorID createExecutorId(String executorId) {
63 | return Protos.ExecutorID.newBuilder().setValue(executorId).build();
64 | }
65 |
66 | public static Protos.ExecutorInfo.Builder createExecutorInfoBuilder() {
67 | return Protos.ExecutorInfo.newBuilder();
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/FrameworkInfoUtil.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | /**
6 | *
7 | * Utility class for creating frameworkID. This class reduces the overhead of protobuf and makes code
8 | * easier to read.
9 | */
10 | public class FrameworkInfoUtil {
11 |
12 | public static Protos.FrameworkID createFrameworkId(String name) {
13 | return Protos.FrameworkID.newBuilder().setValue(name).build();
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/LabelBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | /**
6 | * Builder class for working with protobufs. It includes 2 different approaches;
7 | * 1) static functions useful for developers that want helpful protobuf functions for Label.
8 | * 2) builder class
9 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
10 | * helpful functions.
11 | *
12 | * This builds Label objects.
13 | */
14 | public class LabelBuilder {
15 |
16 | Protos.Labels.Builder builder = Protos.Labels.newBuilder();
17 |
18 | public LabelBuilder addLabel(String name, String value) {
19 | builder.addLabels(createLabel(name, value));
20 | return this;
21 | }
22 |
23 | public LabelBuilder addLabels(Protos.Labels labels) {
24 | builder.addAllLabels(labels.getLabelsList());
25 | return this;
26 | }
27 |
28 | public Protos.Labels build() {
29 | return builder.build();
30 | }
31 |
32 | public Protos.Labels.Builder builder() {
33 | return builder;
34 | }
35 |
36 | public static Protos.Label createLabel(String name, String value) {
37 | return Protos.Label.newBuilder().setKey(name).setValue(value).build();
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/OfferBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | import java.util.List;
6 |
7 | /**
8 | * Builder class for working with protobufs. It includes 2 different approaches;
9 | * 1) static functions useful for developers that want helpful protobuf functions for Offer.
10 | * 2) builder class
11 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
12 | * helpful functions.
13 | *
14 | * This builds Offer objects.
15 | */
16 | public class OfferBuilder {
17 |
18 | private Protos.Offer.Builder builder = Protos.Offer.newBuilder();
19 |
20 | public OfferBuilder(String offerId, String frameworkId, String slaveId, String hostname) {
21 | setOfferId(offerId);
22 | setFrameworkId(frameworkId);
23 | setSlaveId(slaveId);
24 | setHostname(hostname);
25 | }
26 |
27 | public OfferBuilder setOfferId(String id) {
28 | builder.setId(createOfferID(id));
29 | return this;
30 | }
31 |
32 | public OfferBuilder setFrameworkId(String id) {
33 | builder.setFrameworkId(FrameworkInfoUtil.createFrameworkId(id));
34 | return this;
35 | }
36 |
37 | public OfferBuilder setSlaveId(String id) {
38 | builder.setSlaveId(SlaveUtil.createSlaveID(id));
39 | return this;
40 | }
41 |
42 | public OfferBuilder setHostname(String host) {
43 | builder.setHostname(host);
44 | return this;
45 | }
46 |
47 | public OfferBuilder addResource(Protos.Resource resource) {
48 | builder.addResources(resource);
49 | return this;
50 | }
51 |
52 | public OfferBuilder addAllResources(List<Protos.Resource> resourceList) {
53 | builder.addAllResources(resourceList);
54 | return this;
55 | }
56 |
57 |
58 | public OfferBuilder addAttribute(Protos.Attribute attribute) {
59 | builder.addAttributes(attribute);
60 | return this;
61 | }
62 |
63 | public Protos.Offer build() {
64 | return builder.build();
65 | }
66 |
67 | /**
68 | * Intentionally exposes the underlying protobuf builder for extensions beyond this builder.
69 | *
70 | * @return the underlying Protos.Offer.Builder
71 | */
72 | public Protos.Offer.Builder builder() {
73 | return builder;
74 | }
75 |
76 | public static Protos.Offer createOffer(Protos.FrameworkID frameworkID,
77 | Protos.OfferID offerID, Protos.SlaveID slaveID, String hostname) {
78 | return Protos.Offer.newBuilder()
79 | .setId(offerID)
80 | .setFrameworkId(frameworkID)
81 | .setSlaveId(slaveID)
82 | .setHostname(hostname)
83 | .build();
84 | }
85 |
86 | public static Protos.Offer createOffer(String frameworkID, String offerID, String slaveID, String hostname) {
87 | return createOffer(FrameworkInfoUtil.createFrameworkId(frameworkID),
88 | createOfferID(offerID), SlaveUtil.createSlaveID(slaveID), hostname);
89 | }
90 |
91 | public static Protos.OfferID createOfferID(String offerID) {
92 | return Protos.OfferID.newBuilder().setValue(offerID).build();
93 | }
94 | }
95 |
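An illustrative sketch (not a file in this repository), e.g. for constructing offers in scheduler tests; all identifiers are hypothetical (ResourceBuilder and AttributeUtil are the sibling classes in this package):

package org.apache.mesos.protobuf;

import org.apache.mesos.Protos;

public class OfferBuilderExample {
  public static void main(String[] args) {
    Protos.Offer offer = new OfferBuilder("offer-1", "framework-1", "slave-1", "host1.example.com")
        .addResource(ResourceBuilder.cpus(1.0))
        .addResource(ResourceBuilder.mem(2048))
        .addAttribute(AttributeUtil.createTextAttribute("rack", "r1")) // hypothetical attribute
        .build();
    System.out.println(offer.getHostname());
  }
}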
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/ResourceBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos.Resource;
4 | import org.apache.mesos.Protos.Value;
5 |
6 | /**
7 | * Builder class for working with protobufs. It includes 2 different approaches;
8 | * 1) static functions useful for developers that want helpful protobuf functions for Resource.
9 | * 2) builder class
10 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
11 | * helpful functions.
12 | *
13 | * This builds Resource objects and provides some convenience functions for common resources.
14 | */
15 |
16 | public class ResourceBuilder {
17 | private String role;
18 | static final String DEFAULT_ROLE = "*";
19 |
20 |
21 | public ResourceBuilder(String role) {
22 | this.role = role;
23 | }
24 |
25 | public Resource createCpuResource(double value) {
26 | return cpus(value, role);
27 | }
28 |
29 | public Resource createMemResource(double value) {
30 | return mem(value, role);
31 | }
32 |
33 | public Resource createPortResource(long begin, long end) {
34 | return ports(begin, end, role);
35 | }
36 |
37 | public Resource createScalarResource(String name, double value) {
38 | return ResourceBuilder.createScalarResource(name, value, role);
39 | }
40 |
41 | public Resource createRangeResource(String name, long begin, long end) {
42 | return ResourceBuilder.createRangeResource(name, begin, end, role);
43 | }
44 |
45 | public static Resource createScalarResource(String name, double value, String role) {
46 | return Resource.newBuilder()
47 | .setName(name)
48 | .setType(Value.Type.SCALAR)
49 | .setScalar(Value.Scalar.newBuilder().setValue(value).build())
50 | .setRole(role)
51 | .build();
52 | }
53 |
54 | public static Resource createRangeResource(String name, long begin, long end, String role) {
55 | Value.Range range = Value.Range.newBuilder().setBegin(begin).setEnd(end).build();
56 | return Resource.newBuilder()
57 | .setName(name)
58 | .setType(Value.Type.RANGES)
59 | .setRanges(Value.Ranges.newBuilder().addRange(range))
60 | .build();
61 | }
62 |
63 | public static Resource cpus(double value, String role) {
64 | return createScalarResource("cpus", value, role);
65 | }
66 |
67 | public static Resource cpus(double value) {
68 | return cpus(value, DEFAULT_ROLE);
69 | }
70 |
71 | public static Resource mem(double value, String role) {
72 | return createScalarResource("mem", value, role);
73 | }
74 |
75 | public static Resource mem(double value) {
76 | return mem(value, DEFAULT_ROLE);
77 | }
78 |
79 | public static Resource ports(long begin, long end, String role) {
80 | return createRangeResource("ports", begin, end, role);
81 | }
82 |
83 | public static Resource ports(long begin, long end) {
84 | return ports(begin, end, DEFAULT_ROLE);
85 | }
86 | }
87 |
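An illustrative sketch (not a file in this repository) of the instance and static forms; the role name is hypothetical:

package org.apache.mesos.protobuf;

import org.apache.mesos.Protos.Resource;

public class ResourceBuilderExample {
  public static void main(String[] args) {
    ResourceBuilder resources = new ResourceBuilder("hdfs-role"); // hypothetical role
    Resource cpus = resources.createCpuResource(0.5);
    Resource ports = resources.createPortResource(31000, 31010);

    // The static helpers default to the "*" role.
    Resource mem = ResourceBuilder.mem(1024);
    System.out.println(cpus.getRole() + " / " + mem.getRole() + " / " + ports.getName());
  }
}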
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/SlaveUtil.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | /**
6 | *
7 | * Utility class for working with slaves. This class reduces the overhead of protobuf and makes code
8 | * easier to read.
9 | */
10 | public class SlaveUtil {
11 |
12 | public static Protos.SlaveID createSlaveID(String slaveID) {
13 | return Protos.SlaveID.newBuilder().setValue(slaveID).build();
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/TaskInfoBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import com.google.protobuf.ByteString;
4 | import org.apache.mesos.Protos;
5 |
6 | import java.util.List;
7 |
8 | /**
9 | * Builder class for working with protobufs. It includes 2 different approaches;
10 | * 1) static functions useful for developers that want helpful protobuf functions for TaskInfo.
11 | * 2) builder class
12 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
13 | * helpful functions.
14 | *
15 | * This builds TaskInfo objects.
16 | */
17 | public class TaskInfoBuilder {
18 |
19 | Protos.TaskInfo.Builder builder = Protos.TaskInfo.newBuilder();
20 |
21 | // min required fields to create a taskInfo
22 | public TaskInfoBuilder(String taskId, String name, String slaveId) {
23 | setId(taskId);
24 | setName(name);
25 | setSlaveId(slaveId);
26 | }
27 |
28 | public TaskInfoBuilder setId(String taskId) {
29 | builder.setTaskId(TaskUtil.createTaskId(taskId));
30 | return this;
31 | }
32 |
33 | public TaskInfoBuilder setName(String name) {
34 | builder.setName(name);
35 | return this;
36 | }
37 |
38 | public TaskInfoBuilder setSlaveId(String slaveId) {
39 | builder.setSlaveId(SlaveUtil.createSlaveID(slaveId));
40 | return this;
41 | }
42 |
43 | public TaskInfoBuilder setExecutorInfo(Protos.ExecutorInfo executorInfo) {
44 | builder.setExecutor(executorInfo);
45 | return this;
46 | }
47 |
48 | public TaskInfoBuilder addAllResources(List<Protos.Resource> resourceList) {
49 | builder.addAllResources(resourceList);
50 | return this;
51 | }
52 |
53 | public TaskInfoBuilder addResource(Protos.Resource resource) {
54 | builder.addResources(resource);
55 | return this;
56 | }
57 |
58 | public TaskInfoBuilder setData(String data) {
59 | builder.setData(ByteString.copyFromUtf8(data));
60 | return this;
61 | }
62 |
63 | public Protos.TaskInfo build() {
64 | return builder.build();
65 | }
66 |
67 | public Protos.TaskInfo.Builder builder() {
68 | return builder;
69 | }
70 | }
71 |
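An illustrative sketch (not a file in this repository) composing TaskInfoBuilder with the executor, command and resource builders above; the IDs and values are hypothetical:

package org.apache.mesos.protobuf;

import org.apache.mesos.Protos;

public class TaskInfoBuilderExample {
  public static void main(String[] args) {
    Protos.ExecutorInfo executor = new ExecutorInfoBuilder("executor-1", "hdfs-node-executor")
        .addCommandInfo(new CommandInfoBuilder().setCommand("./bin/hdfs-mesos-datanode").build())
        .build();

    Protos.TaskInfo task = new TaskInfoBuilder("task-1", "datanode", "slave-1")
        .setExecutorInfo(executor)
        .addResource(ResourceBuilder.cpus(1.0))
        .addResource(ResourceBuilder.mem(2048))
        .build();
    System.out.println(task.getName());
  }
}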
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/TaskStatusBuilder.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import com.google.protobuf.ByteString;
4 | import org.apache.mesos.Protos;
5 |
6 | /**
7 | * Builder class for working with protobufs. It includes 2 different approaches;
8 | * 1) static functions useful for developers that want helpful protobuf functions for TaskStatus.
9 | * 2) builder class
10 | * All builder classes provide access to the protobuf builder for capabilities beyond the included
11 | * helpful functions.
12 | *
13 | * This builds TaskStatus objects.
14 | */
15 | public class TaskStatusBuilder {
16 |
17 | Protos.TaskStatus.Builder builder = createTaskStatusBuilder();
18 | LabelBuilder labelBuilder = new LabelBuilder();
19 |
20 | public TaskStatusBuilder() {
21 | }
22 |
23 | public TaskStatusBuilder(Protos.TaskStatus prototype) {
24 | builder = createTaskStatusBuilder(prototype);
25 | }
26 |
27 | public TaskStatusBuilder setTaskId(String taskId) {
28 | setTaskId(TaskUtil.createTaskId(taskId));
29 | return this;
30 | }
31 |
32 | public TaskStatusBuilder setTaskId(Protos.TaskID taskId) {
33 | builder.setTaskId(taskId);
34 | return this;
35 | }
36 |
37 | public TaskStatusBuilder setSlaveId(String slaveId) {
38 | builder.setSlaveId(SlaveUtil.createSlaveID(slaveId));
39 | return this;
40 | }
41 |
42 | public TaskStatusBuilder setState(Protos.TaskState state) {
43 | builder.setState(state);
44 | return this;
45 | }
46 |
47 | public TaskStatusBuilder setMessage(String message) {
48 | builder.setMessage(message);
49 | return this;
50 | }
51 |
52 | public TaskStatusBuilder addLabel(String key, String value) {
53 | labelBuilder.addLabel(key, value);
54 | builder.setLabels(labelBuilder.build());
55 | return this;
56 | }
57 |
58 | public TaskStatusBuilder setLabels(Protos.Labels labels) {
59 | labelBuilder.addLabels(labels);
60 | builder.setLabels(labelBuilder.build());
61 | return this;
62 | }
63 |
64 | public TaskStatusBuilder setData(ByteString data) {
65 | builder.setData(data);
66 | return this;
67 | }
68 |
69 | public static Protos.TaskStatus.Builder createTaskStatusBuilder() {
70 | return Protos.TaskStatus.newBuilder();
71 | }
72 |
73 | public static Protos.TaskStatus.Builder createTaskStatusBuilder(Protos.TaskStatus prototype) {
74 | return Protos.TaskStatus.newBuilder(prototype);
75 | }
76 |
77 | public static TaskStatusBuilder newBuilder() {
78 | return new TaskStatusBuilder();
79 | }
80 |
81 | public Protos.TaskStatus build() {
82 | return builder.build();
83 | }
84 |
85 | public Protos.TaskStatus.Builder builder() {
86 | return builder;
87 | }
88 |
89 | public static Protos.TaskStatus createTaskStatus(String taskId, Protos.TaskState state) {
90 | return createTaskStatus(TaskUtil.createTaskId(taskId), state);
91 | }
92 |
93 | public static Protos.TaskStatus createTaskStatus(Protos.TaskID taskId, Protos.TaskState state) {
94 | return new TaskStatusBuilder().setTaskId(taskId).setState(state).build();
95 | }
96 |
97 | public static Protos.TaskStatus createTaskStatus(String taskId, String slaveId,
98 | Protos.TaskState state, String message) {
99 | return createTaskStatus(TaskUtil.createTaskId(taskId), slaveId, state, message);
100 | }
101 |
102 | public static Protos.TaskStatus createTaskStatus(Protos.TaskID taskId, String slaveId,
103 | Protos.TaskState state, String message) {
104 | return new TaskStatusBuilder()
105 | .setTaskId(taskId)
106 | .setState(state)
107 | .setSlaveId(slaveId)
108 | .setMessage(message)
109 | .build();
110 | }
111 | }
112 |
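An illustrative sketch (not a file in this repository); the task/slave IDs, label and message are hypothetical:

package org.apache.mesos.protobuf;

import org.apache.mesos.Protos;

public class TaskStatusBuilderExample {
  public static void main(String[] args) {
    // Static helper for the common case.
    Protos.TaskStatus running =
        TaskStatusBuilder.createTaskStatus("task-1", "slave-1", Protos.TaskState.TASK_RUNNING, "healthy");

    // Fluent form, including a label.
    Protos.TaskStatus finished = TaskStatusBuilder.newBuilder()
        .setTaskId("task-1")
        .setState(Protos.TaskState.TASK_FINISHED)
        .addLabel("node-type", "datanode")
        .build();
    System.out.println(running.getState() + " -> " + finished.getState());
  }
}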
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/protobuf/TaskUtil.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.protobuf;
2 |
3 | import org.apache.mesos.Protos;
4 |
5 | /**
6 | * Utility class for working with Tasks. This class reduces the overhead of protobuf and makes code
7 | * easier to read.
8 | */
9 | public class TaskUtil {
10 |
11 | public static Protos.TaskID createTaskId(String taskId) {
12 | return Protos.TaskID.newBuilder().setValue(taskId).build();
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/stream/StreamRedirect.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.stream;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 |
6 | import java.io.BufferedReader;
7 | import java.io.IOException;
8 | import java.io.InputStream;
9 | import java.io.InputStreamReader;
10 | import java.io.PrintStream;
11 | import java.nio.charset.Charset;
12 |
13 | /**
14 | * Can be used to redirect the STDOUT and STDERR of a started process. Used for the executors.
15 | */
16 | public class StreamRedirect implements Runnable {
17 | private final Log log = LogFactory.getLog(StreamRedirect.class);
18 |
19 | private InputStream stream;
20 | private PrintStream outputStream;
21 |
22 | public StreamRedirect(InputStream stream, PrintStream outputStream) {
23 | this.stream = stream;
24 | this.outputStream = outputStream;
25 | }
26 |
27 | public void run() {
28 | try {
29 | InputStreamReader streamReader = new InputStreamReader(stream, Charset.defaultCharset());
30 | BufferedReader streamBuffer = new BufferedReader(streamReader);
31 |
32 | String streamLine = null;
33 | while ((streamLine = streamBuffer.readLine()) != null) {
34 | outputStream.println(streamLine);
35 | }
36 | } catch (IOException ioe) {
37 | log.error("Stream redirect error", ioe);
38 | }
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/mesos-commons/src/main/java/org/apache/mesos/stream/StreamUtil.java:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.stream;
2 |
3 | import org.apache.commons.io.IOUtils;
4 |
5 | import java.io.InputStream;
6 | import java.io.OutputStream;
7 | import java.io.PrintStream;
8 | import java.net.Socket;
9 |
10 | /**
11 | * Provides stream utility functions.
12 | */
13 | public class StreamUtil {
14 |
15 | /**
16 | * Redirects a process's STDOUT and STDERR to the given streams for logging and debugging purposes.
17 | */
18 | public static void redirectProcess(Process process, PrintStream out, PrintStream err) {
19 | StreamRedirect stdoutRedirect = new StreamRedirect(process.getInputStream(), out);
20 | new Thread(stdoutRedirect).start();
21 | StreamRedirect stderrRedirect = new StreamRedirect(process.getErrorStream(), err);
22 | new Thread(stderrRedirect).start();
23 | }
24 |
25 | public static void redirectProcess(Process process) {
26 | redirectProcess(process, System.out, System.err);
27 | }
28 |
29 | public static void closeQuietly(Socket socket) {
30 | IOUtils.closeQuietly(socket);
31 | }
32 |
33 | public static void closeQuietly(InputStream input) {
34 | IOUtils.closeQuietly(input);
35 | }
36 |
37 | public static void closeQuietly(OutputStream output) {
38 | IOUtils.closeQuietly(output);
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/mesos-commons/src/test/java/org/apache/mesos/collections/MapUtilSpec.groovy:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.collections
2 |
3 | import spock.lang.Specification
4 |
5 | /**
6 | * Specification for MapUtil property-map filtering.
7 | */
8 | class MapUtilSpec extends Specification {
9 |
10 | def "filtering maps"() {
11 | given:
12 | def map = ["MESOS_BLAH": "MESOS_BLAH", "MESOS_BLAH2" : "MESOS_BLAH2", "RED_LEADER1" : "Tsui Choi "]
13 | def props = new Properties()
14 | props.putAll(map)
15 |
16 | expect:
17 | map.size() == 3
18 | MapUtil.propertyMapFilter(props, new StartsWithPredicate("MESOS")).size() == 2
19 | MapUtil.propertyMapFilter(props, new StartsWithPredicate("RED")).size() == 1
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/mesos-commons/src/test/java/org/apache/mesos/collections/StartsWithPredicateSpec.groovy:
--------------------------------------------------------------------------------
1 | package org.apache.mesos.collections
2 |
3 | import com.google.common.collect.Maps
4 | import spock.lang.Specification
5 |
6 | /**
7 | * Specification for StartsWithPredicate key filtering.
8 | */
9 | class StartsWithPredicateSpec extends Specification {
10 |
11 | def "predicate filter"() {
12 | given:
13 | def map = ["MESOS_BLAH": "MESOS_BLAH", "MESOS_BLAH2": "MESOS_BLAH2", "RED_LEADER1": "Tsui Choi "]
14 |
15 | expect:
16 | map.size() == 3
17 | Maps.filterKeys(map, new StartsWithPredicate("MESOS")).size() == 2
18 | Maps.filterKeys(map, new StartsWithPredicate("RED")).size() == 1
19 |
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |   <modelVersion>4.0.0</modelVersion>
6 |
7 |   <groupId>org.apache.mesos</groupId>
8 |   <artifactId>hdfs</artifactId>
9 |   <version>0.1.5</version>
10 |
11 |   <repositories>
12 |     <repository>
13 |       <id>apache-releases</id>
14 |       <name>Apache releases</name>
15 |       <url>https://repository.apache.org/content/repositories/releases/</url>
16 |     </repository>
17 |   </repositories>
18 |
19 |   <properties>
20 |     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
21 |
22 |     <java.abi>1.7</java.abi>
23 |     <mesos.version>0.21.1</mesos.version>
24 |     <slf4j.version>1.7.10</slf4j.version>
25 |     <logback-classic.version>1.1.2</logback-classic.version>
26 |     <hadoop.version>2.5.0</hadoop.version>
27 |     <jetty.version>9.2.2.v20140723</jetty.version>
28 |     <joda-time.version>2.4</joda-time.version>
29 |     <jmte.version>3.0</jmte.version>
30 |     <guice.version>3.0</guice.version>
31 |     <guava.version>18.0</guava.version>
32 |     <junit.version>4.11</junit.version>
33 |     <mockito.version>1.9.5</mockito.version>
34 |   </properties>
35 |
36 |   <dependencies>
37 |
38 |     <dependency>
39 |       <groupId>org.apache.mesos</groupId>
40 |       <artifactId>mesos</artifactId>
41 |       <version>${mesos.version}</version>
42 |     </dependency>
43 |
44 |     <dependency>
45 |       <groupId>org.slf4j</groupId>
46 |       <artifactId>log4j-over-slf4j</artifactId>
47 |       <version>${slf4j.version}</version>
48 |     </dependency>
49 |
50 |     <dependency>
51 |       <groupId>org.slf4j</groupId>
52 |       <artifactId>jcl-over-slf4j</artifactId>
53 |       <version>${slf4j.version}</version>
54 |     </dependency>
55 |
56 |     <dependency>
57 |       <groupId>ch.qos.logback</groupId>
58 |       <artifactId>logback-classic</artifactId>
59 |       <version>${logback-classic.version}</version>
60 |     </dependency>
61 |
62 |     <dependency>
63 |       <groupId>org.apache.hadoop</groupId>
64 |       <artifactId>hadoop-common</artifactId>
65 |       <version>${hadoop.version}</version>
66 |       <exclusions>
67 |         <exclusion>
68 |           <groupId>log4j</groupId>
69 |           <artifactId>log4j</artifactId>
70 |         </exclusion>
71 |         <exclusion>
72 |           <groupId>org.slf4j</groupId>
73 |           <artifactId>slf4j-log4j12</artifactId>
74 |         </exclusion>
75 |         <exclusion>
76 |           <groupId>javax.servlet</groupId>
77 |           <artifactId>servlet-api</artifactId>
78 |         </exclusion>
79 |         <exclusion>
80 |           <groupId>commons-httpclient</groupId>
81 |           <artifactId>commons-httpclient</artifactId>
82 |         </exclusion>
83 |         <exclusion>
84 |           <groupId>net.java.dev.jets3t</groupId>
85 |           <artifactId>jets3t</artifactId>
86 |         </exclusion>
87 |       </exclusions>
88 |     </dependency>
89 |
90 |     <dependency>
91 |       <groupId>org.eclipse.jetty</groupId>
92 |       <artifactId>jetty-server</artifactId>
93 |       <version>${jetty.version}</version>
94 |     </dependency>
95 |
96 |     <dependency>
97 |       <groupId>joda-time</groupId>
98 |       <artifactId>joda-time</artifactId>
99 |       <version>${joda-time.version}</version>
100 |     </dependency>
101 |
102 |     <dependency>
103 |       <groupId>com.floreysoft</groupId>
104 |       <artifactId>jmte</artifactId>
105 |       <version>${jmte.version}</version>
106 |     </dependency>
107 |
108 |     <dependency>
109 |       <groupId>com.google.inject</groupId>
110 |       <artifactId>guice</artifactId>
111 |       <version>${guice.version}</version>
112 |     </dependency>
113 |
114 |     <dependency>
115 |       <groupId>com.google.guava</groupId>
116 |       <artifactId>guava</artifactId>
117 |       <version>${guava.version}</version>
118 |     </dependency>
119 |
120 |     <dependency>
121 |       <groupId>junit</groupId>
122 |       <artifactId>junit</artifactId>
123 |       <version>${junit.version}</version>
124 |       <scope>test</scope>
125 |     </dependency>
126 |
127 |     <dependency>
128 |       <groupId>org.mockito</groupId>
129 |       <artifactId>mockito-all</artifactId>
130 |       <version>${mockito.version}</version>
131 |       <scope>test</scope>
132 |     </dependency>
133 |
134 |   </dependencies>
135 |
136 |   <build>
137 |
138 |     <pluginManagement>
139 |       <plugins>
140 |         <plugin>
141 |           <artifactId>maven-compiler-plugin</artifactId>
142 |           <version>2.3.2</version>
143 |           <configuration>
144 |             <source>${java.abi}</source>
145 |             <target>${java.abi}</target>
146 |           </configuration>
147 |         </plugin>
148 |
149 |
150 |
151 |         <plugin>
152 |           <groupId>org.apache.maven.plugins</groupId>
153 |           <artifactId>maven-shade-plugin</artifactId>
154 |           <version>2.3</version>
155 |         </plugin>
156 |       </plugins>
157 |     </pluginManagement>
158 |
159 |     <plugins>
160 |       <plugin>
161 |         <groupId>com.googlecode.maven-java-formatter-plugin</groupId>
162 |         <artifactId>maven-java-formatter-plugin</artifactId>
163 |         <version>0.4</version>
164 |         <configuration>
165 |           <configFile>Mesosphere-JavaFormatter.xml</configFile>
166 |         </configuration>
167 |         <executions>
168 |           <execution>
169 |             <goals>
170 |               <goal>format</goal>
171 |             </goals>
172 |           </execution>
173 |         </executions>
174 |       </plugin>
175 |       <plugin>
176 |         <groupId>org.apache.maven.plugins</groupId>
177 |         <artifactId>maven-shade-plugin</artifactId>
178 |         <executions>
179 |           <execution>
180 |             <phase>package</phase>
181 |             <goals>
182 |               <goal>shade</goal>
183 |             </goals>
184 |             <configuration>
185 |
186 |               <createDependencyReducedPom>false</createDependencyReducedPom>
187 |
188 |               <shadedArtifactAttached>true</shadedArtifactAttached>
189 |               <shadedClassifierName>uber</shadedClassifierName>
190 |               <artifactSet>
191 |                 <includes>
192 |                   <include>*:*</include>
193 |                 </includes>
194 |               </artifactSet>
195 |
196 |               <filters>
197 |                 <filter>
198 |                   <artifact>commons-logging:commons-logging</artifact>
199 |                   <excludes>
200 |                     <exclude>**</exclude>
201 |                   </excludes>
202 |                 </filter>
203 |                 <filter>
204 |                   <artifact>*:*</artifact>
205 |                   <excludes>
206 |                     <exclude>META-INF/*.SF</exclude>
207 |                     <exclude>META-INF/*.DSA</exclude>
208 |                     <exclude>META-INF/*.RSA</exclude>
209 |                   </excludes>
210 |                 </filter>
211 |               </filters>
212 |             </configuration>
213 |           </execution>
214 |         </executions>
215 |       </plugin>
216 |
217 |
218 |       <plugin>
219 |         <groupId>org.apache.maven.plugins</groupId>
220 |         <artifactId>maven-jar-plugin</artifactId>
221 |         <version>2.5</version>
222 |         <configuration>
223 |           <archive>
224 |             <manifest>
225 |               <mainClass>org.apache.mesos.hdfs.Main</mainClass>
226 |             </manifest>
227 |           </archive>
228 |         </configuration>
229 |       </plugin>
230 |     </plugins>
231 |   </build>
232 |
233 | </project>
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | rootProject.name = 'hdfs'
2 |
3 |
4 | include "mesos-commons"
5 | include "hdfs-commons"
6 | include "hdfs-scheduler"
7 | include "hdfs-executor"
8 |
--------------------------------------------------------------------------------