├── .gitignore ├── AUTHORS ├── ChangeLog ├── LICENSE ├── NOTICE ├── README.md ├── VERSION ├── buildoop ├── bin │ ├── .gitignore │ └── buildoop ├── conf │ ├── buildoop.conf │ └── log4jconfig.conf ├── doc │ ├── .gitignore │ ├── DEPENDENCIES │ ├── README │ ├── buildoop-howto-0.0.1.lyx │ ├── buildoop-howto-0.0.1.pdf │ └── recipe.json └── lib │ ├── .gitignore │ ├── EmbeddedHTTPServer.groovy │ ├── FileDownloader.groovy │ ├── FileExtracter.groovy │ ├── MainController.groovy │ ├── PackageBuilder.groovy │ ├── ParseOptions.groovy │ ├── RepositoryDownloader.groovy │ ├── RunCommand.groovy │ ├── SanityChecking.groovy │ └── SourceBuilder.groovy ├── conf ├── targets │ └── targets.conf └── tests │ ├── minimal.test │ ├── openbus-0.0.1.test │ └── ping.test ├── deploy ├── README ├── cm │ ├── .gitignore │ ├── README │ ├── puppet │ │ ├── manifests │ │ │ ├── cluster.pp │ │ │ ├── extdata │ │ │ │ └── site.csv │ │ │ └── site.pp │ │ └── modules │ │ │ └── hadoop-conf │ │ │ ├── files │ │ │ └── conf.openbus │ │ │ │ ├── _slaves │ │ │ │ ├── capacity-scheduler.xml │ │ │ │ ├── configuration.xsl │ │ │ │ ├── container-executor.cfg │ │ │ │ ├── core-site.xml │ │ │ │ ├── hadoop-env.cmd │ │ │ │ ├── hadoop-env.sh │ │ │ │ ├── hadoop-metrics.properties │ │ │ │ ├── hadoop-metrics2.properties │ │ │ │ ├── hadoop-policy.xml │ │ │ │ ├── hdfs-site.xml │ │ │ │ ├── log4j.properties │ │ │ │ ├── mapred-env.cmd │ │ │ │ ├── mapred-env.sh │ │ │ │ ├── mapred-queues.xml.template │ │ │ │ ├── mapred-site.xml │ │ │ │ ├── mapred-site.xml.template │ │ │ │ ├── slaves │ │ │ │ ├── ssl-client.xml.example │ │ │ │ ├── ssl-server.xml.example │ │ │ │ ├── yarn-env.cmd │ │ │ │ ├── yarn-env.sh │ │ │ │ └── yarn-site.xml │ │ │ └── manifests │ │ │ └── init.pp │ └── test-config │ │ ├── buildoop.repo │ │ ├── conf.openbus.basic │ │ ├── capacity-scheduler.xml │ │ ├── configuration.xsl │ │ ├── container-executor.cfg │ │ ├── core-site.xml │ │ ├── hadoop-env.cmd │ │ ├── hadoop-env.sh │ │ ├── hadoop-metrics.properties │ │ ├── hadoop-metrics2.properties │ │ ├── hadoop-policy.xml │ │ ├── hdfs-site.xml │ │ ├── log4j.properties │ │ ├── mapred-env.cmd │ │ ├── mapred-env.sh │ │ ├── mapred-site.xml │ │ ├── yarn-env.cmd │ │ ├── yarn-env.sh │ │ └── yarn-site.xml │ │ ├── conf.openbus.ha-failover-kerberos │ │ ├── capacity-scheduler.xml │ │ ├── configuration.xsl │ │ ├── container-executor.cfg │ │ ├── core-site.xml │ │ ├── hadoop-env.cmd │ │ ├── hadoop-env.sh │ │ ├── hadoop-metrics.properties │ │ ├── hadoop-metrics2.properties │ │ ├── hadoop-policy.xml │ │ ├── hdfs-site.xml │ │ ├── log4j.properties │ │ ├── mapred-env.cmd │ │ ├── mapred-env.sh │ │ ├── mapred-site.xml │ │ ├── security │ │ │ ├── HTTP.keytab │ │ │ ├── flume.keytab │ │ │ ├── hdfs.keytab │ │ │ ├── mapred.keytab │ │ │ ├── oozie.keytab │ │ │ ├── secret │ │ │ ├── vagrant.keytab │ │ │ ├── yarn.keytab │ │ │ └── zookeeper.keytab │ │ ├── yarn-env.cmd │ │ ├── yarn-env.sh │ │ └── yarn-site.xml │ │ └── conf.openbus.ha-failover │ │ ├── capacity-scheduler.xml │ │ ├── configuration.xsl │ │ ├── container-executor.cfg │ │ ├── core-site.xml │ │ ├── hadoop-env.cmd │ │ ├── hadoop-env.sh │ │ ├── hadoop-metrics.properties │ │ ├── hadoop-metrics2.properties │ │ ├── hadoop-policy.xml │ │ ├── hdfs-site.xml │ │ ├── log4j.properties │ │ ├── mapred-env.cmd │ │ ├── mapred-env.sh │ │ ├── mapred-site.xml │ │ ├── yarn-env.cmd │ │ ├── yarn-env.sh │ │ └── yarn-site.xml └── vm │ ├── .gitignore │ ├── README │ └── buildoop-cluster │ └── Vagrantfile ├── scripts ├── README.jenkins ├── README.vim ├── build-package.sh ├── create-recipe.sh ├── 
file-permission-checker.sh ├── home.ctags ├── java-head.env ├── run-buildoop-cluster.sh ├── run-buildoop.sh ├── run-repo-server ├── set-enviromet.env └── set-version-file.sh ├── set-buildoop-env ├── sit ├── .gitignore ├── README └── poc │ ├── test-client.groovy │ └── test-server.groovy └── toolchain ├── .gitignore ├── target-Centos-6.x ├── .gitignore ├── binutils │ └── binutils-2.20.51.bd ├── gcc │ └── gcc-4.4.7.bd ├── glibc │ └── glibc-2.12.bd └── rpm │ └── rpm-4.8.0.bd └── target-RedHat-6.x └── .gitignore /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | build 3 | *.swp 4 | cscope.files 5 | cscope.out 6 | tags 7 | .vagrant 8 | *~ 9 | remote 10 | recipes 11 | conf/bom 12 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Javi Roman 2 | Marcelo Valle 3 | -------------------------------------------------------------------------------- /ChangeLog: -------------------------------------------------------------------------------- 1 | 08-03-2014 Javi Roman 2 | 3 | * Added Kafka and Spark packages for Real Time, aka 4 | Speed Layer for Lambda Architecture. 5 | 6 | Spark is based in the upstream of BigTop 0.7.0. The 7 | kafka package is based on the [BIGTOP-989] pull request 8 | not included in the upstream. 9 | 10 | 26-02-2014 Javi Roman 11 | 12 | * Change RPM name nomenclature: 13 | 14 | toolName-toolVersion-distroTag_packageVersion.arch.rpm 15 | 16 | Examples: 17 | flume-1.4.0-openbus0.0.1_1.noarch.rpm 18 | flume-1.4.0-openbus0.0.1_2.noarch.rpm 19 | flume-1.4.0-openbus0.0.1_3.noarch.rpm 20 | 21 | hadoop-hdfs-datanode-2.2.0-openbus0.0.1_1.x86_64.rpm 22 | 23 | 29-01-2014 Javi Roman 24 | 25 | * Initial draft on github. 26 | 27 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Apache Buildoop 2 | 3 | Copyright 2014-2015 The Apache Software Foundation 4 | 5 | This product includes software developed under 6 | The Apache Software Foundation (http://www.apache.org/) license. 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Buildoop: Hadoop Ecosystem Builder Version 1.0 2 | ================================================ 3 | 4 | The Hadoop Ecosystem Builder -Buildoop- provides interoperable tools, metadata, 5 | and processes that enable the rapid, repeatable development of a Linux, 6 | Hadoop-based system. 7 | 8 | With Buildoop you can build a complete set of Hadoop ecosystem components based 9 | on RPM or DEB packages, run integration tests for these tools on a RedHat/CentOS 10 | or Debian/Ubuntu virtual system, and maintain a set of configuration files for 11 | baremetal deployment. 12 | 13 | NEW VERSION 1.0 NOTES 14 | -------------------- 15 | Buildoop's code has been restructured to isolate the core (builder and packager) from the recipes. 16 | The previous recipes have been moved to https://github.com/keedio/buildoopRecipes 17 | This separation makes the maintenance of recipe versions much easier. 18 | It also makes it possible to use Buildoop to build non-Hadoop tools. 19 | 20 | Foundations 21 | ----------- 22 | Buildoop is split into the following foundations: 23 | 24 | 1.
A main command line program for metadata operations: **buildoop**. 25 | 2. A set of system integration tests: **SIT framework**. 26 | 3. A central repository for **baremetal deployment** configuration. 27 | 4. An external repository with the distribution **recipes**. 28 | 29 | Technology 30 | ---------- 31 | From the technology point of view Buildoop is based on: 32 | 33 | 1. The "buildoop" command line, based on **Groovy**. 34 | 2. Packaging recipes based on **JSON**. 35 | 3. SIT Framework: based on Groovy test scripts, and **Vagrant** for the 36 | virtual development environment. 37 | 38 | 39 | Folder scheme 40 | ------------- 41 | 42 | * buildoop: 43 | Main folder for the Buildoop main controller. 44 | 45 | * conf: 46 | Buildoop configuration folder: BOM definitions, target definitions. 47 | 48 | * deploy: 49 | Folder for deployment on VM and baremetal systems. Based on Puppet and Chef. 50 | 51 | * sit: 52 | System Integration Testing tests for the VM pseudo-cluster system. 53 | 54 | * toolchain: 55 | Tools for cross-compiling for different targets. 56 | 57 | HowTo 58 | ----- 59 | 60 | 1. Download the Groovy binary: 61 | 62 | `wget http://dl.bintray.com/groovy/maven/groovy-binary-2.3.3.zip` 63 | 64 | 2. Clone the project: 65 | 66 | `git clone https://github.com/keedio/buildoop.git` 67 | 68 | 3. Set the environment: 69 | 70 | `cd buildoop && source set-buildoop-env` 71 | 72 | 4. In order to build some packages you need to install some dependencies: 73 | 74 | `less buildoop/doc/DEPENDENCIES` 75 | 76 | 77 | 5. Usage examples: 78 | 79 | - List the available distributions and versions in the external repository: 80 | `buildoop -remoterepo https://github.com/keedio/buildoopRecipes` 81 | 82 | - Select a distribution version and download it: 83 | `buildoop -downloadrepo https://github.com/keedio/buildoopRecipes openbus-v1.0` 84 | 85 | - Build the whole ecosystem for the distribution openbus-v1.0: 86 | `buildoop openbus-v1.0 -build` 87 | 88 | - Build the zookeeper package for the distribution openbus-v1.0: 89 | `buildoop openbus-v1.0 zookeeper -build` 90 | 91 | 6. For more commands: 92 | 93 | `less buildoop/doc/README` 94 | 95 | Read More 96 | --------- 97 | 98 | http://buildoop.github.io/ 99 | 100 | Pull request flow 101 | ------------------ 102 | 103 | Clone the repository from your project fork: 104 | 105 | `$ git clone https://github.com/keedio/buildoop.git` 106 | 107 | The default active branch of the clone is "buildoop-v1-dev": 108 | 109 | `$ git branch 110 | * buildoop-v1-development` 111 | 112 | You have to make your changes in the "buildoop-v1-dev" branch. 113 | 114 | `$ git add .` 115 | 116 | `$ git commit -m "...."` 117 | 118 | `$ git push origin` 119 | 120 | When you are ready to propose a change to the original repository, you have 121 | to use the "Pull Request" button from the GitHub interface. 122 | 123 | The point is that the pull request has to go to the "buildoop-v1-dev" branch, so the pull 124 | request reviewer can check the change, pull it into the original "buildoop-v1-dev" branch, and, 125 | as a last step, push this "development pull request" to the "buildoop-v1-master" branch. 126 | 127 | So the project has two branches: 128 | 129 | 1. The "buildoop-v1-master" branch: the deployable branch, containing only well-tested, ready-to-use code. 130 | 2. The "buildoop-v1-dev" branch: where the work is done and where pull requests have to be sent.
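The same flow, condensed into a hedged example (the fork URL is a placeholder, and the branch names are the ones described above):

```bash
# clone your fork (placeholder URL) and work on the development branch
git clone https://github.com/<your-user>/buildoop.git
cd buildoop
git branch                     # the active branch should be buildoop-v1-dev

# ... make and test your changes ...
git add .
git commit -m "short description of the change"
git push origin buildoop-v1-dev

# then open a Pull Request from your fork's buildoop-v1-dev branch against
# the upstream buildoop-v1-dev branch using the GitHub web interface
```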
131 | 132 | 133 | Roadmap 134 | ------- 135 | 136 | | Feature | Desc | State | 137 | | ------------- |:-------------- | :-----:| 138 | | Core Engine | Core building engine | Done | 139 | | POM versioning | Simple BOM multi-versioning | Done | 140 | | Git repository | Download sources from GIT | Done | 141 | | Svn repository | Download sources from Subversion | Pending | 142 | | Code refactoring | More elegant code | Forever Pending | 143 | | Cross-Architecture | Cross build from different distributions | Pending | 144 | | DEB Support | Debian/Ubuntu Support | Pending | 145 | | Layers | Add/Modify features without modifying the core folders | Pending | 146 | | SIT | System Integration Tests | Pending | 147 | 148 | -- 149 | Javi Roman 150 | Marcelo Valle 151 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | Buildoop v1.1 2 | -------------------------------------------------------------------------------- /buildoop/bin/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/buildoop/bin/.gitignore -------------------------------------------------------------------------------- /buildoop/bin/buildoop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env groovy 2 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | 21 | /* 22 | * Grape adds Maven repository dependencies to the classpath. 23 | */ 24 | 25 | // Add Maven LOG4J logger dependencies 26 | @Grab('log4j:log4j:1.2.17') 27 | 28 | import org.apache.log4j.* 29 | import groovy.util.logging.* 30 | import groovy.grape.Grape 31 | 32 | /** 33 | * Main class for the Buildoop system. 34 | * 35 | * @author Javi Roman 36 | * 37 | */ 38 | @Log4j 39 | class Buildoop { 40 | final static def GENCONFIG = "buildoop/conf/buildoop.conf" 41 | def globalConfig 42 | def ROOT 43 | def wo = [] 44 | 45 | /** 46 | * Constructor: load the configuration files and resolve the 47 | * project root from the BDROOT environment variable. 48 | * 49 | * BDROOT must be set (see set-buildoop-env) before running buildoop. 50 | */ 51 | def Buildoop() { 52 | loadConfigurations() 53 | def env = System.getenv() 54 | ROOT = env["BDROOT"] 55 | assert ROOT != null, 'Environment must not be null' 56 | } 57 | 58 | def loadConfigurations() { 59 | System.setProperty("log4j.ignoreTCL", "true"); 60 | System.setProperty("java.security.egd", "file:/dev/./urandom"); 61 | 62 | // load general configuration properties 63 | globalConfig = new ConfigSlurper().
\ 64 | parse(new File(GENCONFIG).toURL()) 65 | 66 | // load Log4j configuration properties 67 | def config = new ConfigSlurper(). \ 68 | parse(new File(globalConfig.buildoop.log4jconf).toURL()) 69 | 70 | PropertyConfigurator.configure(config.toProperties()) 71 | log.info "\n-- LOG4J Logging (${log.name}: ${log.class}) --" 72 | } 73 | 74 | def displayBanner() { 75 | println userMessage("INFO", "Buildoop: Hadoop Ecosystem Builder\n\n") 76 | } 77 | 78 | def createLogfolder() { 79 | new File(globalConfig.buildoop.logfolder).mkdir() 80 | } 81 | 82 | def createBuildFolders() { 83 | new File(globalConfig.buildoop.logfolder).mkdir() 84 | new File(globalConfig.buildoop.cache).mkdir() 85 | new File(globalConfig.buildoop.stamps).mkdir() 86 | new File(globalConfig.buildoop.downloads).mkdir() 87 | new File(globalConfig.buildoop.work).mkdir() 88 | new File(globalConfig.buildoop.bomdeploy).mkdir() 89 | } 90 | 91 | def userMessage(type, msg) { 92 | def ANSI_RESET = "0m" 93 | def ANSI_RED = "31;1m" 94 | def ANSI_GREEN = "32;1m" 95 | def ANSI_YELLOW = "33;1m" 96 | def ANSI_PURPLE = "35;1m" 97 | def ANSI_CYAN = "36;1m" 98 | def ANSI_BLUE = "34;1m" 99 | def CSI="\u001B[" 100 | 101 | def colors = ["OK":ANSI_GREEN, 102 | "ERROR":ANSI_RED, 103 | "WARNING":ANSI_YELLOW, 104 | "INFO":ANSI_BLUE] 105 | 106 | return CSI + colors[type] + msg + CSI + ANSI_RESET 107 | } 108 | 109 | static def main(args) { 110 | def buildoop = new Buildoop() 111 | 112 | buildoop.displayBanner() 113 | buildoop.createLogfolder() 114 | buildoop.createBuildFolders() 115 | 116 | String[] roots = [buildoop.globalConfig.buildoop.classfolder] 117 | def engine = new GroovyScriptEngine(roots) 118 | 119 | // 1. Parsing the command line 120 | log.info "Parsing command line" 121 | def ParseOptionsClass = engine.loadScriptByName('ParseOptions.groovy') 122 | def parseOptions = ParseOptionsClass.newInstance(buildoop) 123 | 124 | buildoop.wo = parseOptions.parseOpt(args) 125 | assert buildoop.wo != null, 'must not be null' 126 | 127 | // 2. Host tools and BOM file sanity checking 128 | log.info "Sanity checking bits" 129 | def SanityCheckingClass = engine.loadScriptByName('SanityChecking.groovy') 130 | def sanityChecking = SanityCheckingClass.newInstance(log) 131 | 132 | // 3. 
Real works with the working options from user 133 | log.info "Main controller" 134 | def MainControllerClass = engine.loadScriptByName('MainController.groovy') 135 | def mainController = MainControllerClass.newInstance(buildoop) 136 | } 137 | } 138 | 139 | -------------------------------------------------------------------------------- /buildoop/conf/buildoop.conf: -------------------------------------------------------------------------------- 1 | buildoop.log4jconf = "buildoop/conf/log4jconfig.conf" 2 | buildoop.classfolder = "buildoop/lib" 3 | buildoop.logfolder = "build/log" 4 | buildoop.cache = "build/cache" 5 | buildoop.stamps = "build/stamps" 6 | buildoop.downloads = "build/downloads" 7 | buildoop.work = "build/work" 8 | buildoop.recipes = "recipes" 9 | buildoop.toolchain = "buildoop/toolchain" 10 | buildoop.targetfiles = "conf/targets" 11 | buildoop.bomfiles = "conf/bom" 12 | buildoop.bomdeploy = "build/deploy" 13 | buildoop.bomdeploysrc = "build/deploy/%DIST/src" 14 | buildoop.bomdeploybin = "build/deploy/%DIST/bin" 15 | buildoop.remoterepodata = "remote" 16 | buildoop.version="Buildoop v1.1" 17 | buildoop.buildRetries = 1 18 | -------------------------------------------------------------------------------- /buildoop/conf/log4jconfig.conf: -------------------------------------------------------------------------------- 1 | log4j { 2 | // log file appender with file rotation. 3 | appender.scrlog = "org.apache.log4j.FileAppender" 4 | appender.scrlog = "org.apache.log4j.RollingFileAppender" 5 | appender."scrlog.MaxFileSize"="1MB" 6 | appender.'srclog.MaxBackupIndex'="1" 7 | appender."scrlog.layout"="org.apache.log4j.PatternLayout" 8 | appender."scrlog.layout.ConversionPattern"="%d %5p %r %c{1}: %m%n" 9 | appender."scrlog.file"="build/log/buildoop.log" 10 | 11 | // root logger level and appender attached. 
12 | rootLogger="trace,scrlog" 13 | } 14 | -------------------------------------------------------------------------------- /buildoop/doc/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/buildoop/doc/.gitignore -------------------------------------------------------------------------------- /buildoop/doc/DEPENDENCIES: -------------------------------------------------------------------------------- 1 | CentOS 6.x based system: 2 | ----------------------- 3 | 4 | yum groupinstall "Development Tools" 5 | 6 | yum install -y fuse-devel fuse-libs lzo-devel openssl-devel ant ant-trax asciidoc \ 7 | python-devel fuse cmake libxml2-devel libxslt-devel cyrus-sasl-devel \ 8 | sqlite-devel openldap-devel mysql-devel python-setuptools 9 | 10 | 11 | Protobuf (for hadoop native library): 12 | ------------------------------------ 13 | (FIXME: This must be handled by the Buildoop dependency system in chroot enviroment) 14 | 15 | wget https://protobuf.googlecode.com/files/protobuf-2.5.0.tar.gz 16 | cd protobuf && ./configure && make && make install 17 | 18 | Maven 3.x: 19 | --------- 20 | 21 | wget ftp://ftp.pbone.net/mirror/ftp5.gwdg.de/pub/opensuse/repositories/Application:/Geo/CentOS_6/noarch/maven-3.0.4-2.2.noarch.rpm 22 | yum localinstall maven-3.0.4-2.2.noarch.rpm 23 | 24 | CMake 2.8.x 25 | ------------ 26 | 27 | wget ftp://ftp.pbone.net/mirror/atrpms.net/el6-x86_64/atrpms/testing/cmake-2.8.8-4.el6.x86_64.rpm 28 | yum localinstall cmake-2.8.8-4.el6.x86_64.rpm 29 | 30 | 31 | System environment: 32 | ------------------ 33 | 34 | Before use the buildoop command you have to set up the enviroment with something simmilar to: 35 | 36 | JAVA_HOME=/usr/java/jdk1.7.0_51/ 37 | GROOVY_HOME=/opt/groovy-2.2.1 38 | MAVEN_HOME=/usr/share/java/maven 39 | SCALA_HOME=/opt/scala-2.10.3 40 | 41 | export GRADLE_HOME GROOVY_HOME MAVEN_HOME SCALA_HOME 42 | export PATH=$PATH:$GROOVY_HOME/bin:$SCALA_HOME/bin:$JAVA_HOME/bin 43 | 44 | -------------------------------------------------------------------------------- /buildoop/doc/README: -------------------------------------------------------------------------------- 1 | usage: buildoop [options] | <[options]> 2 | 3 | General commands: 4 | ~~~~~~~~~~~~~~~~ 5 | 6 | $ buildoop -v, -version 7 | Show buildoop version information and git hash. 8 | 9 | $ buildoop -h, -help 10 | Simple help of buildroop commands. 11 | 12 | $ buildoop -t, -targets 13 | List availables targets. 14 | 15 | $ buildoop -b, -bom 16 | List all BOM files availables (previously downloaded with -downloadrepo) 17 | 18 | Remote recipes repository commands: 19 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 20 | 21 | $ buildoop -remoterepo 22 | 23 | List all available distributions and its versions in the external repository 24 | 25 | $ buildoop -downloadrepo 26 | 27 | Download the distribution chosen to start building 28 | 29 | BOM and package level commands: 30 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 31 | 32 | $ buildoop [package name] -i, -info 33 | 34 | Show general info of BOM package. Or info about package name. 35 | 36 | $ buildoop [package name] -b, -build 37 | 38 | Build all packages listed in BOM. Or build the package name. 39 | 40 | $ buildoop [package name] -c, -clean 41 | 42 | Clean all "build objects" of packages listed in BOM. 43 | Or all "build objects" of package name. 
44 | 45 | $ buildoop [package name] -cleanall 46 | 47 | Clean all "build objects", metadata, and download files of packages 48 | listed in BOM. Or the same only for the package name. 49 | 50 | 51 | Examples: 52 | 53 | $ buildoop -version 54 | $ buildoop -info 55 | $ buildoop -targets 56 | $ buildoop -bom 57 | 58 | 59 | $ buildoop stable -info 60 | $ buildoop openbus-0.0.1 -build 61 | $ buildoop stable -clean 62 | 63 | $ buildoop openbus-0.0.1 hbase -build 64 | $ buildoop stable hadoop -info 65 | $ buildoop stable hadoop -build 66 | $ buildoop stable hadoop -clean 67 | 68 | 69 | Integration Tests level commands (Currently not implemented): 70 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | 72 | Examples: 73 | 74 | $ buildoop cluster -up 75 | $ buildoop cluster -halt 76 | $ buildoop cluster -deploy 77 | 78 | $ buildoop -tests 79 | openbus-tests 80 | other-tests 81 | minimal-tests 82 | 83 | $ buildoop openbus-tests -info 84 | Available tests for openbus set: 85 | hdfs-test 86 | zookeeper-test 87 | 88 | $ buildoop openbus-tests hdfs-test -info 89 | $ buildoop openbus-tests zookepper-test -info 90 | 91 | $ buildoop openbus-tests hdfs-test -run 92 | $ buildoop openbus-tests all-test -run 93 | 94 | 95 | Deployment level commands: 96 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 97 | 98 | TODO 99 | 100 | -------------------------------------------------------------------------------- /buildoop/doc/buildoop-howto-0.0.1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/buildoop/doc/buildoop-howto-0.0.1.pdf -------------------------------------------------------------------------------- /buildoop/doc/recipe.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-03/schema", 3 | "title": "Recipe", 4 | "description" : "Recipe Package JSON Schema", 5 | "type":"object", 6 | "required":false, 7 | "properties":{ 8 | "do_clean": { 9 | "type":"object", 10 | "required":false, 11 | "properties":{ 12 | "command": { 13 | "description": "Command for clean objects from building", 14 | "type":"string", 15 | "required":false 16 | } 17 | } 18 | }, 19 | "do_compile": { 20 | "type":"object", 21 | "required":false, 22 | "properties":{ 23 | "commands": { 24 | "description": "List of commands for compiling source code", 25 | "type":"array", 26 | "required":false, 27 | "items": 28 | { 29 | "type":"string", 30 | "required":false 31 | } 32 | } 33 | } 34 | }, 35 | "do_dependencies": { 36 | "type":"object", 37 | "required":false, 38 | "properties":{ 39 | "packages": { 40 | "description": "List of packages for build dependencies", 41 | "type":"array", 42 | "required":false, 43 | "items": 44 | { 45 | "type":"string", 46 | "required":false 47 | } 48 | } 49 | } 50 | }, 51 | "do_download": { 52 | "type":"object", 53 | "required":true, 54 | "properties":{ 55 | "src_md5sum": { 56 | "description": "MD5sum for the source code downloaded", 57 | "type":"string", 58 | "required":false 59 | }, 60 | "src_uri": { 61 | "description": "URI for download the source code", 62 | "type":"string", 63 | "required":false 64 | } 65 | } 66 | }, 67 | "do_fetch": { 68 | "type":"object", 69 | "required":true, 70 | "properties":{ 71 | "download_cmd": { 72 | "description": "Base command for download source code [wget, git, svn]", 73 | "type":"string", 74 | "type":"string", 75 | "required":false 76 | } 77 | } 78 | }, 79 | "do_info": { 80 | "type":"object", 81 | "required":true, 
82 | "properties":{ 83 | "description": { 84 | "description": "Description of recipe", 85 | "type":"string", 86 | "required":false 87 | }, 88 | "filename": { 89 | "description": "Full filename of recipe", 90 | "type":"string", 91 | "required":false 92 | }, 93 | "homepage": { 94 | "description": "Home site for the tool", 95 | "type":"string", 96 | "required":false 97 | }, 98 | "license": { 99 | "description": "License type for the tool", 100 | "type":"string", 101 | "required":false 102 | } 103 | } 104 | }, 105 | "do_install": { 106 | "type":"object", 107 | "required":false, 108 | "properties":{ 109 | "commands": { 110 | "description": "Install command for the build source", 111 | "type":"string", 112 | "required":false 113 | }, 114 | "destination": { 115 | "description": "Destination of install [native, stage]", 116 | "type":"string", 117 | "required":false 118 | } 119 | } 120 | }, 121 | "do_package": { 122 | "type":"object", 123 | "required":false, 124 | "properties":{ 125 | "commands": { 126 | "description": "List of RPM/DEB build package commands", 127 | "type":"array", 128 | "required":false, 129 | "items": 130 | { 131 | "type":"string", 132 | "required":false 133 | } 134 | } 135 | } 136 | } 137 | } 138 | } 139 | 140 | -------------------------------------------------------------------------------- /buildoop/lib/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/buildoop/lib/.gitignore -------------------------------------------------------------------------------- /buildoop/lib/EmbeddedHTTPServer.groovy: -------------------------------------------------------------------------------- 1 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 2 | * 3 | * Licensed to the Apache Software Foundation (ASF) under one 4 | * or more contributor license agreements. See the NOTICE file 5 | * distributed with this work for additional information 6 | * regarding copyright ownership. The ASF licenses this file 7 | * to you under the Apache License, Version 2.0 (the 8 | * "License"); you may not use this file except in compliance 9 | * with the License. You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 
18 | */ 19 | import com.sun.net.httpserver.* 20 | import java.util.concurrent.Executors 21 | 22 | /** 23 | * Simple HTTP server for namerepo.repo downloads 24 | * 25 | * 26 | * @author Javi Roman 27 | * 28 | */ 29 | class EmbeddedHTTPServer { 30 | def BDROOT 31 | def LOG 32 | def globalConfig 33 | 34 | def EmbeddedHTTPServer(buildoop) { 35 | LOG = buildoop.log 36 | BDROOT = buildoop.ROOT 37 | globalConfig = buildoop.globalConfig 38 | LOG.info "[EmbeddedHTTPServer] constructor" 39 | 40 | def HTTP_SERVER_PORT=8080 41 | println "Create server port " + HTTP_SERVER_PORT 42 | def server = HttpServer.create(new InetSocketAddress(HTTP_SERVER_PORT),0); 43 | 44 | server.createContext("/", new RepoHandler(server:server)); 45 | server.setExecutor(Executors.newCachedThreadPool()) 46 | println "Starting server" 47 | server.start(); 48 | println "Server Started" 49 | //exchange.close(); 50 | //server.stop(3) //max wait 3 second 51 | } 52 | 53 | class RepoHandler implements HttpHandler { 54 | def server 55 | 56 | public void handle(HttpExchange exchange) throws IOException { 57 | println "getRequestMethod:" 58 | println exchange.getRequestMethod() 59 | println "getRequestHeaders:" 60 | println exchange.getRequestHeaders() 61 | println "getRequestURI:" 62 | def fileName = exchange.getRequestURI() 63 | println fileName 64 | 65 | def file = new File("." + fileName) 66 | def bytearray = new byte [(int)file.length()] 67 | def fis = new FileInputStream(file) 68 | def bis = new BufferedInputStream(fis) 69 | bis.read(bytearray, 0, bytearray.length) 70 | 71 | // ok, we are ready to send the response. 72 | exchange.sendResponseHeaders(200, file.length()) 73 | def os = exchange.getResponseBody() 74 | os.write(bytearray,0,bytearray.length) 75 | os.close() 76 | } 77 | } 78 | 79 | -------------------------------------------------------------------------------- /buildoop/lib/FileDownloader.groovy: -------------------------------------------------------------------------------- 1 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 2 | * 3 | * Licensed to the Apache Software Foundation (ASF) under one 4 | * or more contributor license agreements. See the NOTICE file 5 | * distributed with this work for additional information 6 | * regarding copyright ownership. The ASF licenses this file 7 | * to you under the Apache License, Version 2.0 (the 8 | * "License"); you may not use this file except in compliance 9 | * with the License. You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | */ 19 | import org.apache.log4j.* 20 | import groovy.util.logging.* 21 | import java.security.MessageDigest 22 | 23 | /** 24 | * Class for download the source files 25 | * 26 | * This class implements methods for HTTP, FTP, 27 | * GIT, SVN source code downloads. 
28 | * 29 | * @author Javi Roman 30 | * 31 | */ 32 | class FileDownloader { 33 | def BDROOT 34 | def LOG 35 | def globalConfig 36 | def runCommand 37 | 38 | def FileDownloader(buildoop) { 39 | LOG = buildoop.log 40 | BDROOT = buildoop.ROOT 41 | globalConfig = buildoop.globalConfig 42 | LOG.info "[FileDownloader] constructor, checking enviroment" 43 | 44 | String[] roots = [globalConfig.buildoop.classfolder] 45 | def engine = new GroovyScriptEngine(roots) 46 | def RunCommandClass = engine.loadScriptByName('RunCommand.groovy') 47 | runCommand = RunCommandClass.newInstance(buildoop.log) 48 | } 49 | 50 | def getMD5sum(file, len) { 51 | File f = new File(file) 52 | if (!f.exists() || !f.isFile()) { 53 | println "Invalid file $f provided" 54 | } 55 | 56 | def messageDigest = MessageDigest.getInstance("MD5") 57 | 58 | //long start = System.currentTimeMillis() 59 | 60 | f.eachByte(len) { byte[] buf, int bytesRead -> 61 | messageDigest.update(buf, 0, bytesRead); 62 | } 63 | 64 | def sha1Hex = new BigInteger(1, messageDigest.digest()).toString(16) 65 | 66 | //long delta = System.currentTimeMillis()-start 67 | 68 | return "$sha1Hex" 69 | } 70 | 71 | 72 | def downloadFromGIT(uri, git_hash, outFile) { 73 | 74 | def repository_folder = BDROOT + "/" + globalConfig.buildoop.downloads + 75 | "/" + uri.split('/')[-1] 76 | 77 | new File(repository_folder).mkdir() 78 | 79 | def repository = repository_folder + "/" + uri.split('/')[-1] 80 | 81 | def command = "git clone " + uri + " " + repository 82 | 83 | new AntBuilder().delete(dir: repository_folder) 84 | 85 | println "cloning repository: " + command 86 | println runCommand.runCommand(["bash", "-c", command]) 87 | 88 | command = "git " + "--work-tree " + repository + " --git-dir " + repository + 89 | "/.git" + " checkout " + 90 | git_hash 91 | 92 | println "checking out hash: " + command 93 | println runCommand.runCommand(["bash", "-c", command]) 94 | 95 | new AntBuilder().tar(destfile: outFile, 96 | basedir: repository_folder, 97 | longfile: "gnu", 98 | compression: "gzip", 99 | excludes: ".git") 100 | 101 | return 0 102 | } 103 | 104 | def downloadFromURL(address, outFile) { 105 | def contentLength 106 | 107 | def strUrl = address 108 | def url = new URL(strUrl) 109 | def connection = url.openConnection() 110 | connection.connect() 111 | 112 | // Check if the request is handled successfully 113 | if(connection.getResponseCode() / 100 == 2) { 114 | // size of the file to download (in bytes) 115 | contentLength = connection.getContentLength() 116 | } 117 | 118 | def file = new FileOutputStream(outFile) 119 | def out = new BufferedOutputStream(file) 120 | out << new URL(address).openStream() 121 | 122 | out.close() 123 | 124 | return contentLength 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /buildoop/lib/FileExtracter.groovy: -------------------------------------------------------------------------------- 1 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 2 | * 3 | * Licensed to the Apache Software Foundation (ASF) under one 4 | * or more contributor license agreements. See the NOTICE file 5 | * distributed with this work for additional information 6 | * regarding copyright ownership. The ASF licenses this file 7 | * to you under the Apache License, Version 2.0 (the 8 | * "License"); you may not use this file except in compliance 9 | * with the License. 
You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | */ 19 | import org.apache.log4j.* 20 | import groovy.util.logging.* 21 | 22 | /** 23 | * Class for ... 24 | * 25 | * http://ant.apache.org/manual/tasksoverview.html 26 | * 27 | * @author Javi Roman 28 | * 29 | */ 30 | class FileExtracter { 31 | def LOG 32 | 33 | def FileExtracter(log) { 34 | LOG = log 35 | LOG.info "[FileExtracter] constructor" 36 | } 37 | //def ant = new AntBuilder(); 38 | // ant.unzip( src:"file.zip", dest:".", overwrite:"true"){ mapper(type:"flatten")} 39 | } 40 | -------------------------------------------------------------------------------- /buildoop/lib/RunCommand.groovy: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env groovy 2 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | import org.apache.log4j.* 21 | import groovy.util.logging.* 22 | 23 | /** 24 | * Run generic commands 25 | * 26 | * This class 27 | * 28 | * @author Javi Roman 29 | * @author Marcelo Valle 30 | * 31 | */ 32 | class RunCommand { 33 | def LOG 34 | 35 | def RunCommand(log) { 36 | LOG = log 37 | LOG.info "[RunCommand] constructor" 38 | } 39 | 40 | def runCommand(strList) { 41 | assert (strList instanceof String || 42 | (strList instanceof List && strList.each{ it instanceof String })) 43 | 44 | def commandOutput = "" 45 | 46 | /* 47 | * -string.execute- currently make use of java.lang.Process 48 | * under the covers, the deficiencies of that class must 49 | * currently be taken into consideration. 50 | * http://groovy.codehaus.org/Process+Management 51 | * 52 | * java.lang.Process: in/out/err streams and exit code. 53 | */ 54 | def proc = strList.execute() 55 | 56 | /* 57 | * print InputStream of proc line at a line. This gobble the stdout of 58 | * the executed command. The try-catch is the recommended way to use 59 | * the streams in Java. This will make sure, that the system resources 60 | * associated with the stream will be released anyway. 
61 | */ 62 | try { 63 | proc.in.eachLine { 64 | line -> commandOutput += line + "\n" 65 | } 66 | } catch (e) { 67 | println "Stream closed" 68 | } finally { 69 | proc.in.close() 70 | } 71 | 72 | /* 73 | * Causes the current thread to wait, if necessary, until the 74 | * process represented by this Process object has terminated. 75 | */ 76 | println "waitFor process" 77 | proc.waitFor() 78 | 79 | print "[INFO] ( " 80 | if(strList instanceof List) { 81 | strList.each { print "${it} " } 82 | } else { 83 | print "command: " + strList 84 | } 85 | println " )" 86 | 87 | if (proc.exitValue()) { 88 | println "gave the following error: " 89 | println "[ERROR] ${proc.getErrorStream()}" 90 | } 91 | assert !proc.exitValue() 92 | 93 | return commandOutput 94 | } 95 | 96 | def runCommandGetOutput(strList) { 97 | assert (strList instanceof String || 98 | (strList instanceof List && strList.each{ it instanceof String })) 99 | 100 | /* 101 | * -string.execute- currently make use of java.lang.Process 102 | * under the covers, the deficiencies of that class must 103 | * currently be taken into consideration. 104 | * http://groovy.codehaus.org/Process+Management 105 | * 106 | * java.lang.Process: in/out/err streams and exit code. 107 | */ 108 | def proc = strList.execute() 109 | 110 | /* 111 | * print InputStream of proc line at a line. This gobble the stdout of 112 | * the executed command. The try-catch is the recommended way to use 113 | * the streams in Java. This will make sure, that the system resources 114 | * associated with the stream will be released anyway. 115 | */ 116 | def salidaComando = "" 117 | try { 118 | proc.in.eachLine { 119 | line -> salidaComando += line +"\n" 120 | } 121 | } catch (e) { 122 | println "Stream closed" 123 | } finally { 124 | proc.in.close() 125 | } 126 | 127 | /* 128 | * Causes the current thread to wait, if necessary, until the 129 | * process represented by this Process object has terminated. 130 | */ 131 | println "waitFor process" 132 | proc.waitFor() 133 | 134 | print "[INFO] ( " 135 | if(strList instanceof List) { 136 | strList.each { print "${it} " } 137 | } else { 138 | print "command: " + strList 139 | } 140 | println " )" 141 | 142 | if (proc.exitValue()) { 143 | println "gave the following error: " 144 | println "[ERROR] ${proc.getErrorStream()}" 145 | } 146 | assert !proc.exitValue() 147 | 148 | return salidaComando 149 | } 150 | } 151 | 152 | -------------------------------------------------------------------------------- /buildoop/lib/SanityChecking.groovy: -------------------------------------------------------------------------------- 1 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 2 | * 3 | * Licensed to the Apache Software Foundation (ASF) under one 4 | * or more contributor license agreements. See the NOTICE file 5 | * distributed with this work for additional information 6 | * regarding copyright ownership. The ASF licenses this file 7 | * to you under the Apache License, Version 2.0 (the 8 | * "License"); you may not use this file except in compliance 9 | * with the License. You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License.
18 | */ 19 | import org.apache.log4j.* 20 | import groovy.util.logging.* 21 | 22 | /** 23 | * Class for ... 24 | * 25 | * 26 | * @author Javi Roman 27 | * 28 | */ 29 | class SanityChecking { 30 | def LOG 31 | 32 | SanityChecking(log) { 33 | LOG = log 34 | LOG.info "SanityChecking constructor" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /buildoop/lib/SourceBuilder.groovy: -------------------------------------------------------------------------------- 1 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 2 | * 3 | * Licensed to the Apache Software Foundation (ASF) under one 4 | * or more contributor license agreements. See the NOTICE file 5 | * distributed with this work for additional information 6 | * regarding copyright ownership. The ASF licenses this file 7 | * to you under the Apache License, Version 2.0 (the 8 | * "License"); you may not use this file except in compliance 9 | * with the License. You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | */ 19 | import org.apache.log4j.* 20 | import groovy.util.logging.* 21 | 22 | /** 23 | * This class is for building native or stagging tools 24 | * which are not necessaries to fecth in a RPM/DEB package. 25 | * 26 | * If you have to make a RPM/DEB package you have to use 27 | * the class PackageBuilder. 28 | * 29 | * @author Javi Roman 30 | */ 31 | class SourceBuilder { 32 | def LOG 33 | 34 | def SourceBuilder(log) { 35 | LOG = log 36 | LOG.info "[SourceBuilder] constructor" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /conf/targets/targets.conf: -------------------------------------------------------------------------------- 1 | # Availables targets 2 | 3 | # RPM based 4 | RedHat-6.x 5 | RedHat-7.x 6 | Fedora-19 7 | Fedora-20 8 | CentOS-6.x 9 | CentOS-7.x 10 | 11 | # DEB based 12 | Ubuntu-12.x 13 | 14 | -------------------------------------------------------------------------------- /conf/tests/minimal.test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/conf/tests/minimal.test -------------------------------------------------------------------------------- /conf/tests/openbus-0.0.1.test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/conf/tests/openbus-0.0.1.test -------------------------------------------------------------------------------- /conf/tests/ping.test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/conf/tests/ping.test -------------------------------------------------------------------------------- /deploy/README: -------------------------------------------------------------------------------- 1 | deploy/ 2 | ├── cm ====> Configuration Management 3 | │   └── puppet 4 | └── vm =====> Vagrant cluster staff 5 | └── buildoop-cluster 6 | 
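A hedged sketch (not a tested procedure) of applying the puppet manifests by hand
on a node, assuming puppet is already installed and the files are copied to the
paths that manifests/site.pp expects ($extlookup_datadir points to
/etc/puppet/manifests/extdata):

    cp -r deploy/cm/puppet/manifests /etc/puppet/
    cp -r deploy/cm/puppet/modules /etc/puppet/
    # adjust extdata/site.csv (node roles, yum repo URI) for your cluster first
    puppet apply --modulepath=/etc/puppet/modules /etc/puppet/manifests/site.pp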
-------------------------------------------------------------------------------- /deploy/cm/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/.gitignore -------------------------------------------------------------------------------- /deploy/cm/README: -------------------------------------------------------------------------------- 1 | Configuration Management 2 | ~~~~~~~~~~~~~~~~~~~~~~~~ 3 | -------------------------------------------------------------------------------- /deploy/cm/puppet/manifests/cluster.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | class hadoop_base { 17 | info("Hadoop Base constructor") 18 | } 19 | 20 | class hadoop_datanode { 21 | info("Hadoop DN contructor") 22 | } 23 | 24 | class hadoop_resourcemanager { 25 | info("Hadoop RM contructor") 26 | } 27 | 28 | class hadoop_client { 29 | info("Hadoop Client contructor") 30 | } 31 | 32 | class hadoop_gateway { 33 | info("Hadoop GW contructor") 34 | } 35 | 36 | class hadoop_worker { 37 | info("Hadoop Worker contructor") 38 | 39 | } 40 | -------------------------------------------------------------------------------- /deploy/cm/puppet/manifests/extdata/site.csv: -------------------------------------------------------------------------------- 1 | # External Data lookup CSV file used for customize the 2 | # cluster. manifests/init.pp expects configuration 3 | # to live in this CSV. 4 | 5 | # Buildoop yum repository, or Spacewalk channel 6 | buildoop_yumrepo_uri,http://buildooprepo:8080/ 7 | 8 | # For use OpenJDK or Sun JDK 9 | jdk_package_name,jdk 10 | 11 | # NameNode list, for HDFS HA and Federation 12 | hadoop_datanodes,mncarsnas.condor.local,mncars001.condor.local 13 | 14 | # ResourceManager server 15 | hadoop_resourcemanager,mncars002.condor.local 16 | 17 | # Hadoop client server 18 | hadoop_client,none 19 | 20 | # Hadoop Gateway server 21 | hadoop_gateway,none 22 | 23 | # MapReduce HistoryServer 24 | hadoop_historyserver,node 25 | 26 | -------------------------------------------------------------------------------- /deploy/cm/puppet/manifests/site.pp: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # Base configuration variables 17 | 18 | $extlookup_datadir="/etc/puppet/manifests/extdata" 19 | $extlookup_precedence = ["site", "default"] 20 | $puppetserver = 'mncarsnas.condor.local' 21 | $default_buildoop_yumrepo_uri = "http://192.168.33.1:8080/" 22 | $jdk_package_name = extlookup("jdk_package_name", "jdk") 23 | 24 | # Base resources for all servers 25 | 26 | case $::operatingsystem { 27 | /(CentOS|RedHat)/: { 28 | yumrepo { "buildoop": 29 | baseurl => extlookup("buildoop_yumrepo_uri", $default_buildoop_yumrepo_uri), 30 | descr => "Buildoop Hadoop Ecosystem", 31 | enabled => 1, 32 | gpgcheck => 0, 33 | } 34 | } 35 | default: { 36 | notify{"WARNING: running on a non-yum platform -- make sure Buildoop repo is setup": } 37 | } 38 | } 39 | 40 | package { $jdk_package_name: 41 | ensure => "installed", 42 | alias => "jdk", 43 | } 44 | 45 | exec { "yum makecache": 46 | command => "/usr/bin/yum makecache", 47 | require => Yumrepo["buildoop"] 48 | } 49 | 50 | import "cluster.pp" 51 | 52 | # Server node roles available: 53 | # NameNodes 54 | # ResourceManager 55 | # Client 56 | # Gateway 57 | # HistoryServer 58 | # Workers 59 | node default { 60 | $hadoop_datanodes = extlookup("hadoop_datanodes") 61 | $hadoop_resourcemanager = extlookup("hadoop_resourcemanager") 62 | $hadoop_client = extlookup("hadoop_client") 63 | $hadoop_gateway = extlookup("hadoop_gateway") 64 | $hadoop_historyserver = extlookup("hadoop_historyserver") 65 | 66 | # This node logic has the following assumptions: 67 | # 68 | # 1. There is more than one NameNode, so $hadoop_datanodes 69 | # is a list of hostnames. This is due to HDFS HA and 70 | # Federation. 71 | # 2. There is only one ResourceManager, no is taken into 72 | # account the further YARN HA and horizontal scalability. 73 | # 3. All the manager nodes (NameNodes, and ResourceManager) 74 | # have a Zookeeper Server. 75 | # 4. All the NameNodes have a Zookeeper Failover Controller. 
76 | if $::fqdn in $hadoop_datanodes { 77 | info("Hadoop NameNode: ${fqdn}") 78 | include hadoop_datanode 79 | exec { "touch MIERDA": 80 | command => "/bin/touch /tmp/MIERDA", 81 | } 82 | } else { 83 | case $::fqdn { 84 | $hadoop_resourcemanager: { 85 | info("Hadoop ResourceManager: ${fqdn}") 86 | include hadoop_resourcemanager 87 | } 88 | $hadoop_client: { 89 | info("Hadoop Client: ${fqdn}") 90 | include hadoop_client 91 | } 92 | $hadoop_gateway: { 93 | info("Hadoop Gateway: ${fqdn}") 94 | include hadoop_gateway 95 | } 96 | default: { 97 | info("Hadoop Worker: ${fqdn}") 98 | include hadoop_worker 99 | } 100 | } 101 | } 102 | } 103 | 104 | 105 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/_slaves: -------------------------------------------------------------------------------- 1 | hadoop-node1.buildoop.org 2 | hadoop-node2.buildoop.org 3 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/capacity-scheduler.xml: -------------------------------------------------------------------------------- 1 | 14 | 15 | 16 | 17 | yarn.scheduler.capacity.maximum-applications 18 | 10000 19 | 20 | Maximum number of applications that can be pending and running. 21 | 22 | 23 | 24 | 25 | yarn.scheduler.capacity.maximum-am-resource-percent 26 | 0.1 27 | 28 | Maximum percent of resources in the cluster which can be used to run 29 | application masters i.e. controls number of concurrent running 30 | applications. 31 | 32 | 33 | 34 | 35 | yarn.scheduler.capacity.resource-calculator 36 | org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator 37 | 38 | The ResourceCalculator implementation to be used to compare 39 | Resources in the scheduler. 40 | The default i.e. DefaultResourceCalculator only uses Memory while 41 | DominantResourceCalculator uses dominant-resource to compare 42 | multi-dimensional resources such as Memory, CPU etc. 43 | 44 | 45 | 46 | 47 | yarn.scheduler.capacity.root.queues 48 | default 49 | 50 | The queues at the this level (root is the root queue). 51 | 52 | 53 | 54 | 55 | yarn.scheduler.capacity.root.default.capacity 56 | 100 57 | Default queue target capacity. 58 | 59 | 60 | 61 | yarn.scheduler.capacity.root.default.user-limit-factor 62 | 1 63 | 64 | Default queue user limit a percentage from 0.0 to 1.0. 65 | 66 | 67 | 68 | 69 | yarn.scheduler.capacity.root.default.maximum-capacity 70 | 100 71 | 72 | The maximum capacity of the default queue. 73 | 74 | 75 | 76 | 77 | yarn.scheduler.capacity.root.default.state 78 | RUNNING 79 | 80 | The state of the default queue. State can be one of RUNNING or STOPPED. 81 | 82 | 83 | 84 | 85 | yarn.scheduler.capacity.root.default.acl_submit_applications 86 | * 87 | 88 | The ACL of who can submit jobs to the default queue. 89 | 90 | 91 | 92 | 93 | yarn.scheduler.capacity.root.default.acl_administer_queue 94 | * 95 | 96 | The ACL of who can administer jobs on the default queue. 97 | 98 | 99 | 100 | 101 | yarn.scheduler.capacity.node-locality-delay 102 | -1 103 | 104 | Number of missed scheduling opportunities after which the CapacityScheduler 105 | attempts to schedule rack-local containers. 106 | Typically this should be set to number of racks in the cluster, this 107 | feature is disabled by default, set to -1. 
108 | 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
name value description
37 | 38 | 39 |
40 |
41 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/container-executor.cfg: -------------------------------------------------------------------------------- 1 | yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group 2 | banned.users=#comma separated list of users who can not run applications 3 | min.user.id=1000#Prevent other super-users 4 | allowed.system.users=##comma separated list of system users who CAN run applications 5 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | fs.defaultFS 21 | hdfs://buildoopcluster 22 | HA-enabled logical URI NameService ID 23 | 24 | 25 | 26 | 27 | 28 | ha.zookeeper.quorum 29 | hadoop-manager:2181,hadoop-node1:2181,hadoop-node2:2181 30 | This lists the host-port pairs running the ZooKeeper service 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/hadoop-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | @rem Set Hadoop-specific environment variables here. 18 | 19 | @rem The only required environment variable is JAVA_HOME. All others are 20 | @rem optional. When running a distributed configuration it is best to 21 | @rem set JAVA_HOME in this file, so that it is correctly defined on 22 | @rem remote nodes. 23 | 24 | @rem The java implementation to use. Required. 25 | set JAVA_HOME=%JAVA_HOME% 26 | 27 | @rem The jsvc implementation to use. Jsvc is required to run secure datanodes. 28 | @rem set JSVC_HOME=%JSVC_HOME% 29 | 30 | @rem set HADOOP_CONF_DIR= 31 | 32 | @rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 33 | if exist %HADOOP_HOME%\contrib\capacity-scheduler ( 34 | if not defined HADOOP_CLASSPATH ( 35 | set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 36 | ) else ( 37 | set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 38 | ) 39 | ) 40 | 41 | @rem The maximum amount of heap to use, in MB. Default is 1000. 42 | @rem set HADOOP_HEAPSIZE= 43 | @rem set HADOOP_NAMENODE_INIT_HEAPSIZE="" 44 | 45 | @rem Extra Java runtime options. Empty by default. 
46 | @rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true 47 | 48 | @rem Command specific options appended to HADOOP_OPTS when specified 49 | if not defined HADOOP_SECURITY_LOGGER ( 50 | set HADOOP_SECURITY_LOGGER=INFO,RFAS 51 | ) 52 | if not defined HDFS_AUDIT_LOGGER ( 53 | set HDFS_AUDIT_LOGGER=INFO,NullAppender 54 | ) 55 | 56 | set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS% 57 | set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS% 58 | set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS% 59 | 60 | @rem The following applies to multiple commands (fs, dfs, fsck, distcp etc) 61 | set HADOOP_CLIENT_OPTS=-Xmx128m %HADOOP_CLIENT_OPTS% 62 | @rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%" 63 | 64 | @rem On secure datanodes, user to run the datanode as after dropping privileges 65 | set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER% 66 | 67 | @rem Where log files are stored. %HADOOP_HOME%/logs by default. 68 | @rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME% 69 | 70 | @rem Where log files are stored in the secure data environment. 71 | set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER% 72 | 73 | @rem The directory where pid files are stored. /tmp by default. 74 | @rem NOTE: this should be set to a directory that can only be written to by 75 | @rem the user that will run the hadoop daemons. Otherwise there is the 76 | @rem potential for a symlink attack. 77 | set HADOOP_PID_DIR=%HADOOP_PID_DIR% 78 | set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR% 79 | 80 | @rem A string representing this instance of hadoop. %USERNAME% by default. 81 | set HADOOP_IDENT_STRING=%USERNAME% 82 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/hadoop-env.sh: -------------------------------------------------------------------------------- 1 | # Copyright 2011 The Apache Software Foundation 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # Set Hadoop-specific environment variables here. 20 | 21 | # The only required environment variable is JAVA_HOME. All others are 22 | # optional. When running a distributed configuration it is best to 23 | # set JAVA_HOME in this file, so that it is correctly defined on 24 | # remote nodes. 25 | 26 | # The java implementation to use. 27 | export JAVA_HOME=${JAVA_HOME} 28 | 29 | # The jsvc implementation to use. Jsvc is required to run secure datanodes. 
30 | #export JSVC_HOME=${JSVC_HOME} 31 | 32 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"} 33 | 34 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 35 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do 36 | if [ "$HADOOP_CLASSPATH" ]; then 37 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f 38 | else 39 | export HADOOP_CLASSPATH=$f 40 | fi 41 | done 42 | 43 | # The maximum amount of heap to use, in MB. Default is 1000. 44 | #export HADOOP_HEAPSIZE= 45 | #export HADOOP_NAMENODE_INIT_HEAPSIZE="" 46 | 47 | # Extra Java runtime options. Empty by default. 48 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true" 49 | 50 | # Command specific options appended to HADOOP_OPTS when specified 51 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" 52 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" 53 | 54 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" 55 | 56 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc) 57 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS" 58 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS" 59 | 60 | # On secure datanodes, user to run the datanode as after dropping privileges 61 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER} 62 | 63 | # Where log files are stored. $HADOOP_HOME/logs by default. 64 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER 65 | 66 | # Where log files are stored in the secure data environment. 67 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER} 68 | 69 | # The directory where pid files are stored. /tmp by default. 70 | # NOTE: this should be set to a directory that can only be written to by 71 | # the user that will run the hadoop daemons. Otherwise there is the 72 | # potential for a symlink attack. 73 | #export HADOOP_PID_DIR=${HADOOP_PID_DIR} 74 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR} 75 | 76 | # A string representing this instance of hadoop. $USER by default. 
77 | export HADOOP_IDENT_STRING=$USER 78 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/hadoop-metrics.properties: -------------------------------------------------------------------------------- 1 | # Configuration of the "dfs" context for null 2 | dfs.class=org.apache.hadoop.metrics.spi.NullContext 3 | 4 | # Configuration of the "dfs" context for file 5 | #dfs.class=org.apache.hadoop.metrics.file.FileContext 6 | #dfs.period=10 7 | #dfs.fileName=/tmp/dfsmetrics.log 8 | 9 | # Configuration of the "dfs" context for ganglia 10 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 11 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext 12 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 13 | # dfs.period=10 14 | # dfs.servers=localhost:8649 15 | 16 | 17 | # Configuration of the "mapred" context for null 18 | mapred.class=org.apache.hadoop.metrics.spi.NullContext 19 | 20 | # Configuration of the "mapred" context for file 21 | #mapred.class=org.apache.hadoop.metrics.file.FileContext 22 | #mapred.period=10 23 | #mapred.fileName=/tmp/mrmetrics.log 24 | 25 | # Configuration of the "mapred" context for ganglia 26 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 27 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext 28 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 29 | # mapred.period=10 30 | # mapred.servers=localhost:8649 31 | 32 | 33 | # Configuration of the "jvm" context for null 34 | #jvm.class=org.apache.hadoop.metrics.spi.NullContext 35 | 36 | # Configuration of the "jvm" context for file 37 | #jvm.class=org.apache.hadoop.metrics.file.FileContext 38 | #jvm.period=10 39 | #jvm.fileName=/tmp/jvmmetrics.log 40 | 41 | # Configuration of the "jvm" context for ganglia 42 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext 43 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 44 | # jvm.period=10 45 | # jvm.servers=localhost:8649 46 | 47 | # Configuration of the "rpc" context for null 48 | rpc.class=org.apache.hadoop.metrics.spi.NullContext 49 | 50 | # Configuration of the "rpc" context for file 51 | #rpc.class=org.apache.hadoop.metrics.file.FileContext 52 | #rpc.period=10 53 | #rpc.fileName=/tmp/rpcmetrics.log 54 | 55 | # Configuration of the "rpc" context for ganglia 56 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext 57 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 58 | # rpc.period=10 59 | # rpc.servers=localhost:8649 60 | 61 | 62 | # Configuration of the "ugi" context for null 63 | ugi.class=org.apache.hadoop.metrics.spi.NullContext 64 | 65 | # Configuration of the "ugi" context for file 66 | #ugi.class=org.apache.hadoop.metrics.file.FileContext 67 | #ugi.period=10 68 | #ugi.fileName=/tmp/ugimetrics.log 69 | 70 | # Configuration of the "ugi" context for ganglia 71 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext 72 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 73 | # ugi.period=10 74 | # ugi.servers=localhost:8649 75 | 76 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/hadoop-metrics2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. 
See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # syntax: [prefix].[source|sink].[instance].[options] 19 | # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details 20 | 21 | *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink 22 | # default sampling period, in seconds 23 | *.period=10 24 | 25 | # The namenode-metrics.out will contain metrics from all context 26 | #namenode.sink.file.filename=namenode-metrics.out 27 | # Specifying a special sampling period for namenode: 28 | #namenode.sink.*.period=8 29 | 30 | #datanode.sink.file.filename=datanode-metrics.out 31 | 32 | # the following example split metrics of different 33 | # context to different sinks (in this case files) 34 | #jobtracker.sink.file_jvm.context=jvm 35 | #jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out 36 | #jobtracker.sink.file_mapred.context=mapred 37 | #jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out 38 | 39 | #tasktracker.sink.file.filename=tasktracker-metrics.out 40 | 41 | #maptask.sink.file.filename=maptask-metrics.out 42 | 43 | #reducetask.sink.file.filename=reducetask-metrics.out 44 | 45 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 20 | 21 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/mapred-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 17 | 18 | export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 19 | 20 | export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 21 | 22 | #export HADOOP_JOB_HISTORYSERVER_OPTS= 23 | #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. 24 | #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. 25 | #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. 26 | #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default 27 | #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. 28 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/mapred-queues.xml.template: -------------------------------------------------------------------------------- 1 | 2 | 18 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | default 31 | 32 | 34 | 35 | 36 | 37 | 39 | running 40 | 41 | 54 | 55 | 56 | 73 | 74 | 75 | 76 | 92 | 93 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | mapreduce.framework.name 21 | yarn 22 | The runtime framework for executing 23 | MapReduce jobs. Can be one of local, classic or yarn. 24 | 25 | 26 | 27 | mapreduce.jobhistory.address 28 | hadoop-manager.buildoop.org:10020 29 | 30 | 31 | 32 | mapreduce.jobhistory.webapp.address 33 | hadoop-manager.buildoop.org:19888 34 | 35 | 36 | 37 | yarn.app.mapreduce.am.staging-dir 38 | /user 39 | 40 | 41 | 42 | 43 | 61 | 62 | 68 | 69 | 75 | 76 | 80 | 81 | 87 | 88 | 94 | 95 | 101 | 102 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/mapred-site.xml.template: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/slaves: -------------------------------------------------------------------------------- 1 | hadoop-node3.buildoop.org 2 | hadoop-node4.buildoop.org 3 | hadoop-node5.buildoop.org 4 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/ssl-client.xml.example: -------------------------------------------------------------------------------- 1 | 2 | 3 | 19 | 20 | 21 | 22 | ssl.client.truststore.location 23 | 24 | Truststore to be used by clients like distcp. Must be 25 | specified. 26 | 27 | 28 | 29 | 30 | ssl.client.truststore.password 31 | 32 | Optional. Default value is "". 
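The conf.openbus/mapred-site.xml above is likewise missing its tags. Under the same assumed property layout, it pins the MapReduce runtime to YARN and points the JobHistory server at hadoop-manager.buildoop.org, roughly:

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    <description>The runtime framework for executing MapReduce jobs.
      Can be one of local, classic or yarn.</description>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop-manager.buildoop.org:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop-manager.buildoop.org:19888</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/user</value>
  </property>
</configuration>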
33 | 34 | 35 | 36 | 37 | ssl.client.truststore.type 38 | jks 39 | Optional. The keystore file format, default value is "jks". 40 | 41 | 42 | 43 | 44 | ssl.client.truststore.reload.interval 45 | 10000 46 | Truststore reload check interval, in milliseconds. 47 | Default value is 10000 (10 seconds). 48 | 49 | 50 | 51 | 52 | ssl.client.keystore.location 53 | 54 | Keystore to be used by clients like distcp. Must be 55 | specified. 56 | 57 | 58 | 59 | 60 | ssl.client.keystore.password 61 | 62 | Optional. Default value is "". 63 | 64 | 65 | 66 | 67 | ssl.client.keystore.keypassword 68 | 69 | Optional. Default value is "". 70 | 71 | 72 | 73 | 74 | ssl.client.keystore.type 75 | jks 76 | Optional. The keystore file format, default value is "jks". 77 | 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/ssl-server.xml.example: -------------------------------------------------------------------------------- 1 | 2 | 3 | 19 | 20 | 21 | 22 | ssl.server.truststore.location 23 | 24 | Truststore to be used by NN and DN. Must be specified. 25 | 26 | 27 | 28 | 29 | ssl.server.truststore.password 30 | 31 | Optional. Default value is "". 32 | 33 | 34 | 35 | 36 | ssl.server.truststore.type 37 | jks 38 | Optional. The keystore file format, default value is "jks". 39 | 40 | 41 | 42 | 43 | ssl.server.truststore.reload.interval 44 | 10000 45 | Truststore reload check interval, in milliseconds. 46 | Default value is 10000 (10 seconds). 47 | 48 | 49 | 50 | ssl.server.keystore.location 51 | 52 | Keystore to be used by NN and DN. Must be specified. 53 | 54 | 55 | 56 | 57 | ssl.server.keystore.password 58 | 59 | Must be specified. 60 | 61 | 62 | 63 | 64 | ssl.server.keystore.keypassword 65 | 66 | Must be specified. 67 | 68 | 69 | 70 | 71 | ssl.server.keystore.type 72 | jks 73 | Optional. The keystore file format, default value is "jks". 74 | 75 | 76 | 77 | 78 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/yarn-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
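The two ssl-*.xml.example files above share one pattern: each truststore/keystore setting is a property whose value is left blank for the site to fill in, with jks as the default store format. Two client-side entries, reconstructed under that assumption:

<configuration>
  <property>
    <name>ssl.client.truststore.location</name>
    <value></value>
    <description>Truststore to be used by clients like distcp. Must be specified.</description>
  </property>
  <property>
    <name>ssl.client.truststore.type</name>
    <value>jks</value>
    <description>Optional. The keystore file format, default value is "jks".</description>
  </property>
</configuration>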
16 | 17 | @rem User for YARN daemons 18 | if not defined HADOOP_YARN_USER ( 19 | set HADOOP_YARN_USER=%yarn% 20 | ) 21 | 22 | if not defined YARN_CONF_DIR ( 23 | set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf 24 | ) 25 | 26 | if defined YARN_HEAPSIZE ( 27 | @rem echo run with Java heapsize %YARN_HEAPSIZE% 28 | set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m 29 | ) 30 | 31 | if not defined YARN_LOG_DIR ( 32 | set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs 33 | ) 34 | 35 | if not defined YARN_LOGFILE ( 36 | set YARN_LOGFILE=yarn.log 37 | ) 38 | 39 | @rem default policy file for service-level authorization 40 | if not defined YARN_POLICYFILE ( 41 | set YARN_POLICYFILE=hadoop-policy.xml 42 | ) 43 | 44 | if not defined YARN_ROOT_LOGGER ( 45 | set YARN_ROOT_LOGGER=INFO,console 46 | ) 47 | 48 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR% 49 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR% 50 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE% 51 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE% 52 | set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME% 53 | set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING% 54 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME% 55 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER% 56 | set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER% 57 | if defined JAVA_LIBRARY_PATH ( 58 | set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% 59 | ) 60 | set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE% -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/files/conf.openbus/yarn-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # User for YARN daemons 17 | export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} 18 | 19 | # resolve links - $0 may be a softlink 20 | export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" 21 | 22 | # some Java parameters 23 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 24 | if [ "$JAVA_HOME" != "" ]; then 25 | #echo "run java in $JAVA_HOME" 26 | JAVA_HOME=$JAVA_HOME 27 | fi 28 | 29 | if [ "$JAVA_HOME" = "" ]; then 30 | echo "Error: JAVA_HOME is not set." 
31 | exit 1 32 | fi 33 | 34 | JAVA=$JAVA_HOME/bin/java 35 | JAVA_HEAP_MAX=-Xmx1000m 36 | 37 | # For setting YARN specific HEAP sizes please use this 38 | # Parameter and set appropriately 39 | # YARN_HEAPSIZE=1000 40 | 41 | # check envvars which might override default args 42 | if [ "$YARN_HEAPSIZE" != "" ]; then 43 | JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" 44 | fi 45 | 46 | # Resource Manager specific parameters 47 | 48 | # Specify the max Heapsize for the ResourceManager using a numerical value 49 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 50 | # the value to 1000. 51 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 52 | # and/or YARN_RESOURCEMANAGER_OPTS. 53 | # If not specified, the default value will be picked from either YARN_HEAPMAX 54 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 55 | #export YARN_RESOURCEMANAGER_HEAPSIZE=1000 56 | 57 | # Specify the JVM options to be used when starting the ResourceManager. 58 | # These options will be appended to the options specified as YARN_OPTS 59 | # and therefore may override any similar flags set in YARN_OPTS 60 | #export YARN_RESOURCEMANAGER_OPTS= 61 | 62 | # Node Manager specific parameters 63 | 64 | # Specify the max Heapsize for the NodeManager using a numerical value 65 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 66 | # the value to 1000. 67 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 68 | # and/or YARN_NODEMANAGER_OPTS. 69 | # If not specified, the default value will be picked from either YARN_HEAPMAX 70 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 71 | #export YARN_NODEMANAGER_HEAPSIZE=1000 72 | 73 | # Specify the JVM options to be used when starting the NodeManager. 
74 | # These options will be appended to the options specified as YARN_OPTS 75 | # and therefore may override any similar flags set in YARN_OPTS 76 | #export YARN_NODEMANAGER_OPTS= 77 | 78 | # so that filenames w/ spaces are handled correctly in loops below 79 | IFS= 80 | 81 | 82 | # default log directory & file 83 | if [ "$YARN_LOG_DIR" = "" ]; then 84 | YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" 85 | fi 86 | if [ "$YARN_LOGFILE" = "" ]; then 87 | YARN_LOGFILE='yarn.log' 88 | fi 89 | 90 | # default policy file for service-level authorization 91 | if [ "$YARN_POLICYFILE" = "" ]; then 92 | YARN_POLICYFILE="hadoop-policy.xml" 93 | fi 94 | 95 | # restore ordinary behaviour 96 | unset IFS 97 | 98 | 99 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" 100 | YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" 101 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" 102 | YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" 103 | YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" 104 | YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" 105 | YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 106 | YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 107 | if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then 108 | YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" 109 | fi 110 | YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" 111 | 112 | 113 | -------------------------------------------------------------------------------- /deploy/cm/puppet/modules/hadoop-conf/manifests/init.pp: -------------------------------------------------------------------------------- 1 | class hadoop-conf { 2 | # one resource in this class: one file resource. 3 | 4 | file {"/etc/hadoop/conf.openbus": 5 | recurse => true, 6 | owner => 'root', 7 | group => 'root', 8 | mode => 0755, 9 | source => "puppet://$puppetserver/modules/hadoop-conf/conf.openbus", 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /deploy/cm/test-config/buildoop.repo: -------------------------------------------------------------------------------- 1 | [buildoop] 2 | name=Buildoop Hadoop Ecosystem 3 | baseurl=http://192.168.33.1:8080/ 4 | enabled=1 5 | gpgcheck=0 6 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/capacity-scheduler.xml: -------------------------------------------------------------------------------- 1 | 14 | 15 | 16 | 17 | yarn.scheduler.capacity.maximum-applications 18 | 10000 19 | 20 | Maximum number of applications that can be pending and running. 21 | 22 | 23 | 24 | 25 | yarn.scheduler.capacity.maximum-am-resource-percent 26 | 0.1 27 | 28 | Maximum percent of resources in the cluster which can be used to run 29 | application masters i.e. controls number of concurrent running 30 | applications. 31 | 32 | 33 | 34 | 35 | yarn.scheduler.capacity.resource-calculator 36 | org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator 37 | 38 | The ResourceCalculator implementation to be used to compare 39 | Resources in the scheduler. 40 | The default i.e. DefaultResourceCalculator only uses Memory while 41 | DominantResourceCalculator uses dominant-resource to compare 42 | multi-dimensional resources such as Memory, CPU etc. 43 | 44 | 45 | 46 | 47 | yarn.scheduler.capacity.root.queues 48 | default 49 | 50 | The queues at the this level (root is the root queue). 
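The capacity-scheduler.xml that begins above and continues just below keeps the stock YARN setup: a single "default" queue owning 100% of the cluster. With the stripped tags restored under the usual layout, its key entries read roughly:

<configuration>
  <property>
    <name>yarn.scheduler.capacity.root.queues</name>
    <value>default</value>
    <description>The queues at this level (root is the root queue).</description>
  </property>
  <property>
    <name>yarn.scheduler.capacity.root.default.capacity</name>
    <value>100</value>
    <description>Default queue target capacity.</description>
  </property>
  <property>
    <name>yarn.scheduler.capacity.maximum-applications</name>
    <value>10000</value>
    <description>Maximum number of applications that can be pending and running.</description>
  </property>
</configuration>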
51 | 52 | 53 | 54 | 55 | yarn.scheduler.capacity.root.default.capacity 56 | 100 57 | Default queue target capacity. 58 | 59 | 60 | 61 | yarn.scheduler.capacity.root.default.user-limit-factor 62 | 1 63 | 64 | Default queue user limit a percentage from 0.0 to 1.0. 65 | 66 | 67 | 68 | 69 | yarn.scheduler.capacity.root.default.maximum-capacity 70 | 100 71 | 72 | The maximum capacity of the default queue. 73 | 74 | 75 | 76 | 77 | yarn.scheduler.capacity.root.default.state 78 | RUNNING 79 | 80 | The state of the default queue. State can be one of RUNNING or STOPPED. 81 | 82 | 83 | 84 | 85 | yarn.scheduler.capacity.root.default.acl_submit_applications 86 | * 87 | 88 | The ACL of who can submit jobs to the default queue. 89 | 90 | 91 | 92 | 93 | yarn.scheduler.capacity.root.default.acl_administer_queue 94 | * 95 | 96 | The ACL of who can administer jobs on the default queue. 97 | 98 | 99 | 100 | 101 | yarn.scheduler.capacity.node-locality-delay 102 | -1 103 | 104 | Number of missed scheduling opportunities after which the CapacityScheduler 105 | attempts to schedule rack-local containers. 106 | Typically this should be set to number of racks in the cluster, this 107 | feature is disabled by default, set to -1. 108 | 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
name / value / description
37 | 38 | 39 | 40 |
41 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/container-executor.cfg: -------------------------------------------------------------------------------- 1 | yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group 2 | banned.users=#comma separated list of users who can not run applications 3 | min.user.id=1000#Prevent other super-users 4 | allowed.system.users=##comma separated list of system users who CAN run applications 5 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | 21 | fs.defaultFS 22 | hdfs://hadoop-manager.buildoop.org:8020 23 | Enter your NameNode hostname 24 | 25 | 26 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/hadoop-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | @rem Set Hadoop-specific environment variables here. 18 | 19 | @rem The only required environment variable is JAVA_HOME. All others are 20 | @rem optional. When running a distributed configuration it is best to 21 | @rem set JAVA_HOME in this file, so that it is correctly defined on 22 | @rem remote nodes. 23 | 24 | @rem The java implementation to use. Required. 25 | set JAVA_HOME=%JAVA_HOME% 26 | 27 | @rem The jsvc implementation to use. Jsvc is required to run secure datanodes. 28 | @rem set JSVC_HOME=%JSVC_HOME% 29 | 30 | @rem set HADOOP_CONF_DIR= 31 | 32 | @rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 33 | if exist %HADOOP_HOME%\contrib\capacity-scheduler ( 34 | if not defined HADOOP_CLASSPATH ( 35 | set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 36 | ) else ( 37 | set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 38 | ) 39 | ) 40 | 41 | @rem The maximum amount of heap to use, in MB. Default is 1000. 42 | @rem set HADOOP_HEAPSIZE= 43 | @rem set HADOOP_NAMENODE_INIT_HEAPSIZE="" 44 | 45 | @rem Extra Java runtime options. Empty by default. 
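For comparison with the HA variant sketched earlier, the conf.openbus.basic/core-site.xml above points fs.defaultFS at a single NameNode; under the same assumed layout it is just:

<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop-manager.buildoop.org:8020</value>
    <description>Enter your NameNode hostname</description>
  </property>
</configuration>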
46 | @rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true 47 | 48 | @rem Command specific options appended to HADOOP_OPTS when specified 49 | if not defined HADOOP_SECURITY_LOGGER ( 50 | set HADOOP_SECURITY_LOGGER=INFO,RFAS 51 | ) 52 | if not defined HDFS_AUDIT_LOGGER ( 53 | set HDFS_AUDIT_LOGGER=INFO,NullAppender 54 | ) 55 | 56 | set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS% 57 | set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS% 58 | set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS% 59 | 60 | @rem The following applies to multiple commands (fs, dfs, fsck, distcp etc) 61 | set HADOOP_CLIENT_OPTS=-Xmx128m %HADOOP_CLIENT_OPTS% 62 | @rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%" 63 | 64 | @rem On secure datanodes, user to run the datanode as after dropping privileges 65 | set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER% 66 | 67 | @rem Where log files are stored. %HADOOP_HOME%/logs by default. 68 | @rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME% 69 | 70 | @rem Where log files are stored in the secure data environment. 71 | set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER% 72 | 73 | @rem The directory where pid files are stored. /tmp by default. 74 | @rem NOTE: this should be set to a directory that can only be written to by 75 | @rem the user that will run the hadoop daemons. Otherwise there is the 76 | @rem potential for a symlink attack. 77 | set HADOOP_PID_DIR=%HADOOP_PID_DIR% 78 | set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR% 79 | 80 | @rem A string representing this instance of hadoop. %USERNAME% by default. 81 | set HADOOP_IDENT_STRING=%USERNAME% 82 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/hadoop-env.sh: -------------------------------------------------------------------------------- 1 | # Copyright 2011 The Apache Software Foundation 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # Set Hadoop-specific environment variables here. 20 | 21 | # The only required environment variable is JAVA_HOME. All others are 22 | # optional. When running a distributed configuration it is best to 23 | # set JAVA_HOME in this file, so that it is correctly defined on 24 | # remote nodes. 25 | 26 | # The java implementation to use. 27 | export JAVA_HOME=${JAVA_HOME} 28 | 29 | # The jsvc implementation to use. Jsvc is required to run secure datanodes. 
30 | #export JSVC_HOME=${JSVC_HOME} 31 | 32 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"} 33 | 34 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 35 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do 36 | if [ "$HADOOP_CLASSPATH" ]; then 37 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f 38 | else 39 | export HADOOP_CLASSPATH=$f 40 | fi 41 | done 42 | 43 | # The maximum amount of heap to use, in MB. Default is 1000. 44 | #export HADOOP_HEAPSIZE= 45 | #export HADOOP_NAMENODE_INIT_HEAPSIZE="" 46 | 47 | # Extra Java runtime options. Empty by default. 48 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true" 49 | 50 | # Command specific options appended to HADOOP_OPTS when specified 51 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" 52 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" 53 | 54 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" 55 | 56 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc) 57 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS" 58 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS" 59 | 60 | # On secure datanodes, user to run the datanode as after dropping privileges 61 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER} 62 | 63 | # Where log files are stored. $HADOOP_HOME/logs by default. 64 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER 65 | 66 | # Where log files are stored in the secure data environment. 67 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER} 68 | 69 | # The directory where pid files are stored. /tmp by default. 70 | # NOTE: this should be set to a directory that can only be written to by 71 | # the user that will run the hadoop daemons. Otherwise there is the 72 | # potential for a symlink attack. 73 | #export HADOOP_PID_DIR=${HADOOP_PID_DIR} 74 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR} 75 | 76 | # A string representing this instance of hadoop. $USER by default. 
77 | export HADOOP_IDENT_STRING=$USER 78 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/hadoop-metrics.properties: -------------------------------------------------------------------------------- 1 | # Configuration of the "dfs" context for null 2 | dfs.class=org.apache.hadoop.metrics.spi.NullContext 3 | 4 | # Configuration of the "dfs" context for file 5 | #dfs.class=org.apache.hadoop.metrics.file.FileContext 6 | #dfs.period=10 7 | #dfs.fileName=/tmp/dfsmetrics.log 8 | 9 | # Configuration of the "dfs" context for ganglia 10 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 11 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext 12 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 13 | # dfs.period=10 14 | # dfs.servers=localhost:8649 15 | 16 | 17 | # Configuration of the "mapred" context for null 18 | mapred.class=org.apache.hadoop.metrics.spi.NullContext 19 | 20 | # Configuration of the "mapred" context for file 21 | #mapred.class=org.apache.hadoop.metrics.file.FileContext 22 | #mapred.period=10 23 | #mapred.fileName=/tmp/mrmetrics.log 24 | 25 | # Configuration of the "mapred" context for ganglia 26 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 27 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext 28 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 29 | # mapred.period=10 30 | # mapred.servers=localhost:8649 31 | 32 | 33 | # Configuration of the "jvm" context for null 34 | #jvm.class=org.apache.hadoop.metrics.spi.NullContext 35 | 36 | # Configuration of the "jvm" context for file 37 | #jvm.class=org.apache.hadoop.metrics.file.FileContext 38 | #jvm.period=10 39 | #jvm.fileName=/tmp/jvmmetrics.log 40 | 41 | # Configuration of the "jvm" context for ganglia 42 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext 43 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 44 | # jvm.period=10 45 | # jvm.servers=localhost:8649 46 | 47 | # Configuration of the "rpc" context for null 48 | rpc.class=org.apache.hadoop.metrics.spi.NullContext 49 | 50 | # Configuration of the "rpc" context for file 51 | #rpc.class=org.apache.hadoop.metrics.file.FileContext 52 | #rpc.period=10 53 | #rpc.fileName=/tmp/rpcmetrics.log 54 | 55 | # Configuration of the "rpc" context for ganglia 56 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext 57 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 58 | # rpc.period=10 59 | # rpc.servers=localhost:8649 60 | 61 | 62 | # Configuration of the "ugi" context for null 63 | ugi.class=org.apache.hadoop.metrics.spi.NullContext 64 | 65 | # Configuration of the "ugi" context for file 66 | #ugi.class=org.apache.hadoop.metrics.file.FileContext 67 | #ugi.period=10 68 | #ugi.fileName=/tmp/ugimetrics.log 69 | 70 | # Configuration of the "ugi" context for ganglia 71 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext 72 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 73 | # ugi.period=10 74 | # ugi.servers=localhost:8649 75 | 76 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/hadoop-metrics2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. 
See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # syntax: [prefix].[source|sink].[instance].[options] 19 | # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details 20 | 21 | *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink 22 | # default sampling period, in seconds 23 | *.period=10 24 | 25 | # The namenode-metrics.out will contain metrics from all context 26 | #namenode.sink.file.filename=namenode-metrics.out 27 | # Specifying a special sampling period for namenode: 28 | #namenode.sink.*.period=8 29 | 30 | #datanode.sink.file.filename=datanode-metrics.out 31 | 32 | # the following example split metrics of different 33 | # context to different sinks (in this case files) 34 | #jobtracker.sink.file_jvm.context=jvm 35 | #jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out 36 | #jobtracker.sink.file_mapred.context=mapred 37 | #jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out 38 | 39 | #tasktracker.sink.file.filename=tasktracker-metrics.out 40 | 41 | #maptask.sink.file.filename=maptask-metrics.out 42 | 43 | #reducetask.sink.file.filename=reducetask-metrics.out 44 | 45 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/hdfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | 21 | dfs.namenode.name.dir 22 | /cluster/metadata/dfs/nn 23 | true 24 | Determines where on the local filesystem the DFS name node 25 | should store the name table(fsimage). If this is a comma-delimited 26 | list of directories then the name table is replicated in all of 27 | the directories, for redundancy 28 | 29 | 30 | dfs.datanode.data.dir 31 | /cluster/data/1/dfs/dn 32 | true 33 | Determines where on the local filesystem an DFS data node 34 | should store its blocks. If this is a comma-delimited list of directories, 35 | then data will be stored in all named directories, typically on different 36 | devices. Directories that do not exist are ignored. This property specifies 37 | the directories where the DataNode stores blocks. The recommendation is 38 | that you configure the disks on the DataNode in a JBOD configuration, 39 | mounted at /data/1/ through /data/N, and configure dfs.datanode.data.dir 40 | to specify /data/1/dfs/dn through /data/N/dfs/dn/. 41 | 42 | 43 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 
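The conf.openbus.basic/hdfs-site.xml above carries the on-disk layout for the test cluster: NameNode metadata under /cluster/metadata/dfs/nn and DataNode block storage under /cluster/data/1/dfs/dn, both marked final. With the stripped tags restored under the usual layout it reads roughly:

<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/cluster/metadata/dfs/nn</value>
    <final>true</final>
    <description>Determines where on the local filesystem the DFS name node
      should store the name table (fsimage).</description>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/cluster/data/1/dfs/dn</value>
    <final>true</final>
    <description>Directories where the DataNode stores blocks; for a JBOD
      layout, list /data/1/dfs/dn through /data/N/dfs/dn.</description>
  </property>
</configuration>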
5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 20 | 21 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/mapred-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 17 | 18 | export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 19 | 20 | export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 21 | 22 | #export HADOOP_JOB_HISTORYSERVER_OPTS= 23 | #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. 24 | #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. 25 | #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. 26 | #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default 27 | #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. 28 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | mapreduce.framework.name 21 | yarn 22 | The runtime framework for executing 23 | MapReduce jobs. Can be one of local, classic or yarn. 24 | 25 | 26 | 27 | 28 | 46 | 47 | 53 | 54 | 60 | 61 | 65 | 66 | 72 | 73 | 79 | 80 | 86 | 87 | 93 | 94 | 95 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/yarn-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 
5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | @rem User for YARN daemons 18 | if not defined HADOOP_YARN_USER ( 19 | set HADOOP_YARN_USER=%yarn% 20 | ) 21 | 22 | if not defined YARN_CONF_DIR ( 23 | set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf 24 | ) 25 | 26 | if defined YARN_HEAPSIZE ( 27 | @rem echo run with Java heapsize %YARN_HEAPSIZE% 28 | set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m 29 | ) 30 | 31 | if not defined YARN_LOG_DIR ( 32 | set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs 33 | ) 34 | 35 | if not defined YARN_LOGFILE ( 36 | set YARN_LOGFILE=yarn.log 37 | ) 38 | 39 | @rem default policy file for service-level authorization 40 | if not defined YARN_POLICYFILE ( 41 | set YARN_POLICYFILE=hadoop-policy.xml 42 | ) 43 | 44 | if not defined YARN_ROOT_LOGGER ( 45 | set YARN_ROOT_LOGGER=INFO,console 46 | ) 47 | 48 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR% 49 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR% 50 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE% 51 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE% 52 | set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME% 53 | set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING% 54 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME% 55 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER% 56 | set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER% 57 | if defined JAVA_LIBRARY_PATH ( 58 | set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% 59 | ) 60 | set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE% -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.basic/yarn-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | # User for YARN daemons 17 | export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} 18 | 19 | # resolve links - $0 may be a softlink 20 | export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" 21 | 22 | # some Java parameters 23 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 24 | if [ "$JAVA_HOME" != "" ]; then 25 | #echo "run java in $JAVA_HOME" 26 | JAVA_HOME=$JAVA_HOME 27 | fi 28 | 29 | if [ "$JAVA_HOME" = "" ]; then 30 | echo "Error: JAVA_HOME is not set." 31 | exit 1 32 | fi 33 | 34 | JAVA=$JAVA_HOME/bin/java 35 | JAVA_HEAP_MAX=-Xmx1000m 36 | 37 | # For setting YARN specific HEAP sizes please use this 38 | # Parameter and set appropriately 39 | # YARN_HEAPSIZE=1000 40 | 41 | # check envvars which might override default args 42 | if [ "$YARN_HEAPSIZE" != "" ]; then 43 | JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" 44 | fi 45 | 46 | # Resource Manager specific parameters 47 | 48 | # Specify the max Heapsize for the ResourceManager using a numerical value 49 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 50 | # the value to 1000. 51 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 52 | # and/or YARN_RESOURCEMANAGER_OPTS. 53 | # If not specified, the default value will be picked from either YARN_HEAPMAX 54 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 55 | #export YARN_RESOURCEMANAGER_HEAPSIZE=1000 56 | 57 | # Specify the JVM options to be used when starting the ResourceManager. 58 | # These options will be appended to the options specified as YARN_OPTS 59 | # and therefore may override any similar flags set in YARN_OPTS 60 | #export YARN_RESOURCEMANAGER_OPTS= 61 | 62 | # Node Manager specific parameters 63 | 64 | # Specify the max Heapsize for the NodeManager using a numerical value 65 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 66 | # the value to 1000. 67 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 68 | # and/or YARN_NODEMANAGER_OPTS. 69 | # If not specified, the default value will be picked from either YARN_HEAPMAX 70 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 71 | #export YARN_NODEMANAGER_HEAPSIZE=1000 72 | 73 | # Specify the JVM options to be used when starting the NodeManager. 
74 | # These options will be appended to the options specified as YARN_OPTS 75 | # and therefore may override any similar flags set in YARN_OPTS 76 | #export YARN_NODEMANAGER_OPTS= 77 | 78 | # so that filenames w/ spaces are handled correctly in loops below 79 | IFS= 80 | 81 | 82 | # default log directory & file 83 | if [ "$YARN_LOG_DIR" = "" ]; then 84 | YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" 85 | fi 86 | if [ "$YARN_LOGFILE" = "" ]; then 87 | YARN_LOGFILE='yarn.log' 88 | fi 89 | 90 | # default policy file for service-level authorization 91 | if [ "$YARN_POLICYFILE" = "" ]; then 92 | YARN_POLICYFILE="hadoop-policy.xml" 93 | fi 94 | 95 | # restore ordinary behaviour 96 | unset IFS 97 | 98 | 99 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" 100 | YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" 101 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" 102 | YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" 103 | YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" 104 | YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" 105 | YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 106 | YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 107 | if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then 108 | YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" 109 | fi 110 | YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" 111 | 112 | 113 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/capacity-scheduler.xml: -------------------------------------------------------------------------------- 1 | 14 | 15 | 16 | 17 | yarn.scheduler.capacity.maximum-applications 18 | 10000 19 | 20 | Maximum number of applications that can be pending and running. 21 | 22 | 23 | 24 | 25 | yarn.scheduler.capacity.maximum-am-resource-percent 26 | 0.1 27 | 28 | Maximum percent of resources in the cluster which can be used to run 29 | application masters i.e. controls number of concurrent running 30 | applications. 31 | 32 | 33 | 34 | 35 | yarn.scheduler.capacity.resource-calculator 36 | org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator 37 | 38 | The ResourceCalculator implementation to be used to compare 39 | Resources in the scheduler. 40 | The default i.e. DefaultResourceCalculator only uses Memory while 41 | DominantResourceCalculator uses dominant-resource to compare 42 | multi-dimensional resources such as Memory, CPU etc. 43 | 44 | 45 | 46 | 47 | yarn.scheduler.capacity.root.queues 48 | default 49 | 50 | The queues at the this level (root is the root queue). 51 | 52 | 53 | 54 | 55 | yarn.scheduler.capacity.root.default.capacity 56 | 100 57 | Default queue target capacity. 58 | 59 | 60 | 61 | yarn.scheduler.capacity.root.default.user-limit-factor 62 | 1 63 | 64 | Default queue user limit a percentage from 0.0 to 1.0. 65 | 66 | 67 | 68 | 69 | yarn.scheduler.capacity.root.default.maximum-capacity 70 | 100 71 | 72 | The maximum capacity of the default queue. 73 | 74 | 75 | 76 | 77 | yarn.scheduler.capacity.root.default.state 78 | RUNNING 79 | 80 | The state of the default queue. State can be one of RUNNING or STOPPED. 81 | 82 | 83 | 84 | 85 | yarn.scheduler.capacity.root.default.acl_submit_applications 86 | * 87 | 88 | The ACL of who can submit jobs to the default queue. 89 | 90 | 91 | 92 | 93 | yarn.scheduler.capacity.root.default.acl_administer_queue 94 | * 95 | 96 | The ACL of who can administer jobs on the default queue. 
97 | 98 | 99 | 100 | 101 | yarn.scheduler.capacity.node-locality-delay 102 | -1 103 | 104 | Number of missed scheduling opportunities after which the CapacityScheduler 105 | attempts to schedule rack-local containers. 106 | Typically this should be set to number of racks in the cluster, this 107 | feature is disabled by default, set to -1. 108 | 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
name value description
37 | 38 | 39 |
40 |
41 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/container-executor.cfg: -------------------------------------------------------------------------------- 1 | yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group 2 | banned.users=#comma separated list of users who can not run applications 3 | min.user.id=1000#Prevent other super-users 4 | allowed.system.users=##comma separated list of system users who CAN run applications 5 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/hadoop-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | @rem Set Hadoop-specific environment variables here. 18 | 19 | @rem The only required environment variable is JAVA_HOME. All others are 20 | @rem optional. When running a distributed configuration it is best to 21 | @rem set JAVA_HOME in this file, so that it is correctly defined on 22 | @rem remote nodes. 23 | 24 | @rem The java implementation to use. Required. 25 | set JAVA_HOME=%JAVA_HOME% 26 | 27 | @rem The jsvc implementation to use. Jsvc is required to run secure datanodes. 28 | @rem set JSVC_HOME=%JSVC_HOME% 29 | 30 | @rem set HADOOP_CONF_DIR= 31 | 32 | @rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 33 | if exist %HADOOP_HOME%\contrib\capacity-scheduler ( 34 | if not defined HADOOP_CLASSPATH ( 35 | set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 36 | ) else ( 37 | set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 38 | ) 39 | ) 40 | 41 | @rem The maximum amount of heap to use, in MB. Default is 1000. 42 | @rem set HADOOP_HEAPSIZE= 43 | @rem set HADOOP_NAMENODE_INIT_HEAPSIZE="" 44 | 45 | @rem Extra Java runtime options. Empty by default. 
46 | @rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true 47 | 48 | @rem Command specific options appended to HADOOP_OPTS when specified 49 | if not defined HADOOP_SECURITY_LOGGER ( 50 | set HADOOP_SECURITY_LOGGER=INFO,RFAS 51 | ) 52 | if not defined HDFS_AUDIT_LOGGER ( 53 | set HDFS_AUDIT_LOGGER=INFO,NullAppender 54 | ) 55 | 56 | set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS% 57 | set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS% 58 | set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS% 59 | 60 | @rem The following applies to multiple commands (fs, dfs, fsck, distcp etc) 61 | set HADOOP_CLIENT_OPTS=-Xmx128m %HADOOP_CLIENT_OPTS% 62 | @rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%" 63 | 64 | @rem On secure datanodes, user to run the datanode as after dropping privileges 65 | set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER% 66 | 67 | @rem Where log files are stored. %HADOOP_HOME%/logs by default. 68 | @rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME% 69 | 70 | @rem Where log files are stored in the secure data environment. 71 | set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER% 72 | 73 | @rem The directory where pid files are stored. /tmp by default. 74 | @rem NOTE: this should be set to a directory that can only be written to by 75 | @rem the user that will run the hadoop daemons. Otherwise there is the 76 | @rem potential for a symlink attack. 77 | set HADOOP_PID_DIR=%HADOOP_PID_DIR% 78 | set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR% 79 | 80 | @rem A string representing this instance of hadoop. %USERNAME% by default. 81 | set HADOOP_IDENT_STRING=%USERNAME% 82 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/hadoop-env.sh: -------------------------------------------------------------------------------- 1 | # Copyright 2011 The Apache Software Foundation 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # Set Hadoop-specific environment variables here. 20 | 21 | # The only required environment variable is JAVA_HOME. All others are 22 | # optional. When running a distributed configuration it is best to 23 | # set JAVA_HOME in this file, so that it is correctly defined on 24 | # remote nodes. 25 | 26 | # The java implementation to use. 27 | export JAVA_HOME=${JAVA_HOME} 28 | 29 | # The jsvc implementation to use. Jsvc is required to run secure datanodes. 
30 | #export JSVC_HOME=${JSVC_HOME} 31 | 32 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"} 33 | 34 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 35 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do 36 | if [ "$HADOOP_CLASSPATH" ]; then 37 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f 38 | else 39 | export HADOOP_CLASSPATH=$f 40 | fi 41 | done 42 | 43 | # The maximum amount of heap to use, in MB. Default is 1000. 44 | #export HADOOP_HEAPSIZE= 45 | #export HADOOP_NAMENODE_INIT_HEAPSIZE="" 46 | 47 | # Extra Java runtime options. Empty by default. 48 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true" 49 | 50 | # Command specific options appended to HADOOP_OPTS when specified 51 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" 52 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" 53 | 54 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" 55 | 56 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc) 57 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS" 58 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS" 59 | 60 | # On secure datanodes, user to run the datanode as after dropping privileges 61 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER} 62 | 63 | # Where log files are stored. $HADOOP_HOME/logs by default. 64 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER 65 | 66 | # Where log files are stored in the secure data environment. 67 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER} 68 | 69 | # The directory where pid files are stored. /tmp by default. 70 | # NOTE: this should be set to a directory that can only be written to by 71 | # the user that will run the hadoop daemons. Otherwise there is the 72 | # potential for a symlink attack. 73 | #export HADOOP_PID_DIR=${HADOOP_PID_DIR} 74 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR} 75 | 76 | # A string representing this instance of hadoop. $USER by default. 
77 | export HADOOP_IDENT_STRING=$USER 78 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/hadoop-metrics.properties: -------------------------------------------------------------------------------- 1 | # Configuration of the "dfs" context for null 2 | dfs.class=org.apache.hadoop.metrics.spi.NullContext 3 | 4 | # Configuration of the "dfs" context for file 5 | #dfs.class=org.apache.hadoop.metrics.file.FileContext 6 | #dfs.period=10 7 | #dfs.fileName=/tmp/dfsmetrics.log 8 | 9 | # Configuration of the "dfs" context for ganglia 10 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 11 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext 12 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 13 | # dfs.period=10 14 | # dfs.servers=localhost:8649 15 | 16 | 17 | # Configuration of the "mapred" context for null 18 | mapred.class=org.apache.hadoop.metrics.spi.NullContext 19 | 20 | # Configuration of the "mapred" context for file 21 | #mapred.class=org.apache.hadoop.metrics.file.FileContext 22 | #mapred.period=10 23 | #mapred.fileName=/tmp/mrmetrics.log 24 | 25 | # Configuration of the "mapred" context for ganglia 26 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 27 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext 28 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 29 | # mapred.period=10 30 | # mapred.servers=localhost:8649 31 | 32 | 33 | # Configuration of the "jvm" context for null 34 | #jvm.class=org.apache.hadoop.metrics.spi.NullContext 35 | 36 | # Configuration of the "jvm" context for file 37 | #jvm.class=org.apache.hadoop.metrics.file.FileContext 38 | #jvm.period=10 39 | #jvm.fileName=/tmp/jvmmetrics.log 40 | 41 | # Configuration of the "jvm" context for ganglia 42 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext 43 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 44 | # jvm.period=10 45 | # jvm.servers=localhost:8649 46 | 47 | # Configuration of the "rpc" context for null 48 | rpc.class=org.apache.hadoop.metrics.spi.NullContext 49 | 50 | # Configuration of the "rpc" context for file 51 | #rpc.class=org.apache.hadoop.metrics.file.FileContext 52 | #rpc.period=10 53 | #rpc.fileName=/tmp/rpcmetrics.log 54 | 55 | # Configuration of the "rpc" context for ganglia 56 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext 57 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 58 | # rpc.period=10 59 | # rpc.servers=localhost:8649 60 | 61 | 62 | # Configuration of the "ugi" context for null 63 | ugi.class=org.apache.hadoop.metrics.spi.NullContext 64 | 65 | # Configuration of the "ugi" context for file 66 | #ugi.class=org.apache.hadoop.metrics.file.FileContext 67 | #ugi.period=10 68 | #ugi.fileName=/tmp/ugimetrics.log 69 | 70 | # Configuration of the "ugi" context for ganglia 71 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext 72 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 73 | # ugi.period=10 74 | # ugi.servers=localhost:8649 75 | 76 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/hadoop-metrics2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. 
See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # syntax: [prefix].[source|sink].[instance].[options] 19 | # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details 20 | 21 | *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink 22 | # default sampling period, in seconds 23 | *.period=10 24 | 25 | # The namenode-metrics.out will contain metrics from all context 26 | #namenode.sink.file.filename=namenode-metrics.out 27 | # Specifying a special sampling period for namenode: 28 | #namenode.sink.*.period=8 29 | 30 | #datanode.sink.file.filename=datanode-metrics.out 31 | 32 | # the following example split metrics of different 33 | # context to different sinks (in this case files) 34 | #jobtracker.sink.file_jvm.context=jvm 35 | #jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out 36 | #jobtracker.sink.file_mapred.context=mapred 37 | #jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out 38 | 39 | #tasktracker.sink.file.filename=tasktracker-metrics.out 40 | 41 | #maptask.sink.file.filename=maptask-metrics.out 42 | 43 | #reducetask.sink.file.filename=reducetask-metrics.out 44 | 45 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 20 | 21 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/mapred-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 17 | 18 | export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 19 | 20 | export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 21 | 22 | #export HADOOP_JOB_HISTORYSERVER_OPTS= 23 | #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. 24 | #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. 25 | #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. 26 | #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default 27 | #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. 28 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | mapreduce.framework.name 21 | yarn 22 | The runtime framework for executing 23 | MapReduce jobs. Can be one of local, classic or yarn. 24 | 25 | 26 | 27 | mapreduce.jobhistory.address 28 | hadoop-manager.buildoop.org:10020 29 | 30 | 31 | 32 | mapreduce.jobhistory.webapp.address 33 | hadoop-manager.buildoop.org:19888 34 | 35 | 36 | 37 | yarn.app.mapreduce.am.staging-dir 38 | /user 39 | 40 | 41 | 42 | 43 | 61 | 62 | 68 | 69 | 75 | 76 | 80 | 81 | 87 | 88 | 94 | 95 | 101 | 102 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/HTTP.keytab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/HTTP.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/flume.keytab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/flume.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/hdfs.keytab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/hdfs.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/mapred.keytab: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/mapred.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/oozie.keytab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/oozie.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/secret: -------------------------------------------------------------------------------- 1 | 1 2 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/vagrant.keytab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/vagrant.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/yarn.keytab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/yarn.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/zookeeper.keytab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/cm/test-config/conf.openbus.ha-failover-kerberos/security/zookeeper.keytab -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/yarn-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | @rem User for YARN daemons 18 | if not defined HADOOP_YARN_USER ( 19 | set HADOOP_YARN_USER=%yarn% 20 | ) 21 | 22 | if not defined YARN_CONF_DIR ( 23 | set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf 24 | ) 25 | 26 | if defined YARN_HEAPSIZE ( 27 | @rem echo run with Java heapsize %YARN_HEAPSIZE% 28 | set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m 29 | ) 30 | 31 | if not defined YARN_LOG_DIR ( 32 | set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs 33 | ) 34 | 35 | if not defined YARN_LOGFILE ( 36 | set YARN_LOGFILE=yarn.log 37 | ) 38 | 39 | @rem default policy file for service-level authorization 40 | if not defined YARN_POLICYFILE ( 41 | set YARN_POLICYFILE=hadoop-policy.xml 42 | ) 43 | 44 | if not defined YARN_ROOT_LOGGER ( 45 | set YARN_ROOT_LOGGER=INFO,console 46 | ) 47 | 48 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR% 49 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR% 50 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE% 51 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE% 52 | set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME% 53 | set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING% 54 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME% 55 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER% 56 | set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER% 57 | if defined JAVA_LIBRARY_PATH ( 58 | set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% 59 | ) 60 | set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE% -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover-kerberos/yarn-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # User for YARN daemons 17 | export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} 18 | 19 | # resolve links - $0 may be a softlink 20 | export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" 21 | 22 | # some Java parameters 23 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 24 | if [ "$JAVA_HOME" != "" ]; then 25 | #echo "run java in $JAVA_HOME" 26 | JAVA_HOME=$JAVA_HOME 27 | fi 28 | 29 | if [ "$JAVA_HOME" = "" ]; then 30 | echo "Error: JAVA_HOME is not set." 
31 | exit 1 32 | fi 33 | 34 | JAVA=$JAVA_HOME/bin/java 35 | JAVA_HEAP_MAX=-Xmx1000m 36 | 37 | # For setting YARN specific HEAP sizes please use this 38 | # Parameter and set appropriately 39 | # YARN_HEAPSIZE=1000 40 | 41 | # check envvars which might override default args 42 | if [ "$YARN_HEAPSIZE" != "" ]; then 43 | JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" 44 | fi 45 | 46 | # Resource Manager specific parameters 47 | 48 | # Specify the max Heapsize for the ResourceManager using a numerical value 49 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 50 | # the value to 1000. 51 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 52 | # and/or YARN_RESOURCEMANAGER_OPTS. 53 | # If not specified, the default value will be picked from either YARN_HEAPMAX 54 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 55 | #export YARN_RESOURCEMANAGER_HEAPSIZE=1000 56 | 57 | # Specify the JVM options to be used when starting the ResourceManager. 58 | # These options will be appended to the options specified as YARN_OPTS 59 | # and therefore may override any similar flags set in YARN_OPTS 60 | #export YARN_RESOURCEMANAGER_OPTS= 61 | 62 | # Node Manager specific parameters 63 | 64 | # Specify the max Heapsize for the NodeManager using a numerical value 65 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 66 | # the value to 1000. 67 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 68 | # and/or YARN_NODEMANAGER_OPTS. 69 | # If not specified, the default value will be picked from either YARN_HEAPMAX 70 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 71 | #export YARN_NODEMANAGER_HEAPSIZE=1000 72 | 73 | # Specify the JVM options to be used when starting the NodeManager. 
74 | # These options will be appended to the options specified as YARN_OPTS 75 | # and therefore may override any similar flags set in YARN_OPTS 76 | #export YARN_NODEMANAGER_OPTS= 77 | 78 | # so that filenames w/ spaces are handled correctly in loops below 79 | IFS= 80 | 81 | 82 | # default log directory & file 83 | if [ "$YARN_LOG_DIR" = "" ]; then 84 | YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" 85 | fi 86 | if [ "$YARN_LOGFILE" = "" ]; then 87 | YARN_LOGFILE='yarn.log' 88 | fi 89 | 90 | # default policy file for service-level authorization 91 | if [ "$YARN_POLICYFILE" = "" ]; then 92 | YARN_POLICYFILE="hadoop-policy.xml" 93 | fi 94 | 95 | # restore ordinary behaviour 96 | unset IFS 97 | 98 | 99 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" 100 | YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" 101 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" 102 | YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" 103 | YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" 104 | YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" 105 | YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 106 | YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 107 | if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then 108 | YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" 109 | fi 110 | YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" 111 | 112 | 113 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/capacity-scheduler.xml: -------------------------------------------------------------------------------- 1 | 14 | 15 | 16 | 17 | yarn.scheduler.capacity.maximum-applications 18 | 10000 19 | 20 | Maximum number of applications that can be pending and running. 21 | 22 | 23 | 24 | 25 | yarn.scheduler.capacity.maximum-am-resource-percent 26 | 0.1 27 | 28 | Maximum percent of resources in the cluster which can be used to run 29 | application masters i.e. controls number of concurrent running 30 | applications. 31 | 32 | 33 | 34 | 35 | yarn.scheduler.capacity.resource-calculator 36 | org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator 37 | 38 | The ResourceCalculator implementation to be used to compare 39 | Resources in the scheduler. 40 | The default i.e. DefaultResourceCalculator only uses Memory while 41 | DominantResourceCalculator uses dominant-resource to compare 42 | multi-dimensional resources such as Memory, CPU etc. 43 | 44 | 45 | 46 | 47 | yarn.scheduler.capacity.root.queues 48 | default 49 | 50 | The queues at the this level (root is the root queue). 51 | 52 | 53 | 54 | 55 | yarn.scheduler.capacity.root.default.capacity 56 | 100 57 | Default queue target capacity. 58 | 59 | 60 | 61 | yarn.scheduler.capacity.root.default.user-limit-factor 62 | 1 63 | 64 | Default queue user limit a percentage from 0.0 to 1.0. 65 | 66 | 67 | 68 | 69 | yarn.scheduler.capacity.root.default.maximum-capacity 70 | 100 71 | 72 | The maximum capacity of the default queue. 73 | 74 | 75 | 76 | 77 | yarn.scheduler.capacity.root.default.state 78 | RUNNING 79 | 80 | The state of the default queue. State can be one of RUNNING or STOPPED. 81 | 82 | 83 | 84 | 85 | yarn.scheduler.capacity.root.default.acl_submit_applications 86 | * 87 | 88 | The ACL of who can submit jobs to the default queue. 89 | 90 | 91 | 92 | 93 | yarn.scheduler.capacity.root.default.acl_administer_queue 94 | * 95 | 96 | The ACL of who can administer jobs on the default queue. 
97 | 98 | 99 | 100 | 101 | yarn.scheduler.capacity.node-locality-delay 102 | -1 103 | 104 | Number of missed scheduling opportunities after which the CapacityScheduler 105 | attempts to schedule rack-local containers. 106 | Typically this should be set to number of racks in the cluster, this 107 | feature is disabled by default, set to -1. 108 | 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
name value description
37 | 38 | 39 |
40 |
41 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/container-executor.cfg: -------------------------------------------------------------------------------- 1 | yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group 2 | banned.users=#comma separated list of users who can not run applications 3 | min.user.id=1000#Prevent other super-users 4 | allowed.system.users=##comma separated list of system users who CAN run applications 5 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/core-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | fs.defaultFS 21 | hdfs://buildoopcluster 22 | HA-enabled logical URI NameService ID 23 | 24 | 25 | 26 | 27 | 28 | ha.zookeeper.quorum 29 | hadoop-manager:2181,hadoop-node1:2181,hadoop-node2:2181 30 | This lists the host-port pairs running the ZooKeeper service 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/hadoop-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | @rem Set Hadoop-specific environment variables here. 18 | 19 | @rem The only required environment variable is JAVA_HOME. All others are 20 | @rem optional. When running a distributed configuration it is best to 21 | @rem set JAVA_HOME in this file, so that it is correctly defined on 22 | @rem remote nodes. 23 | 24 | @rem The java implementation to use. Required. 25 | set JAVA_HOME=%JAVA_HOME% 26 | 27 | @rem The jsvc implementation to use. Jsvc is required to run secure datanodes. 28 | @rem set JSVC_HOME=%JSVC_HOME% 29 | 30 | @rem set HADOOP_CONF_DIR= 31 | 32 | @rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 33 | if exist %HADOOP_HOME%\contrib\capacity-scheduler ( 34 | if not defined HADOOP_CLASSPATH ( 35 | set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 36 | ) else ( 37 | set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar 38 | ) 39 | ) 40 | 41 | @rem The maximum amount of heap to use, in MB. Default is 1000. 42 | @rem set HADOOP_HEAPSIZE= 43 | @rem set HADOOP_NAMENODE_INIT_HEAPSIZE="" 44 | 45 | @rem Extra Java runtime options. Empty by default. 
46 | @rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true 47 | 48 | @rem Command specific options appended to HADOOP_OPTS when specified 49 | if not defined HADOOP_SECURITY_LOGGER ( 50 | set HADOOP_SECURITY_LOGGER=INFO,RFAS 51 | ) 52 | if not defined HDFS_AUDIT_LOGGER ( 53 | set HDFS_AUDIT_LOGGER=INFO,NullAppender 54 | ) 55 | 56 | set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS% 57 | set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS% 58 | set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS% 59 | 60 | @rem The following applies to multiple commands (fs, dfs, fsck, distcp etc) 61 | set HADOOP_CLIENT_OPTS=-Xmx128m %HADOOP_CLIENT_OPTS% 62 | @rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%" 63 | 64 | @rem On secure datanodes, user to run the datanode as after dropping privileges 65 | set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER% 66 | 67 | @rem Where log files are stored. %HADOOP_HOME%/logs by default. 68 | @rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME% 69 | 70 | @rem Where log files are stored in the secure data environment. 71 | set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER% 72 | 73 | @rem The directory where pid files are stored. /tmp by default. 74 | @rem NOTE: this should be set to a directory that can only be written to by 75 | @rem the user that will run the hadoop daemons. Otherwise there is the 76 | @rem potential for a symlink attack. 77 | set HADOOP_PID_DIR=%HADOOP_PID_DIR% 78 | set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR% 79 | 80 | @rem A string representing this instance of hadoop. %USERNAME% by default. 81 | set HADOOP_IDENT_STRING=%USERNAME% 82 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/hadoop-env.sh: -------------------------------------------------------------------------------- 1 | # Copyright 2011 The Apache Software Foundation 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # Set Hadoop-specific environment variables here. 20 | 21 | # The only required environment variable is JAVA_HOME. All others are 22 | # optional. When running a distributed configuration it is best to 23 | # set JAVA_HOME in this file, so that it is correctly defined on 24 | # remote nodes. 25 | 26 | # The java implementation to use. 27 | export JAVA_HOME=${JAVA_HOME} 28 | 29 | # The jsvc implementation to use. Jsvc is required to run secure datanodes. 
30 | #export JSVC_HOME=${JSVC_HOME} 31 | 32 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"} 33 | 34 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. 35 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do 36 | if [ "$HADOOP_CLASSPATH" ]; then 37 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f 38 | else 39 | export HADOOP_CLASSPATH=$f 40 | fi 41 | done 42 | 43 | # The maximum amount of heap to use, in MB. Default is 1000. 44 | #export HADOOP_HEAPSIZE= 45 | #export HADOOP_NAMENODE_INIT_HEAPSIZE="" 46 | 47 | # Extra Java runtime options. Empty by default. 48 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true" 49 | 50 | # Command specific options appended to HADOOP_OPTS when specified 51 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS" 52 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" 53 | 54 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS" 55 | 56 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc) 57 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS" 58 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS" 59 | 60 | # On secure datanodes, user to run the datanode as after dropping privileges 61 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER} 62 | 63 | # Where log files are stored. $HADOOP_HOME/logs by default. 64 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER 65 | 66 | # Where log files are stored in the secure data environment. 67 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER} 68 | 69 | # The directory where pid files are stored. /tmp by default. 70 | # NOTE: this should be set to a directory that can only be written to by 71 | # the user that will run the hadoop daemons. Otherwise there is the 72 | # potential for a symlink attack. 73 | #export HADOOP_PID_DIR=${HADOOP_PID_DIR} 74 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR} 75 | 76 | # A string representing this instance of hadoop. $USER by default. 
77 | export HADOOP_IDENT_STRING=$USER 78 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/hadoop-metrics.properties: -------------------------------------------------------------------------------- 1 | # Configuration of the "dfs" context for null 2 | dfs.class=org.apache.hadoop.metrics.spi.NullContext 3 | 4 | # Configuration of the "dfs" context for file 5 | #dfs.class=org.apache.hadoop.metrics.file.FileContext 6 | #dfs.period=10 7 | #dfs.fileName=/tmp/dfsmetrics.log 8 | 9 | # Configuration of the "dfs" context for ganglia 10 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 11 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext 12 | # dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 13 | # dfs.period=10 14 | # dfs.servers=localhost:8649 15 | 16 | 17 | # Configuration of the "mapred" context for null 18 | mapred.class=org.apache.hadoop.metrics.spi.NullContext 19 | 20 | # Configuration of the "mapred" context for file 21 | #mapred.class=org.apache.hadoop.metrics.file.FileContext 22 | #mapred.period=10 23 | #mapred.fileName=/tmp/mrmetrics.log 24 | 25 | # Configuration of the "mapred" context for ganglia 26 | # Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter) 27 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext 28 | # mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 29 | # mapred.period=10 30 | # mapred.servers=localhost:8649 31 | 32 | 33 | # Configuration of the "jvm" context for null 34 | #jvm.class=org.apache.hadoop.metrics.spi.NullContext 35 | 36 | # Configuration of the "jvm" context for file 37 | #jvm.class=org.apache.hadoop.metrics.file.FileContext 38 | #jvm.period=10 39 | #jvm.fileName=/tmp/jvmmetrics.log 40 | 41 | # Configuration of the "jvm" context for ganglia 42 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext 43 | # jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 44 | # jvm.period=10 45 | # jvm.servers=localhost:8649 46 | 47 | # Configuration of the "rpc" context for null 48 | rpc.class=org.apache.hadoop.metrics.spi.NullContext 49 | 50 | # Configuration of the "rpc" context for file 51 | #rpc.class=org.apache.hadoop.metrics.file.FileContext 52 | #rpc.period=10 53 | #rpc.fileName=/tmp/rpcmetrics.log 54 | 55 | # Configuration of the "rpc" context for ganglia 56 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext 57 | # rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 58 | # rpc.period=10 59 | # rpc.servers=localhost:8649 60 | 61 | 62 | # Configuration of the "ugi" context for null 63 | ugi.class=org.apache.hadoop.metrics.spi.NullContext 64 | 65 | # Configuration of the "ugi" context for file 66 | #ugi.class=org.apache.hadoop.metrics.file.FileContext 67 | #ugi.period=10 68 | #ugi.fileName=/tmp/ugimetrics.log 69 | 70 | # Configuration of the "ugi" context for ganglia 71 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext 72 | # ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31 73 | # ugi.period=10 74 | # ugi.servers=localhost:8649 75 | 76 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/hadoop-metrics2.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. 
See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # syntax: [prefix].[source|sink].[instance].[options] 19 | # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details 20 | 21 | *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink 22 | # default sampling period, in seconds 23 | *.period=10 24 | 25 | # The namenode-metrics.out will contain metrics from all context 26 | #namenode.sink.file.filename=namenode-metrics.out 27 | # Specifying a special sampling period for namenode: 28 | #namenode.sink.*.period=8 29 | 30 | #datanode.sink.file.filename=datanode-metrics.out 31 | 32 | # the following example split metrics of different 33 | # context to different sinks (in this case files) 34 | #jobtracker.sink.file_jvm.context=jvm 35 | #jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out 36 | #jobtracker.sink.file_mapred.context=mapred 37 | #jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out 38 | 39 | #tasktracker.sink.file.filename=tasktracker-metrics.out 40 | 41 | #maptask.sink.file.filename=maptask-metrics.out 42 | 43 | #reducetask.sink.file.filename=reducetask-metrics.out 44 | 45 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 20 | 21 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/mapred-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 17 | 18 | export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 19 | 20 | export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 21 | 22 | #export HADOOP_JOB_HISTORYSERVER_OPTS= 23 | #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. 24 | #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. 25 | #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. 26 | #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default 27 | #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. 28 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/mapred-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 16 | 17 | 18 | 19 | 20 | mapreduce.framework.name 21 | yarn 22 | The runtime framework for executing 23 | MapReduce jobs. Can be one of local, classic or yarn. 24 | 25 | 26 | 27 | mapreduce.jobhistory.address 28 | hadoop-manager.buildoop.org:10020 29 | 30 | 31 | 32 | mapreduce.jobhistory.webapp.address 33 | hadoop-manager.buildoop.org:19888 34 | 35 | 36 | 37 | yarn.app.mapreduce.am.staging-dir 38 | /user 39 | 40 | 41 | 42 | 43 | 61 | 62 | 68 | 69 | 75 | 76 | 80 | 81 | 87 | 88 | 94 | 95 | 101 | 102 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/yarn-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 
16 | 17 | @rem User for YARN daemons 18 | if not defined HADOOP_YARN_USER ( 19 | set HADOOP_YARN_USER=%yarn% 20 | ) 21 | 22 | if not defined YARN_CONF_DIR ( 23 | set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf 24 | ) 25 | 26 | if defined YARN_HEAPSIZE ( 27 | @rem echo run with Java heapsize %YARN_HEAPSIZE% 28 | set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m 29 | ) 30 | 31 | if not defined YARN_LOG_DIR ( 32 | set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs 33 | ) 34 | 35 | if not defined YARN_LOGFILE ( 36 | set YARN_LOGFILE=yarn.log 37 | ) 38 | 39 | @rem default policy file for service-level authorization 40 | if not defined YARN_POLICYFILE ( 41 | set YARN_POLICYFILE=hadoop-policy.xml 42 | ) 43 | 44 | if not defined YARN_ROOT_LOGGER ( 45 | set YARN_ROOT_LOGGER=INFO,console 46 | ) 47 | 48 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR% 49 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR% 50 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE% 51 | set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE% 52 | set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME% 53 | set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING% 54 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME% 55 | set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER% 56 | set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER% 57 | if defined JAVA_LIBRARY_PATH ( 58 | set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% 59 | ) 60 | set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE% -------------------------------------------------------------------------------- /deploy/cm/test-config/conf.openbus.ha-failover/yarn-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # User for YARN daemons 17 | export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} 18 | 19 | # resolve links - $0 may be a softlink 20 | export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" 21 | 22 | # some Java parameters 23 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 24 | if [ "$JAVA_HOME" != "" ]; then 25 | #echo "run java in $JAVA_HOME" 26 | JAVA_HOME=$JAVA_HOME 27 | fi 28 | 29 | if [ "$JAVA_HOME" = "" ]; then 30 | echo "Error: JAVA_HOME is not set." 
31 | exit 1 32 | fi 33 | 34 | JAVA=$JAVA_HOME/bin/java 35 | JAVA_HEAP_MAX=-Xmx1000m 36 | 37 | # For setting YARN specific HEAP sizes please use this 38 | # Parameter and set appropriately 39 | # YARN_HEAPSIZE=1000 40 | 41 | # check envvars which might override default args 42 | if [ "$YARN_HEAPSIZE" != "" ]; then 43 | JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" 44 | fi 45 | 46 | # Resource Manager specific parameters 47 | 48 | # Specify the max Heapsize for the ResourceManager using a numerical value 49 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 50 | # the value to 1000. 51 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 52 | # and/or YARN_RESOURCEMANAGER_OPTS. 53 | # If not specified, the default value will be picked from either YARN_HEAPMAX 54 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 55 | #export YARN_RESOURCEMANAGER_HEAPSIZE=1000 56 | 57 | # Specify the JVM options to be used when starting the ResourceManager. 58 | # These options will be appended to the options specified as YARN_OPTS 59 | # and therefore may override any similar flags set in YARN_OPTS 60 | #export YARN_RESOURCEMANAGER_OPTS= 61 | 62 | # Node Manager specific parameters 63 | 64 | # Specify the max Heapsize for the NodeManager using a numerical value 65 | # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set 66 | # the value to 1000. 67 | # This value will be overridden by an Xmx setting specified in either YARN_OPTS 68 | # and/or YARN_NODEMANAGER_OPTS. 69 | # If not specified, the default value will be picked from either YARN_HEAPMAX 70 | # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 71 | #export YARN_NODEMANAGER_HEAPSIZE=1000 72 | 73 | # Specify the JVM options to be used when starting the NodeManager. 
74 | # These options will be appended to the options specified as YARN_OPTS 75 | # and therefore may override any similar flags set in YARN_OPTS 76 | #export YARN_NODEMANAGER_OPTS= 77 | 78 | # so that filenames w/ spaces are handled correctly in loops below 79 | IFS= 80 | 81 | 82 | # default log directory & file 83 | if [ "$YARN_LOG_DIR" = "" ]; then 84 | YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" 85 | fi 86 | if [ "$YARN_LOGFILE" = "" ]; then 87 | YARN_LOGFILE='yarn.log' 88 | fi 89 | 90 | # default policy file for service-level authorization 91 | if [ "$YARN_POLICYFILE" = "" ]; then 92 | YARN_POLICYFILE="hadoop-policy.xml" 93 | fi 94 | 95 | # restore ordinary behaviour 96 | unset IFS 97 | 98 | 99 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" 100 | YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" 101 | YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" 102 | YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" 103 | YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" 104 | YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" 105 | YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 106 | YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" 107 | if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then 108 | YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" 109 | fi 110 | YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" 111 | 112 | 113 | -------------------------------------------------------------------------------- /deploy/vm/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/deploy/vm/.gitignore -------------------------------------------------------------------------------- /deploy/vm/README: -------------------------------------------------------------------------------- 1 | vagrant box add buildoop-centos http://asterix.cloud.cediant.es/buildoop/boxes/buildoop-centos.box 2 | vagrant box list 3 | 4 | vagrant up --no-provision 5 | vagrant provision 6 | -------------------------------------------------------------------------------- /deploy/vm/buildoop-cluster/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | VAGRANTFILE_API_VERSION = "2" 4 | 5 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 6 | config.vm.box = "buildoop-cluster" 7 | 8 | # Internal resource. 9 | # config.vm.box_url = "http://asterix.cloud.cediant.es/buildoop/boxes/buildoop-centos.box" 10 | # Public resource. 
11 | config.vm.box_url = "https://www.dropbox.com/s/ody2jezcm4ueu9y/buildoop-centos.box" 12 | 13 | manager_ram = 1024 14 | node_ram = 1024 15 | node_count = 2 16 | 17 | # Create /etc/hosts per host 18 | hosts = "192.168.33.11 hadoop-manager.buildoop.org hadoop-manager\n" 19 | node_count.times do |i| 20 | id = i+1 21 | hosts << "192.168.33.1#{id+1} hadoop-node#{id}.buildoop.org hadoop-node#{id}\n" 22 | end 23 | config.vm.provision :shell, :inline => "echo \"#{hosts}\" | sudo tee -a /etc/hosts" 24 | 25 | # Create Hadoop Manager 26 | config.vm.define "manager" do |manager_config| 27 | manager_config.vm.hostname = "hadoop-manager.buildoop.org" 28 | manager_config.vm.network :forwarded_port, guest: 7180, host: 7180 29 | manager_config.vm.network :private_network, ip: "192.168.33.11" 30 | manager_config.vm.provider :virtualbox do |vb| 31 | vb.gui = false 32 | vb.customize ["modifyvm", :id, "--ioapic", "on"] 33 | vb.customize ["modifyvm", :id, "--memory", "#{manager_ram}"] 34 | vb.customize ["modifyvm", :id, "--cpus", 2] 35 | manager_config.vm.provision :puppet do |puppet| 36 | puppet.manifests_path = "../../cm/manifests" 37 | puppet.manifest_file = "default.pp" 38 | end 39 | end 40 | end 41 | 42 | # Create Hadoop Datanodes 43 | node_count.times do |i| 44 | id = i+1 45 | config.vm.define "node#{id}" do |node_config| 46 | node_config.vm.hostname = "hadoop-node#{id}.buildoop.org" 47 | node_config.vm.network :private_network, ip: "192.168.33.1#{id+1}" 48 | node_config.vm.provider :virtualbox do |vb| 49 | vb.customize ["modifyvm", :id, "--memory", "#{node_ram}"] 50 | node_config.vm.provision :puppet do |puppet| 51 | puppet.manifests_path = "../../cm/manifests" 52 | puppet.manifest_file = "default.pp" 53 | end 54 | end 55 | end 56 | end 57 | 58 | end 59 | -------------------------------------------------------------------------------- /scripts/README.jenkins: -------------------------------------------------------------------------------- 1 | A simple Jenkins configuration for 2 | Buildoop continuous integration. 3 | 4 | 1. sudo wget -O /etc/yum.repos.d/jenkins.repo http://pkg.jenkins-ci.org/redhat/jenkins.repo 5 | 2. sudo rpm --import http://pkg.jenkins-ci.org/redhat/jenkins-ci.org.key 6 | 3. yum install jenkins -y 7 | 4. service jenkins start && chkconfig jenkins on 8 | 5. http://openbus-buildoop:8080/ 9 | 10 | Configure a new project with the workspace, for example, in /opt. In this folder 11 | we cloned the buildoop project and created the Jenkins build script for the project: 12 | 13 | /opt/ 14 | ├── buildoop.git 15 | ├── groovy-2.2.1 16 | ├── run-buildoop.sh 17 | └── scala-2.10.3 18 | 19 | You can find the build script in this folder. 20 | -------------------------------------------------------------------------------- /scripts/README.vim: -------------------------------------------------------------------------------- 1 | These commands are for CLI execution and 2 | testing purposes. 3 | 4 | Notes for using Vim with Groovy 5 | ------------------------------- 6 | 7 | 1. Enable TagList: 8 | 9 | vim ~/.vim/plugin/taglist.vim 10 | 11 | let s:tlist_def_groovy_settings = 'groovy;p:package;c:class;i:interface;' . 12 | \ 'f:function;v:variables' 13 | 14 | 2. Enable ctags: 15 | 16 | cp home.ctags ~/.ctags 17 | ctags -R buildoop/ 18 | 19 | 3. 
Enable cscope: 20 | 21 | find buildoop/ -name *.groovy > cscope.files 22 | cscope -b 23 | 24 | -------------------------------------------------------------------------------- /scripts/build-package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | BUILDOOP_BRANCH=$1 3 | RECIPES_ORG=$2 4 | RECIPES_REPO=$3 5 | BRANCH=$4 6 | FORCE_CLEAN=$5 7 | PKG=$6 8 | 9 | REMOTE_REPO=$RECIPES_ORG/$RECIPES_REPO 10 | echo $HOME 11 | echo $BUILDOOP_BRANCH 12 | 13 | echo $REMOTE_REPO 14 | echo $BRANCH 15 | echo $FORCE_CLEAN 16 | echo $PKG 17 | #echo $BDROOT 18 | 19 | 20 | #export HOME=/home/$USER 21 | source $HOME/.bash_profile &>/dev/null 22 | 23 | echo $BDROOT 24 | 25 | github_prefix="git@github.com:" 26 | #github_prefix="https://github.com/" 27 | 28 | echo -e "\n >>> Building $PKG on $(whoami)@$(hostname) \n" 29 | 30 | cd $BDROOT 31 | 32 | echo -e "\n >>> Checking out branch $BUILDOOP_BRANCH \n" 33 | git checkout $BUILDOOP_BRANCH 34 | 35 | echo -e "\n >>> Pulling changes from $BUILDOOP_BRANCH \n" 36 | git pull origin 37 | 38 | #if [ ! -d "./$REMOTE_REPO" ]; then 39 | # echo "remoterepo: $github_prefix/$REMOTE_REPO" 40 | # buildoop -remoterepo $github_prefix/$REMOTE_REPO 41 | #fi 42 | 43 | if [ ! -d "./recipes/$REMOTE_REPO" ]; then 44 | echo -e "\n >>> Downloading $BRANCH from REPO: $github_prefix$REMOTE_REPO \n" 45 | buildoop -downloadrepo $github_prefix$REMOTE_REPO $BRANCH 46 | fi 47 | 48 | cd ./recipes/$BRANCH 49 | echo -e "\n >>> Pulling changes from $github_prefix/$REMOTE_REPO $BRANCH \n" 50 | git pull origin 51 | 52 | cd $BDROOT 53 | 54 | if $FORCE_CLEAN ; then 55 | echo -e "\n >>> Cleaning previously built artifact for package $PKG \n" 56 | buildoop $BRANCH $PKG -clean 57 | fi 58 | 59 | echo -e "\n >>> Building package $PKG \n" 60 | buildoop $BRANCH $PKG -build 61 | 62 | -------------------------------------------------------------------------------- /scripts/create-recipe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:set ts=4:sw=4:et:sts=4:ai:tw=80 3 | # 4 | # Licensed to the Apache Software Foundation (ASF) under one 5 | # or more contributor license agreements. See the NOTICE file 6 | # distributed with this work for additional information 7 | # regarding copyright ownership. The ASF licenses this file 8 | # to you under the Apache License, Version 2.0 (the 9 | # "License"); you may not use this file except in compliance 10 | # with the License. You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | 20 | if [ -z "$BDROOT" ]; then 21 | echo "BDROOT is not set" 22 | exit 1 23 | fi 24 | 25 | BDROOT=/tmp 26 | 27 | usage() { 28 | echo "Usage: $0 -n RECIPE_NAME -v RECIPE_VERSION -r RECIPE_REVISION " 29 | echo -e "\nExample:" 30 | echo -e "$0 -n kafka -v 0.9.0 -r openbus-0.0.1-r1\n" 31 | exit 1 32 | } 33 | 34 | confirm () { 35 | # call with a prompt string or use a default 36 | read -r -p "${1:-Are you sure? 
[y/N]} " response 37 | case $response in 38 | [yY][eE][sS]|[yY]) 39 | true 40 | ;; 41 | *) 42 | false 43 | ;; 44 | esac 45 | } 46 | 47 | [[ $# != 6 ]] && usage 48 | 49 | #string with command options 50 | options=$@ 51 | 52 | # An array with all the arguments 53 | arguments=($options) 54 | 55 | # Loop index 56 | index=0 57 | 58 | for argument in $options 59 | do 60 | # Incrementing index 61 | index=`expr $index + 1` 62 | 63 | # The conditions 64 | case $argument in 65 | -n) RECIPE_NAME=${arguments[index]} ;; 66 | -v) RECIPE_VERSION=${arguments[index]} ;; 67 | -r) RECIPE_REVISION=${arguments[index]} ;; 68 | esac 69 | done 70 | 71 | [[ -z $RECIPE_NAME ]] || 72 | [[ -z $RECIPE_VERSION ]] || 73 | [[ -z $RECIPE_REVISION ]] && 74 | usage 75 | 76 | confirm || exit 1 77 | 78 | basedir=${BDROOT}/recipes/${RECIPE_NAME}/${RECIPE_NAME}-${RECIPE_VERSION}_${RECIPE_REVISION}/ 79 | mkdir -p ${basedir}/rpm/sources 80 | mkdir -p ${basedir}/rpm/specs 81 | 82 | cat << ! > ${BDROOT}/recipes/${RECIPE_NAME}/ChangeLog 83 | DD-MM-YYYY User Committer 84 | 85 | * Initial commit. 86 | ! 87 | 88 | cat << ! > ${BDROOT}/recipes/${RECIPE_NAME}/${RECIPE_NAME}-${RECIPE_VERSION}_${RECIPE_REVISION}.bd 89 | { 90 | "do_info": { 91 | "description": "${RECIPE_NAME} description", 92 | "homepage": "http://www.${RECIPE_NAME}.org/", 93 | "license": "Apache-2.0", 94 | "filename": "${RECIPE_NAME}-${RECIPE_VERSION}_${RECIPE_REVISION}.bd" 95 | }, 96 | 97 | "do_download": { 98 | "src_uri": "http://ftp.${RECIPE_NAME}.org/${RECIPE_NAME}-${RECIPE_VERSION}-src.tgz", 99 | "src_md5sum": "46b3e65e38f1bde4b6251ea131d905f4" 100 | }, 101 | 102 | "do_fetch": { 103 | "download_cmd": "wget" 104 | } 105 | } 106 | ! 107 | 108 | tree ${BDROOT}/recipes/${RECIPE_NAME} 109 | exit 0 110 | 111 | -------------------------------------------------------------------------------- /scripts/home.ctags: -------------------------------------------------------------------------------- 1 | --langdef=groovy 2 | --langmap=groovy:.groovy 3 | --regex-groovy=/^[ \t]*[(private|public|protected) ( \t)]*def[ \t]+([A-Za-z0-9_]+)[ \t()=]*\{/\1/f,function,functions/ 4 | --regex-groovy=/^[ \t]*private def[ \t]+([A-Za-z0-9_]+)[ \t]*/\1/v,private,private variables/ 5 | --regex-groovy=/^[ \t]*public def[ \t]+([A-Za-z0-9_]+)[ \t]*/\1/u,public,public variables/ 6 | --regex-groovy=/^[ \t]*[abstract ( \t)]*[(private|public) ( \t)]*class[ \t]+([A-Za-z0-9_]+)[ \t]*/\1/c,class,classes/ 7 | --regex-groovy=/^[ \t]*[abstract ( \t)]*[(private|public) ( \t)]*enum[ \t]+([A-Za-z0-9_]+)[ \t]*/\1/c,class,classes/ 8 | -------------------------------------------------------------------------------- /scripts/java-head.env: -------------------------------------------------------------------------------- 1 | export JAVA_OPTS="-Xms512m -Xmx2014m" 2 | -------------------------------------------------------------------------------- /scripts/run-buildoop-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:set ts=4:sw=4:et:sts=4:ai:tw=80 3 | # 4 | # Licensed to the Apache Software Foundation (ASF) under one 5 | # or more contributor license agreements. See the NOTICE file 6 | # distributed with this work for additional information 7 | # regarding copyright ownership. The ASF licenses this file 8 | # to you under the Apache License, Version 2.0 (the 9 | # "License"); you may not use this file except in compliance 10 | # with the License. 
You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | 20 | if [ -z "$BDROOT" ]; then 21 | echo "BDROOT is not set" 22 | exit 1 23 | fi 24 | 25 | vagrantfileFolder=${BDROOT}/build/vagrant/vm/buildoop-cluster/ 26 | lockfile=/tmp/buildoop-cluster.lock 27 | 28 | start() { 29 | echo -n $"Starting $prog: " 30 | if [ -f $lockfile ] ; then 31 | echo "buildoop-cluster running" 32 | exit 0 33 | fi 34 | 35 | cd $vagrantfileFolder 36 | vagrant up manager 37 | vagrant provision manager 38 | vagrant up node1 39 | vagrant provision node1 40 | vagrant up node2 41 | vagrant provision node2 42 | retval=$? 43 | echo 44 | [ $retval -eq 0 ] && touch $lockfile 45 | } 46 | 47 | stop() { 48 | echo -n $"Stopping $prog: " 49 | cd $vagrantfileFolder 50 | vagrant halt manager 51 | vagrant halt node1 52 | vagrant halt node2 53 | retval=$? 54 | echo 55 | [ $retval -eq 0 ] && rm -f $lockfile 56 | } 57 | 58 | restart() { 59 | stop 60 | start 61 | } 62 | 63 | reload() { 64 | restart 65 | } 66 | 67 | status() { 68 | cd $vagrantfileFolder 69 | vagrant status 70 | } 71 | 72 | case "$1" in 73 | start) 74 | $1 75 | ;; 76 | stop) 77 | $1 78 | ;; 79 | restart) 80 | $1 81 | ;; 82 | reload) 83 | $1 84 | ;; 85 | status) 86 | $1 87 | ;; 88 | *) 89 | echo $"Usage: $0 {start|stop|status|restart|reload}" 90 | exit 2 91 | esac 92 | exit $? 93 | -------------------------------------------------------------------------------- /scripts/run-buildoop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Build script for run buildoop from Jenkins. 4 | # 5 | 6 | JAVA_HOME=/usr/java/jdk1.7.0_51/ 7 | GROOVY_HOME=/opt/groovy-2.2.1 8 | MAVEN_HOME=/usr/share/java/maven 9 | SCALA_HOME=/opt/scala-2.10.3 10 | PATH=$PATH:$GRADLE_HOME/bin:$GROOVY_HOME/bin:$SCALA_HOME/bin:$JAVA_HOME/bin:/usr/local/bin/ 11 | 12 | export JAVA_HOME GROOVY_HOME MAVEN_HOME SCALA_HOME PATH 13 | 14 | pushd /opt/buildoop.git 15 | source set-buildoop-env 16 | buildoop openbus-0.0.1 -build | tee /tmp/buildoop.log 17 | popd 18 | -------------------------------------------------------------------------------- /scripts/run-repo-server: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env groovy 2 | /* vim:set ts=4:sw=4:et:sts=4:ai:tw=80 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | * 20 | * Note about Java Heap: 21 | * -Xms -Xmx 22 | * by default 32m and 128m. 23 | */ 24 | import com.sun.net.httpserver.* 25 | import java.util.concurrent.Executors 26 | 27 | HTTP_SERVER_PORT=8081 28 | println "Create server port " + HTTP_SERVER_PORT 29 | server = HttpServer.create(new InetSocketAddress(HTTP_SERVER_PORT),0); 30 | 31 | server.createContext("/", new RepoHandler(server:server)); 32 | server.setExecutor(Executors.newCachedThreadPool()) 33 | println "Starting server" 34 | server.start(); 35 | println "Server Started" 36 | 37 | //exchange.close(); 38 | //server.stop(3) //max wait 3 second 39 | 40 | class RepoHandler implements HttpHandler { 41 | 42 | def server 43 | 44 | public void handle(HttpExchange exchange) throws IOException { 45 | println "getRequestMethod:" 46 | println exchange.getRequestMethod() 47 | println "getRequestHeaders:" 48 | println exchange.getRequestHeaders() 49 | println "getRequestURI:" 50 | def fileName = exchange.getRequestURI() 51 | println fileName 52 | 53 | def file = new File("." + fileName) 54 | def bytearray = new byte [(int)file.length()] 55 | def fis = new FileInputStream(file) 56 | def bis = new BufferedInputStream(fis); 57 | bis.read(bytearray, 0, bytearray.length); 58 | 59 | // ok, we are ready to send the response. 60 | exchange.sendResponseHeaders(200, file.length()); 61 | def os = exchange.getResponseBody(); 62 | os.write(bytearray,0,bytearray.length); 63 | os.close() 64 | } 65 | } 66 | 67 | -------------------------------------------------------------------------------- /scripts/set-enviromet.env: -------------------------------------------------------------------------------- 1 | # Java 2 | JAVA_HOME=/usr/java/jdk1.7.0_51/ 3 | 4 | # Gradle 5 | #JAVA_OPTS= 6 | #GRADLE_OPTS= 7 | GRADLE_HOME=/home/jroman/gradle-1.10 8 | 9 | # Groovy 10 | GROOVY_HOME=/home/jroman/groovy-2.2.1 11 | 12 | # Maven 13 | MAVEN_HOME=/usr/share/java/maven 14 | 15 | # Scala 16 | SCALA_HOME=/home/jroman/scala-2.10.3 17 | 18 | export GRADLE_HOME GROOVY_HOME MAVEN_HOME SCALA_HOME 19 | export PATH=$PATH:$GRADLE_HOME/bin:$GROOVY_HOME/bin:$SCALA_HOME/bin:$JAVA_HOME/bin 20 | 21 | echo "---------------------------" 22 | echo "JAVA_HOME: $JAVA_HOME" 23 | echo "Java version: $(java -version)" 24 | echo $(alternatives --display java | grep best) 25 | echo "---------------------------" 26 | -------------------------------------------------------------------------------- /scripts/set-version-file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # vim:set ts=4:sw=4:et:sts=4:ai:tw=80 3 | # 4 | # Licensed to the Apache Software Foundation (ASF) under one 5 | # or more contributor license agreements. See the NOTICE file 6 | # distributed with this work for additional information 7 | # regarding copyright ownership. The ASF licenses this file 8 | # to you under the Apache License, Version 2.0 (the 9 | # "License"); you may not use this file except in compliance 10 | # with the License. You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
19 | 20 | if [ -z "$BDROOT" ]; then 21 | echo "BDROOT is not set" 22 | exit 1 23 | fi 24 | 25 | conffile=${BDROOT}/buildoop/conf/buildoop.conf 26 | string="Buildoop v0.0.1-alpha" 27 | hash=build-$(date +"%m%d20%y") 28 | 29 | version=$string-$hash 30 | a=$(cat ${BDROOT}/VERSION | cut -d'-' -f5) 31 | sum=$(($a + 1)) 32 | echo ${version}-$sum > ${BDROOT}/VERSION 33 | 34 | sed -i -r "s/buildoop.version.*/buildoop.version=\"$version-$sum\"/g" ${conffile} 35 | 36 | -------------------------------------------------------------------------------- /set-buildoop-env: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Buildoop Build Environment Setup Script 3 | # 4 | # Copyright (C) 2014 Javi Roman 5 | # 6 | # Licensed to the Apache Software Foundation (ASF) under one or more 7 | # contributor license agreements. See the NOTICE file distributed with 8 | # this work for additional information regarding copyright ownership. 9 | # The ASF licenses this file to You under the Apache License, Version 2.0 10 | # (the "License"); you may not use this file except in compliance with 11 | # the License. You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | # 21 | # Call as '. ./set-buildoop-env' or 'source set-buildoop-env' 22 | # 23 | if [ -z "$ZSH_NAME" ] && [ "x$0" = "x./set-buildoop-env" ]; then 24 | echo "Error: This script needs to be sourced. Please run as '. ./set-buildoop-env'" 25 | return 1 26 | else 27 | if [ -n "$BASH_SOURCE" ]; then 28 | BDROOT="`dirname $BASH_SOURCE`" 29 | elif [ -n "$ZSH_NAME" ]; then 30 | BDROOT="`dirname $0`" 31 | else 32 | BDROOT="`pwd`" 33 | fi 34 | BDROOT=`readlink -f "$BDROOT"` 35 | BUILDDIR=build 36 | if [ ! -d $BUILDDIR ]; then 37 | mkdir $BUILDDIR 38 | fi 39 | fi 40 | 41 | if [ -z "$JAVA_HOME" ]; then 42 | echo "JAVA_HOME is not set" 43 | return 1 44 | fi 45 | 46 | if [ -z "$GROOVY_HOME" ]; then 47 | echo "GROOVY_HOME is not set" 48 | return 1 49 | fi 50 | 51 | if [ -z "$MAVEN_HOME" ]; then 52 | echo "MAVEN_HOME is not set" 53 | return 1 54 | fi 55 | 56 | PATH=$PATH:$BDROOT/buildoop/bin 57 | export BDROOT BUILDDIR PATH 58 | 59 | echo "Downloading dependencies ...." 60 | groovy -Dgroovy.grape.report.downloads=true buildoop/bin/buildoop -version 61 | echo "ready!" 
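# A typical session, as a minimal sketch (the tool locations below are only
# examples; point JAVA_HOME, GROOVY_HOME and MAVEN_HOME at your own installs):
#
#   export JAVA_HOME=/usr/java/jdk1.7.0_51
#   export GROOVY_HOME=/opt/groovy-2.2.1
#   export MAVEN_HOME=/usr/share/java/maven
#   . ./set-buildoop-env
#   buildoop openbus-0.0.1 -build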
62 | -------------------------------------------------------------------------------- /sit/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/sit/.gitignore -------------------------------------------------------------------------------- /sit/README: -------------------------------------------------------------------------------- 1 | System Integration Testing artifacts 2 | -------------------------------------------------------------------------------- /sit/poc/test-client.groovy: -------------------------------------------------------------------------------- 1 | @Grab('org.codehaus.groovy:groovy-xmlrpc:0.8') 2 | 3 | import groovy.net.xmlrpc.* 4 | 5 | def serverProxy = new XMLRPCServerProxy("http://localhost:15000") 6 | println serverProxy.echo("Hello World!") 7 | -------------------------------------------------------------------------------- /sit/poc/test-server.groovy: -------------------------------------------------------------------------------- 1 | @Grab('org.codehaus.groovy:groovy-xmlrpc:0.8') 2 | 3 | import groovy.net.xmlrpc.* 4 | import java.net.ServerSocket 5 | 6 | def XMLRPCServer server = new XMLRPCServer() 7 | 8 | server.echo = { 9 | file = new File("/tmp") 10 | println "server.echo invoked" 11 | return ["message":"for you"] 12 | } 13 | 14 | server.dos = { 15 | def file = new File("hello") 16 | println "server.dos invoked" 17 | println file 18 | } 19 | 20 | def serverSocket = new ServerSocket(15000) // Open a server socket on port 15000 21 | println "server listening - localhost:15000" 22 | server.startServer(serverSocket) 23 | -------------------------------------------------------------------------------- /toolchain/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/toolchain/.gitignore -------------------------------------------------------------------------------- /toolchain/target-Centos-6.x/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/toolchain/target-Centos-6.x/.gitignore -------------------------------------------------------------------------------- /toolchain/target-Centos-6.x/binutils/binutils-2.20.51.bd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/toolchain/target-Centos-6.x/binutils/binutils-2.20.51.bd -------------------------------------------------------------------------------- /toolchain/target-Centos-6.x/gcc/gcc-4.4.7.bd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/toolchain/target-Centos-6.x/gcc/gcc-4.4.7.bd -------------------------------------------------------------------------------- /toolchain/target-Centos-6.x/glibc/glibc-2.12.bd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/toolchain/target-Centos-6.x/glibc/glibc-2.12.bd -------------------------------------------------------------------------------- /toolchain/target-Centos-6.x/rpm/rpm-4.8.0.bd: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/toolchain/target-Centos-6.x/rpm/rpm-4.8.0.bd -------------------------------------------------------------------------------- /toolchain/target-RedHat-6.x/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keedio/buildoop/23618392897aa6f82bbbdc3e196b36ad226e6580/toolchain/target-RedHat-6.x/.gitignore --------------------------------------------------------------------------------
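How the scripts above fit together, as a minimal sketch (this is not a file shipped in the repository; the GitHub organisation, recipes repository and branch/package values are placeholders): a Jenkins job like the one outlined in scripts/README.jenkins sources set-buildoop-env to export BDROOT and put the buildoop command on the PATH, then drives scripts/build-package.sh with its six positional arguments:

    cd /opt/buildoop.git
    source set-buildoop-env
    # arguments: BUILDOOP_BRANCH RECIPES_ORG RECIPES_REPO BRANCH FORCE_CLEAN PKG
    ./scripts/build-package.sh master your-github-org your-recipes-repo openbus-0.0.1 true kafka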