├── .gitignore ├── LICENSE.txt ├── README.md ├── Vagrantfile ├── accumulo-mesos-common ├── LICENSE.txt ├── pom.xml └── src │ ├── main │ ├── java │ │ └── aredee │ │ │ └── mesos │ │ │ └── frameworks │ │ │ └── accumulo │ │ │ ├── configuration │ │ │ ├── CommandLineHandler.java │ │ │ ├── Constants.java │ │ │ ├── Defaults.java │ │ │ └── Environment.java │ │ │ ├── initialize │ │ │ ├── AccumuloInitializer.java │ │ │ └── AccumuloSiteXml.java │ │ │ ├── model │ │ │ ├── Accumulo.java │ │ │ ├── Error.java │ │ │ ├── Framework.java │ │ │ ├── IdRegistry.java │ │ │ ├── Monitor.java │ │ │ ├── ServerGroup.java │ │ │ ├── ServerProfile.java │ │ │ └── Task.java │ │ │ ├── process │ │ │ └── AccumuloProcessFactory.java │ │ │ └── state │ │ │ └── FrameworkStateHelper.java │ └── resources │ │ └── accumulo-mesos.properties │ └── test │ ├── java │ └── aredee │ │ └── mesos │ │ └── frameworks │ │ └── accumulo │ │ ├── configuration │ │ └── file │ │ │ └── TestSiteXml.java │ │ ├── model │ │ └── FrameworkTest.java │ │ └── state │ │ └── FrameworkStateHelperTest.java │ └── resources │ ├── model │ ├── AccumuloOnly.json │ ├── FrameworkAndAccumulo.json │ └── FrameworkOnly.json │ └── state │ └── ZkTestFramework.json ├── accumulo-mesos-dist ├── LICENSE.txt ├── pom.xml └── src │ └── main │ └── assembly │ └── tarball.xml ├── accumulo-mesos-executor ├── LICENSE.txt ├── pom.xml └── src │ └── main │ ├── java │ └── aredee │ │ └── mesos │ │ └── frameworks │ │ └── accumulo │ │ └── executor │ │ ├── AccumuloStartExecutor.java │ │ ├── Main.java │ │ └── package-info.java │ └── resources │ ├── accumulo.json │ ├── accumulo.yaml │ └── log4j.xml ├── accumulo-mesos-framework ├── LICENSE.txt ├── pom.xml └── src │ └── main │ ├── java │ └── aredee │ │ └── mesos │ │ └── frameworks │ │ └── accumulo │ │ └── framework │ │ ├── Main.java │ │ ├── api │ │ ├── ApiException.java │ │ ├── ApiOriginFilter.java │ │ ├── ApiResponseMessage.java │ │ ├── ApiServiceFactory.java │ │ ├── ClusterApi.java │ │ ├── ClusterApiService.java │ │ ├── ConfigApi.java │ │ ├── ConfigApiService.java │ │ ├── DefaultApi.java │ │ ├── DefaultApiService.java │ │ ├── EchoResource.java │ │ ├── NotFoundException.java │ │ ├── StatusApi.java │ │ ├── StatusApiService.java │ │ ├── WebServer.java │ │ └── impl │ │ │ ├── ClusterApiServiceImpl.java │ │ │ ├── ConfigApiServiceImpl.java │ │ │ ├── DefaultApiServiceImpl.java │ │ │ └── StatusApiServiceImpl.java │ │ ├── guice │ │ ├── ApiServletModule.java │ │ └── ConfigurationModule.java │ │ └── package-info.java │ └── resources │ ├── log4j.properties │ ├── swagger.json │ ├── swagger.yaml │ └── webapp │ ├── public │ └── index.html │ └── swagger │ ├── dist │ ├── css │ │ ├── print.css │ │ ├── reset.css │ │ ├── screen.css │ │ ├── style.css │ │ └── typography.css │ ├── fonts │ │ ├── droid-sans-v6-latin-700.eot │ │ ├── droid-sans-v6-latin-700.svg │ │ ├── droid-sans-v6-latin-700.ttf │ │ ├── droid-sans-v6-latin-700.woff │ │ ├── droid-sans-v6-latin-700.woff2 │ │ ├── droid-sans-v6-latin-regular.eot │ │ ├── droid-sans-v6-latin-regular.svg │ │ ├── droid-sans-v6-latin-regular.ttf │ │ ├── droid-sans-v6-latin-regular.woff │ │ └── droid-sans-v6-latin-regular.woff2 │ ├── images │ │ ├── explorer_icons.png │ │ ├── favicon-16x16.png │ │ ├── favicon-32x32.png │ │ ├── favicon.ico │ │ ├── logo_small.png │ │ ├── pet_store_api.png │ │ ├── throbber.gif │ │ └── wordnik_api.png │ ├── index.html │ ├── lang │ │ ├── en.js │ │ ├── es.js │ │ ├── pt.js │ │ ├── ru.js │ │ └── translator.js │ ├── lib │ │ ├── backbone-min.js │ │ ├── handlebars-2.0.0.js │ │ ├── highlight.7.3.pack.js │ │ ├── 
jquery-1.8.0.min.js │ │ ├── jquery.ba-bbq.min.js │ │ ├── jquery.slideto.min.js │ │ ├── jquery.wiggle.min.js │ │ ├── marked.js │ │ ├── swagger-oauth.js │ │ ├── underscore-min.js │ │ └── underscore-min.map │ ├── o2c.html │ ├── swagger-ui.js │ └── swagger-ui.min.js │ ├── swagger-ui.version │ ├── swagger.json │ └── swagger.yaml ├── accumulo-mesos-scheduler ├── LICENSE.txt ├── pom.xml └── src │ ├── main │ └── java │ │ └── aredee │ │ └── mesos │ │ └── frameworks │ │ └── accumulo │ │ └── scheduler │ │ ├── Cluster.java │ │ ├── Scheduler.java │ │ ├── launcher │ │ ├── AccumuloStartExecutorLauncher.java │ │ └── Launcher.java │ │ ├── matcher │ │ ├── Match.java │ │ ├── MatchUtils.java │ │ ├── Matcher.java │ │ └── MinCpuMinRamFIFOMatcher.java │ │ └── package-info.java │ └── test │ └── java │ └── aredee │ └── mesos │ └── frameworks │ └── accumulo │ └── scheduler │ └── matcher │ └── MinCpuMinRamFIFOMatcherTest.java ├── dev ├── .gitignore ├── README.md ├── aws │ ├── AWS_cluster.json │ ├── AWS_framework.json │ ├── init_framework.sh │ ├── start_framework.sh │ └── upload_hdfs.sh ├── config │ ├── AWS_framework.json │ ├── FrameworkAndAccumulo.json │ ├── cluster.json │ └── framework.json ├── init_framework.sh ├── provision │ ├── .gitignore │ ├── format_namenode.sh │ ├── install_compiler.sh │ ├── install_default_jdk.sh │ ├── install_default_jre_headless.sh │ ├── install_docker.sh │ ├── install_hadoop.sh │ ├── install_java_8.sh │ ├── install_mesos.sh │ ├── start_datanode.sh │ ├── start_mesos_master.sh │ ├── start_mesos_master_debian.sh │ ├── start_mesos_slave.sh │ ├── start_mesos_slave_debian.sh │ └── start_namenode.sh ├── start_aws_framework.sh ├── start_framework.sh └── upload_hdfs.sh ├── docker ├── accumulo.dockerfile ├── bin │ ├── install-accumulo.sh │ └── start-framework.sh ├── build-framwork.sh ├── config │ ├── cluster.json-sample │ └── framework.json-sample └── docker-compose.yml └── pom.xml /.gitignore: -------------------------------------------------------------------------------- 1 | **/.vagrant/ 2 | **/logs/ 3 | 4 | # Compiled Licenses 5 | **/licenses/ 6 | **/THIRD-PARTY.txt 7 | 8 | # JAVA 9 | *.class 10 | 11 | # Mobile Tools for Java (J2ME) 12 | .mtj.tmp/ 13 | 14 | # Package Files # 15 | *.jar 16 | *.war 17 | *.ear 18 | 19 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 20 | hs_err_pid* 21 | 22 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm 23 | 24 | *.iml 25 | 26 | ## Directory-based project format: 27 | .idea/ 28 | # if you remove the above rule, at least ignore the following: 29 | 30 | # User-specific stuff: 31 | # .idea/workspace.xml 32 | # .idea/tasks.xml 33 | # .idea/dictionaries 34 | 35 | # Sensitive or high-churn files: 36 | # .idea/dataSources.ids 37 | # .idea/dataSources.xml 38 | # .idea/sqlDataSources.xml 39 | # .idea/dynamic.xml 40 | # .idea/uiDesigner.xml 41 | 42 | # Gradle: 43 | # .idea/gradle.xml 44 | # .idea/libraries 45 | 46 | # Mongo Explorer plugin: 47 | # .idea/mongoSettings.xml 48 | 49 | ## File-based project format: 50 | *.ipr 51 | *.iws 52 | 53 | ## Plugin-specific files: 54 | 55 | # IntelliJ 56 | /out/ 57 | 58 | # mpeltonen/sbt-idea plugin 59 | .idea_modules/ 60 | 61 | # JIRA plugin 62 | atlassian-ide-plugin.xml 63 | 64 | # Crashlytics plugin (for Android Studio and IntelliJ) 65 | com_crashlytics_export_strings.xml 66 | crashlytics.properties 67 | crashlytics-build.properties 68 | 69 | *.pydevproject 70 | .metadata 71 | .gradle 72 | tmp/ 73 | *.tmp 74 | *.bak 75 | *.swp 76 | *~.nib 77 | local.properties 78 | 
.settings/ 79 | .loadpath 80 | 81 | # Eclipse Core 82 | .project 83 | 84 | # External tool builders 85 | .externalToolBuilders/ 86 | 87 | # Locally stored "Eclipse launch configurations" 88 | *.launch 89 | 90 | # CDT-specific 91 | .cproject 92 | 93 | # JDT-specific (Eclipse Java Development Tools) 94 | .classpath 95 | 96 | # PDT-specific 97 | .buildpath 98 | 99 | # sbteclipse plugin 100 | .target 101 | 102 | # TeXlipse plugin 103 | .texlipse 104 | 105 | #VIM 106 | [._]*.s[a-w][a-z] 107 | [._]s[a-w][a-z] 108 | *.un~ 109 | Session.vim 110 | .netrwhist 111 | *~ 112 | 113 | .DS_Store 114 | .AppleDouble 115 | .LSOverride 116 | 117 | # Icon must end with two \r 118 | Icon 119 | 120 | 121 | # Thumbnails 122 | ._* 123 | 124 | # Files that might appear in the root of a volume 125 | .DocumentRevisions-V100 126 | .fseventsd 127 | .Spotlight-V100 128 | .TemporaryItems 129 | .Trashes 130 | .VolumeIcon.icns 131 | 132 | # Directories potentially created on remote AFP share 133 | .AppleDB 134 | .AppleDesktop 135 | Network Trash Folder 136 | Temporary Items 137 | .apdisk 138 | 139 | # Maven 140 | target/ 141 | pom.xml.tag 142 | pom.xml.releaseBackup 143 | pom.xml.versionsBackup 144 | pom.xml.next 145 | release.properties 146 | dependency-reduced-pom.xml 147 | buildNumber.properties 148 | .mvn/timing.properties 149 | 150 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Accumulo Mesos Framework 2 | ========================= 3 | Initialize and run Accumulo clusters as a Mesos framework. 4 | 5 | ------------ 6 | 7 | **DISCLAIMER** 8 | _This is a very early version of the accumulo-mesos framework. This 9 | document, code behavior, and anything else may change without notice and/or break older installations._ 10 | 11 | ------------ 12 | 13 | # Design 14 | The accumulo-mesos framework launches Accumulo server processes on Mesos agent machines using 15 | the `$ACCUMULO_HOME/bin/accumulo` script. It automatically configures Java and Accumulo 16 | memory settings based on the Mesos offer. The framework doesn't depend on Accumulo directly, so 17 | it should be able to support more recent versions of Accumulo. It has been tested with Accumulo 1.7.0. 18 | 19 | # Current Status 20 | 21 | ### Implemented 22 | * The framework no longer depends on Accumulo! Accumulo is uploaded to HDFS. There are assumptions about 23 | where some things will be within the Accumulo tarball when extracted, but this has been stable. 24 | * Accumulo init is a separate step from running the framework. Currently this requires a local copy of 25 | Accumulo somewhere. 26 | 27 | ### Near Term Tasks 28 | * Docker? 29 | * Run Accumulo init from a Mesos agent just like the Accumulo servers. 30 | * Flesh out the framework webservice. 31 | * Reconnect the framework to a running cluster. 32 | 33 | # Running the Framework 34 | First you have to upload artifacts to HDFS (Accumulo tarball, framework tarball, native `.so` libs) 35 | and reference those URIs in your config JSON structures (see `dev/config`). 36 | 37 | Running the framework is then a two-step process. First you must initialize the framework; a condensed sketch of both steps appears below.
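As a rough sketch, condensed from `dev/init_framework.sh` and `dev/aws/start_framework.sh` in this repo — the jar path, master, and ZooKeeper addresses below come from the Vagrant dev setup and will differ in your environment:

```
# Environment the framework expects (see dev/init_framework.sh)
export ACCUMULO_HOME=/vagrant/dev/dist/accumulo-1.7.0
export ACCUMULO_CLIENT_CONF_PATH=$ACCUMULO_HOME/conf
export HADOOP_PREFIX=/usr/local/hadoop
export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
export ZOOKEEPER_HOME=/etc/zookeeper

JAR=/vagrant/dev/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar

# Step 1: initialize a new Accumulo instance (-i) from the config JSON
java -jar $JAR -i \
    -fc /vagrant/dev/config/framework.json \
    -cc /vagrant/dev/config/cluster.json

# Step 2: start the framework (flags as in dev/aws/start_framework.sh)
java -jar $JAR \
    -master 172.16.0.100:5050 \
    -zookeepers 172.16.0.100:2181 \
    -name accumulo-mesos-test
```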
38 | See `dev/init_framework.sh` Then you can run the framework see `dev\start_framework.sh` 39 | 40 | 41 | ``` 42 | java -jar /vagrant/dev/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar -h 43 | usage: accumulo-mesos [-b ] [-cc ] [-fc ] [-h] [-i] [-m 44 | ] [-n ] [-P ] [-t ] [-v] [-z ] 45 | -b,--bind-address IP address of interface to bind HTTP interface 46 | to 47 | -cc,--cluster JSON file containing cluster configuration 48 | -fc,--framework JSON file of entire framework configuration 49 | -h,--help Print this message and exit 50 | -i,--init If present, initialize new Accumulo instance 51 | -m,--master Location of mesos master to connect to 52 | -n,--name Name of this mesos framework 53 | -P,--port Port number to serve HTTP interface 54 | -t,--tarball URI of framework tarball 55 | -v,--version Show version number 56 | -z,--zookeepers List of Zookeeper servers 57 | ``` 58 | 59 | ## Configuration 60 | See config examples in `dev/config` 61 | 62 | ## Testing 63 | A multi-vm Vagrantfile is provided along with many provisioning scripts to setup 64 | the VMs for testing the framework. See `/dev` directory for more info. 65 | 66 | # Thanks 67 | Thanks to the cassandra-mesos project. I stole a lot of the project setup and framework design ideas from there. 68 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # # vi: set ft=ruby : 3 | 4 | VAGRANTFILE_API_VERSION = "2" 5 | 6 | NUM_SLAVES = 6 7 | 8 | # re-write /etc/hosts because ubuntu does 127.0.1.1 stuff that borks Hadoop 9 | $host_script = < 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 102 | 103 | 104 | 105 | 115 | 116 |
 
117 |
118 | 119 | 120 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lang/en.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /* jshint quotmark: double */ 4 | window.SwaggerTranslator.learn({ 5 | "Warning: Deprecated":"Warning: Deprecated", 6 | "Implementation Notes":"Implementation Notes", 7 | "Response Class":"Response Class", 8 | "Status":"Status", 9 | "Parameters":"Parameters", 10 | "Parameter":"Parameter", 11 | "Value":"Value", 12 | "Description":"Description", 13 | "Parameter Type":"Parameter Type", 14 | "Data Type":"Data Type", 15 | "Response Messages":"Response Messages", 16 | "HTTP Status Code":"HTTP Status Code", 17 | "Reason":"Reason", 18 | "Response Model":"Response Model", 19 | "Request URL":"Request URL", 20 | "Response Body":"Response Body", 21 | "Response Code":"Response Code", 22 | "Response Headers":"Response Headers", 23 | "Hide Response":"Hide Response", 24 | "Headers":"Headers", 25 | "Try it out!":"Try it out!", 26 | "Show/Hide":"Show/Hide", 27 | "List Operations":"List Operations", 28 | "Expand Operations":"Expand Operations", 29 | "Raw":"Raw", 30 | "can't parse JSON. Raw result":"can't parse JSON. Raw result", 31 | "Model Schema":"Model Schema", 32 | "Model":"Model", 33 | "apply":"apply", 34 | "Username":"Username", 35 | "Password":"Password", 36 | "Terms of service":"Terms of service", 37 | "Created by":"Created by", 38 | "See more at":"See more at", 39 | "Contact the developer":"Contact the developer", 40 | "api version":"api version", 41 | "Response Content Type":"Response Content Type", 42 | "fetching resource":"fetching resource", 43 | "fetching resource list":"fetching resource list", 44 | "Explore":"Explore", 45 | "Show Swagger Petstore Example Apis":"Show Swagger Petstore Example Apis", 46 | "Can't read from server. It may not have the appropriate access-control-origin settings.":"Can't read from server. It may not have the appropriate access-control-origin settings.", 47 | "Please specify the protocol for":"Please specify the protocol for", 48 | "Can't read swagger JSON from":"Can't read swagger JSON from", 49 | "Finished Loading Resource Information. Rendering Swagger UI":"Finished Loading Resource Information. 
Rendering Swagger UI", 50 | "Unable to read api":"Unable to read api", 51 | "from path":"from path", 52 | "server returned":"server returned" 53 | }); 54 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lang/es.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /* jshint quotmark: double */ 4 | window.SwaggerTranslator.learn({ 5 | "Warning: Deprecated":"Advertencia: Obsoleto", 6 | "Implementation Notes":"Notas de implementación", 7 | "Response Class":"Clase de la Respuesta", 8 | "Status":"Status", 9 | "Parameters":"Parámetros", 10 | "Parameter":"Parámetro", 11 | "Value":"Valor", 12 | "Description":"Descripción", 13 | "Parameter Type":"Tipo del Parámetro", 14 | "Data Type":"Tipo del Dato", 15 | "Response Messages":"Mensajes de la Respuesta", 16 | "HTTP Status Code":"Código de Status HTTP", 17 | "Reason":"Razón", 18 | "Response Model":"Modelo de la Respuesta", 19 | "Request URL":"URL de la Solicitud", 20 | "Response Body":"Cuerpo de la Respuesta", 21 | "Response Code":"Código de la Respuesta", 22 | "Response Headers":"Encabezados de la Respuesta", 23 | "Hide Response":"Ocultar Respuesta", 24 | "Try it out!":"Pruébalo!", 25 | "Show/Hide":"Mostrar/Ocultar", 26 | "List Operations":"Listar Operaciones", 27 | "Expand Operations":"Expandir Operaciones", 28 | "Raw":"Crudo", 29 | "can't parse JSON. Raw result":"no puede parsear el JSON. Resultado crudo", 30 | "Model Schema":"Esquema del Modelo", 31 | "Model":"Modelo", 32 | "apply":"aplicar", 33 | "Username":"Nombre de usuario", 34 | "Password":"Contraseña", 35 | "Terms of service":"Términos de Servicio", 36 | "Created by":"Creado por", 37 | "See more at":"Ver más en", 38 | "Contact the developer":"Contactar al desarrollador", 39 | "api version":"versión de la api", 40 | "Response Content Type":"Tipo de Contenido (Content Type) de la Respuesta", 41 | "fetching resource":"buscando recurso", 42 | "fetching resource list":"buscando lista del recurso", 43 | "Explore":"Explorar", 44 | "Show Swagger Petstore Example Apis":"Mostrar Api Ejemplo de Swagger Petstore", 45 | "Can't read from server. It may not have the appropriate access-control-origin settings.":"No se puede leer del servidor. Tal vez no tiene la configuración de control de acceso de origen (access-control-origin) apropiado.", 46 | "Please specify the protocol for":"Por favor, especificar el protocola para", 47 | "Can't read swagger JSON from":"No se puede leer el JSON de swagger desde", 48 | "Finished Loading Resource Information. Rendering Swagger UI":"Finalizada la carga del recurso de Información. 
Mostrando Swagger UI", 49 | "Unable to read api":"No se puede leer la api", 50 | "from path":"desde ruta", 51 | "server returned":"el servidor retornó" 52 | }); 53 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lang/pt.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /* jshint quotmark: double */ 4 | window.SwaggerTranslator.learn({ 5 | "Warning: Deprecated":"Aviso: Depreciado", 6 | "Implementation Notes":"Notas de Implementação", 7 | "Response Class":"Classe de resposta", 8 | "Status":"Status", 9 | "Parameters":"Parâmetros", 10 | "Parameter":"Parâmetro", 11 | "Value":"Valor", 12 | "Description":"Descrição", 13 | "Parameter Type":"Tipo de parâmetro", 14 | "Data Type":"Tipo de dados", 15 | "Response Messages":"Mensagens de resposta", 16 | "HTTP Status Code":"Código de status HTTP", 17 | "Reason":"Razão", 18 | "Response Model":"Modelo resposta", 19 | "Request URL":"URL requisição", 20 | "Response Body":"Corpo da resposta", 21 | "Response Code":"Código da resposta", 22 | "Response Headers":"Cabeçalho da resposta", 23 | "Headers":"Cabeçalhos", 24 | "Hide Response":"Esconder resposta", 25 | "Try it out!":"Tente agora!", 26 | "Show/Hide":"Mostrar/Esconder", 27 | "List Operations":"Listar operações", 28 | "Expand Operations":"Expandir operações", 29 | "Raw":"Cru", 30 | "can't parse JSON. Raw result":"Falha ao analisar JSON. Resulto cru", 31 | "Model Schema":"Modelo esquema", 32 | "Model":"Modelo", 33 | "apply":"Aplicar", 34 | "Username":"Usuário", 35 | "Password":"Senha", 36 | "Terms of service":"Termos do serviço", 37 | "Created by":"Criado por", 38 | "See more at":"Veja mais em", 39 | "Contact the developer":"Contate o desenvolvedor", 40 | "api version":"Versão api", 41 | "Response Content Type":"Tipo de conteúdo da resposta", 42 | "fetching resource":"busca recurso", 43 | "fetching resource list":"buscando lista de recursos", 44 | "Explore":"Explorar", 45 | "Show Swagger Petstore Example Apis":"Show Swagger Petstore Example Apis", 46 | "Can't read from server. It may not have the appropriate access-control-origin settings.":"Não é possível ler do servidor. Pode não ter as apropriadas configurações access-control-origin", 47 | "Please specify the protocol for":"Por favor especifique o protocolo", 48 | "Can't read swagger JSON from":"Não é possível ler o JSON Swagger de", 49 | "Finished Loading Resource Information. Rendering Swagger UI":"Carregar informação de recurso finalizada. 
Renderizando Swagger UI", 50 | "Unable to read api":"Não foi possível ler api", 51 | "from path":"do caminho", 52 | "server returned":"servidor retornou" 53 | }); 54 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lang/ru.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /* jshint quotmark: double */ 4 | window.SwaggerTranslator.learn({ 5 | "Warning: Deprecated":"Ворнинг: Депрекейтед", 6 | "Implementation Notes":"Заметки", 7 | "Response Class":"Пример ответа", 8 | "Status":"Статус", 9 | "Parameters":"Параметры", 10 | "Parameter":"Параметр", 11 | "Value":"Значение", 12 | "Description":"Описание", 13 | "Parameter Type":"Тип параметра", 14 | "Data Type":"Тип данных", 15 | "HTTP Status Code":"HTTP код", 16 | "Reason":"Причина", 17 | "Response Model":"Структура ответа", 18 | "Request URL":"URL запроса", 19 | "Response Body":"Тело ответа", 20 | "Response Code":"HTTP код ответа", 21 | "Response Headers":"Заголовки ответа", 22 | "Hide Response":"Спрятать ответ", 23 | "Response Messages":"Что может прийти в ответ", 24 | "Try it out!":"Попробовать!", 25 | "Show/Hide":"Показать/Скрыть", 26 | "List Operations":"Операции кратко", 27 | "Expand Operations":"Операции подробно", 28 | "Raw":"В сыром виде", 29 | "can't parse JSON. Raw result":"Не удается распарсить ответ:", 30 | "Model Schema":"Структура", 31 | "Model":"Описание", 32 | "apply":"применить", 33 | "Username":"Имя пользователя", 34 | "Password":"Пароль", 35 | "Terms of service":"Условия использования", 36 | "Created by":"Разработано", 37 | "See more at":"Еще тут", 38 | "Contact the developer":"Связаться с разработчиком", 39 | "api version":"Версия API", 40 | "Response Content Type":"Content Type ответа", 41 | "fetching resource":"Получение ресурса", 42 | "fetching resource list":"Получение ресурсов", 43 | "Explore":"Поехали", 44 | "Show Swagger Petstore Example Apis":"Показать примеры АПИ", 45 | "Can't read from server. It may not have the appropriate access-control-origin settings.":"Не удается получить ответ от сервера. Возможно, какая-то лажа с настройками доступа", 46 | "Please specify the protocol for":"Пожалуйста, укажите протогол для", 47 | "Can't read swagger JSON from":"Не получается прочитать swagger json из", 48 | "Finished Loading Resource Information. Rendering Swagger UI":"Загрузка информации о ресурсах завершена. Рендерим", 49 | "Unable to read api":"Не удалось прочитать api", 50 | "from path":"по адресу", 51 | "server returned":"сервер сказал" 52 | }); 53 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lang/translator.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | /** 4 | * Translator for documentation pages. 5 | * 6 | * To enable translation you should include one of language-files in your index.html 7 | * after . 8 | * For example - 9 | * 10 | * If you wish to translate some new texsts you should do two things: 11 | * 1. Add a new phrase pair ("New Phrase": "New Translation") into your language file (for example lang/ru.js). It will be great if you add it in other language files too. 12 | * 2. Mark that text it templates this way New Phrase or . 13 | * The main thing here is attribute data-sw-translate. Only inner html, title-attribute and value-attribute are going to translate. 
14 | * 15 | */ 16 | window.SwaggerTranslator = { 17 | 18 | _words:[], 19 | 20 | translate: function(sel) { 21 | var $this = this; 22 | sel = sel || '[data-sw-translate]'; 23 | 24 | $(sel).each(function() { 25 | $(this).html($this._tryTranslate($(this).html())); 26 | 27 | $(this).val($this._tryTranslate($(this).val())); 28 | $(this).attr('title', $this._tryTranslate($(this).attr('title'))); 29 | }); 30 | }, 31 | 32 | _tryTranslate: function(word) { 33 | return this._words[$.trim(word)] !== undefined ? this._words[$.trim(word)] : word; 34 | }, 35 | 36 | learn: function(wordsMap) { 37 | this._words = wordsMap; 38 | } 39 | }; 40 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lib/jquery.ba-bbq.min.js: -------------------------------------------------------------------------------- 1 | /* 2 | * jQuery BBQ: Back Button & Query Library - v1.2.1 - 2/17/2010 3 | * http://benalman.com/projects/jquery-bbq-plugin/ 4 | * 5 | * Copyright (c) 2010 "Cowboy" Ben Alman 6 | * Dual licensed under the MIT and GPL licenses. 7 | * http://benalman.com/about/license/ 8 | */ 9 | (function($,p){var i,m=Array.prototype.slice,r=decodeURIComponent,a=$.param,c,l,v,b=$.bbq=$.bbq||{},q,u,j,e=$.event.special,d="hashchange",A="querystring",D="fragment",y="elemUrlAttr",g="location",k="href",t="src",x=/^.*\?|#.*$/g,w=/^.*\#/,h,C={};function E(F){return typeof F==="string"}function B(G){var F=m.call(arguments,1);return function(){return G.apply(this,F.concat(m.call(arguments)))}}function n(F){return F.replace(/^[^#]*#?(.*)$/,"$1")}function o(F){return F.replace(/(?:^[^?#]*\?([^#]*).*$)?.*/,"$1")}function f(H,M,F,I,G){var O,L,K,N,J;if(I!==i){K=F.match(H?/^([^#]*)\#?(.*)$/:/^([^#?]*)\??([^#]*)(#?.*)/);J=K[3]||"";if(G===2&&E(I)){L=I.replace(H?w:x,"")}else{N=l(K[2]);I=E(I)?l[H?D:A](I):I;L=G===2?I:G===1?$.extend({},I,N):$.extend({},N,I);L=a(L);if(H){L=L.replace(h,r)}}O=K[1]+(H?"#":L||!K[1]?"?":"")+L+J}else{O=M(F!==i?F:p[g][k])}return O}a[A]=B(f,0,o);a[D]=c=B(f,1,n);c.noEscape=function(G){G=G||"";var F=$.map(G.split(""),encodeURIComponent);h=new RegExp(F.join("|"),"g")};c.noEscape(",/");$.deparam=l=function(I,F){var H={},G={"true":!0,"false":!1,"null":null};$.each(I.replace(/\+/g," ").split("&"),function(L,Q){var K=Q.split("="),P=r(K[0]),J,O=H,M=0,R=P.split("]["),N=R.length-1;if(/\[/.test(R[0])&&/\]$/.test(R[N])){R[N]=R[N].replace(/\]$/,"");R=R.shift().split("[").concat(R);N=R.length-1}else{N=0}if(K.length===2){J=r(K[1]);if(F){J=J&&!isNaN(J)?+J:J==="undefined"?i:G[J]!==i?G[J]:J}if(N){for(;M<=N;M++){P=R[M]===""?O.length:R[M];O=O[P]=M').hide().insertAfter("body")[0].contentWindow;q=function(){return a(n.document[c][l])};o=function(u,s){if(u!==s){var t=n.document;t.open().close();t[c].hash="#"+u}};o(a())}}m.start=function(){if(r){return}var t=a();o||p();(function s(){var v=a(),u=q(t);if(v!==t){o(t=v,u);$(i).trigger(d)}else{if(u!==t){i[c][l]=i[c][l].replace(/#.*/,"")+"#"+u}}r=setTimeout(s,$[d+"Delay"])})()};m.stop=function(){if(!n){r&&clearTimeout(r);r=0}};return m})()})(jQuery,this); -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lib/jquery.slideto.min.js: -------------------------------------------------------------------------------- 1 | (function(b){b.fn.slideto=function(a){a=b.extend({slide_duration:"slow",highlight_duration:3E3,highlight:true,highlight_color:"#FFFF99"},a);return 
this.each(function(){obj=b(this);b("body").animate({scrollTop:obj.offset().top},a.slide_duration,function(){a.highlight&&b.ui.version&&obj.effect("highlight",{color:a.highlight_color},a.highlight_duration)})})}})(jQuery); 2 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/lib/jquery.wiggle.min.js: -------------------------------------------------------------------------------- 1 | /* 2 | jQuery Wiggle 3 | Author: WonderGroup, Jordan Thomas 4 | URL: http://labs.wondergroup.com/demos/mini-ui/index.html 5 | License: MIT (http://en.wikipedia.org/wiki/MIT_License) 6 | */ 7 | jQuery.fn.wiggle=function(o){var d={speed:50,wiggles:3,travel:5,callback:null};var o=jQuery.extend(d,o);return this.each(function(){var cache=this;var wrap=jQuery(this).wrap('
').css("position","relative");var calls=0;for(i=1;i<=o.wiggles;i++){jQuery(this).animate({left:"-="+o.travel},o.speed).animate({left:"+="+o.travel*2},o.speed*2).animate({left:"-="+o.travel},o.speed,function(){calls++;if(jQuery(cache).parent().hasClass('wiggle-wrap')){jQuery(cache).parent().replaceWith(cache);} 8 | if(calls==o.wiggles&&jQuery.isFunction(o.callback)){o.callback();}});}});}; -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/dist/o2c.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /accumulo-mesos-framework/src/main/resources/webapp/swagger/swagger-ui.version: -------------------------------------------------------------------------------- 1 | 2.1.2 2 | -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | aredee.mesos.frameworks.accumulo 7 | accumulo-mesos-parent 8 | ${global.version} 9 | 10 | 4.0.0 11 | 12 | accumulo-mesos-scheduler 13 | 14 | 15 | The Accumulo-on-Mesos framework to deploy Apache Accumulo to Apache Mesos. 16 | This artifact contains the Mesos scheduler for the Accumulo-on-Mesos framework. 17 | 18 | 19 | 20 | 21 | Apache 2 22 | http://www.apache.org/licenses/LICENSE-2.0.txt 23 | repo 24 | Apache License Version 2.0 25 | 26 | 27 | 28 | Apache Mesos Accumulo framework 29 | http://mesos.apache.org/ 30 | 31 | 32 | 33 | 34 | ossrh 35 | https://oss.sonatype.org/content/repositories/snapshots 36 | 37 | 38 | ossrh 39 | https://oss.sonatype.org/service/local/staging/deploy/maven2/ 40 | 41 | 42 | 43 | scm:git:https://github.com/aredee/accumulo-mesos.git 44 | scm:git:https://github.com/aredee/accumulo-mesos.git 45 | https://github.com/aredee/accumulo-mesos 46 | 47 | 48 | Github 49 | https://github.com/aredee/accumulo-mesos/issues 50 | 51 | 52 | 53 | 54 | aredee.mesos.frameworks.accumulo 55 | accumulo-mesos-common 56 | ${project.version} 57 | 58 | 59 | org.apache.mesos 60 | mesos 61 | 62 | 63 | org.slf4j 64 | slf4j-api 65 | 66 | 67 | org.slf4j 68 | ${slf4j.binding} 69 | 70 | 71 | com.google.guava 72 | guava 73 | 74 | 75 | junit 76 | junit 77 | test 78 | 79 | 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/src/main/java/aredee/mesos/frameworks/accumulo/scheduler/launcher/Launcher.java: -------------------------------------------------------------------------------- 1 | package aredee.mesos.frameworks.accumulo.scheduler.launcher; 2 | 3 | import aredee.mesos.frameworks.accumulo.scheduler.matcher.Match; 4 | import org.apache.mesos.Protos; 5 | import org.apache.mesos.SchedulerDriver; 6 | 7 | /** 8 | * Interface to launch a server based on a Mesos offer. 9 | */ 10 | public interface Launcher { 11 | 12 | /** 13 | * Interface used to launch Accumulo Server tasks. 14 | * 15 | * @param driver Mesos interface to use to launch a server 16 | * @param match {@link Match} to launch. 
A Match must contain a server and an offer 17 | */ 18 | public Protos.TaskInfo launch(SchedulerDriver driver, Match match); 19 | } 20 | -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/src/main/java/aredee/mesos/frameworks/accumulo/scheduler/matcher/Match.java: -------------------------------------------------------------------------------- 1 | package aredee.mesos.frameworks.accumulo.scheduler.matcher; 2 | 3 | import aredee.mesos.frameworks.accumulo.model.Task; 4 | import org.apache.mesos.Protos; 5 | 6 | public class Match { 7 | private Task task = null; 8 | private Protos.Offer offer = null; 9 | 10 | /** 11 | Constructs a Match object with no offer. These matches are used to pass back servers that were requested to 12 | be launched, but had no matching offers. 13 | */ 14 | public Match(Task task){ 15 | this(task, null); 16 | } 17 | 18 | /** 19 | * Constructs a match with no task. Typically used to signal extra offers were available when appropriate. 20 | * @param offer 21 | */ 22 | public Match(Protos.Offer offer){ 23 | this(null, offer); 24 | } 25 | 26 | /** 27 | * Constructs a Match between a task and an offer. 28 | */ 29 | public Match(Task task, Protos.Offer offer){ 30 | this.task = task; 31 | this.offer = offer; 32 | } 33 | 34 | public boolean hasTask(){ 35 | return this.task != null; 36 | } 37 | public boolean hasOffer(){ 38 | return this.offer != null; 39 | } 40 | public void setOffer(Protos.Offer offer){ 41 | this.offer = offer; 42 | } 43 | public void setTask(Task task){ 44 | this.task = task; 45 | } 46 | public Protos.Offer getOffer(){ 47 | return this.offer; 48 | } 49 | public Task getTask(){ 50 | return this.task; 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/src/main/java/aredee/mesos/frameworks/accumulo/scheduler/matcher/MatchUtils.java: -------------------------------------------------------------------------------- 1 | package aredee.mesos.frameworks.accumulo.scheduler.matcher; 2 | 3 | import java.util.ArrayList; 4 | import java.util.List; 5 | 6 | /** 7 | * Static helper methods for handling lists of Match objects 8 | */ 9 | public class MatchUtils { 10 | // Don't allow instantiation 11 | private MatchUtils(){} 12 | 13 | /** 14 | * Iterates through list to find Match objects with no corresponding Offer 15 | * 16 | * @param matches List to check for Matches without Offers 17 | * @return List of Match objects with no Offer set 18 | */ 19 | public static List getUnmatchedServers(List matches){ 20 | List noMatch = new ArrayList<>(); 21 | for( Match match: matches){ 22 | if( !match.hasOffer() ){ 23 | noMatch.add(match); 24 | } 25 | } 26 | return noMatch; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/src/main/java/aredee/mesos/frameworks/accumulo/scheduler/matcher/Matcher.java: -------------------------------------------------------------------------------- 1 | package aredee.mesos.frameworks.accumulo.scheduler.matcher; 2 | 3 | import aredee.mesos.frameworks.accumulo.model.Task; 4 | import org.apache.mesos.Protos; 5 | 6 | import java.util.List; 7 | 8 | /** 9 | * Responsible for matching required servers to available offers 10 | */ 11 | public interface Matcher { 12 | /** 13 | Returns a list of matched servers and offers. 
If offers were not found for all servers, 14 | a Match object will be present with no offer 15 | 16 | @param tasks tasks that require resources 17 | @param offers to match servers against 18 | */ 19 | public List matchOffers(List tasks, List offers); 20 | } 21 | -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/src/main/java/aredee/mesos/frameworks/accumulo/scheduler/matcher/MinCpuMinRamFIFOMatcher.java: -------------------------------------------------------------------------------- 1 | package aredee.mesos.frameworks.accumulo.scheduler.matcher; 2 | 3 | import aredee.mesos.frameworks.accumulo.configuration.Defaults; 4 | import aredee.mesos.frameworks.accumulo.model.Accumulo; 5 | import aredee.mesos.frameworks.accumulo.model.ServerProfile; 6 | import aredee.mesos.frameworks.accumulo.model.Task; 7 | import com.google.common.collect.Lists; 8 | import org.apache.mesos.Protos; 9 | import org.slf4j.Logger; 10 | import org.slf4j.LoggerFactory; 11 | 12 | import java.util.List; 13 | 14 | /** 15 | * Matcher that finds an offer that meets minimum cpu and ram requirements for the server on a first come first 16 | * served basis. 17 | * 18 | */ 19 | public class MinCpuMinRamFIFOMatcher implements Matcher { 20 | 21 | private static final Logger LOGGER = LoggerFactory.getLogger(MinCpuMinRamFIFOMatcher.class); 22 | 23 | private Accumulo config; 24 | 25 | public MinCpuMinRamFIFOMatcher(Accumulo config){ 26 | this.config = config; 27 | } 28 | 29 | /** 30 | * Returns a list of matched servers and offers. If offers were not found for all servers, 31 | * a Match object will be present with no offer 32 | * 33 | * @param tasks 34 | * @param offers 35 | * 36 | */ 37 | @Override 38 | public List matchOffers(List tasks, List offers) { 39 | 40 | LOGGER.info("Matching {} tasks to {} offers", tasks.size(), offers.size()); 41 | 42 | List matches = Lists.newArrayListWithCapacity(tasks.size()); 43 | 44 | for(int tt = 0; tt < tasks.size(); tt++){ 45 | Task task = tasks.get(tt); 46 | for( int oo = 0; (oo < offers.size()) ; oo++){ 47 | Protos.Offer offer = offers.get(oo); 48 | LOGGER.info("Checking offer id {} for task {}", offer.getId().getValue(), task.getType().getServerKeyword()); 49 | if( offerMatchesTask(task, offer)){ 50 | // create a match 51 | Match match = new Match(task, offer); 52 | matches.add(match); 53 | offers.remove(offer); 54 | 55 | LOGGER.info("Found match! task {} offer {}", task.getType().getServerKeyword(), offer.getId().getValue()); 56 | break; 57 | } 58 | } 59 | } 60 | 61 | return matches; 62 | } 63 | 64 | private boolean offerMatchesTask(Task task, Protos.Offer offer){ 65 | double offerCpus = -1; 66 | double offerMem = -1; 67 | 68 | // Get offer resources 69 | for( Protos.Resource resource : offer.getResourcesList()){ 70 | if (resource.getName().equalsIgnoreCase("cpus")) { 71 | offerCpus = resource.hasScalar() ? resource.getScalar().getValue() : 0.0; 72 | } else if (resource.getName().equalsIgnoreCase("mem")) { 73 | offerMem = resource.hasScalar() ? resource.getScalar().getValue() : 0.0; 74 | } 75 | } 76 | // TODO Have to take into account the executor resources because MESOS will. 
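// NOTE: the profile requirement computed below already folds the executor's share
// into the ask (Defaults.EXECUTOR_CPUS for cpu, config.getExecutorMemory() for mem),
// so an offer only matches if it can hold the server task plus its executor.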
77 | 78 | // Get profile resources 79 | ServerProfile profile = task.getServerProfile(); 80 | double profileCpus = profile.getCpus().doubleValue() + Defaults.EXECUTOR_CPUS; 81 | double profileMem = profile.getMemory().doubleValue() + config.getExecutorMemory(); 82 | 83 | boolean offerMatches = cpusAndMemAreAdequate(offerCpus, offerMem, profileCpus, profileMem); 84 | 85 | return offerMatches; 86 | } 87 | 88 | private boolean cpusAndMemAreAdequate(double offerCpu, double offerMem, double serverCpu, double serverMem) { 89 | return ( offerCpu >= serverCpu ) && ( offerMem >= serverMem ); 90 | } 91 | 92 | } 93 | -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/src/main/java/aredee/mesos/frameworks/accumulo/scheduler/package-info.java: -------------------------------------------------------------------------------- 1 | 2 | package aredee.mesos.frameworks.accumulo.scheduler; -------------------------------------------------------------------------------- /accumulo-mesos-scheduler/src/test/java/aredee/mesos/frameworks/accumulo/scheduler/matcher/MinCpuMinRamFIFOMatcherTest.java: -------------------------------------------------------------------------------- 1 | package aredee.mesos.frameworks.accumulo.scheduler.matcher; 2 | 3 | 4 | //@RunWith(Parameterized.class) 5 | public class MinCpuMinRamFIFOMatcherTest { 6 | 7 | /* 8 | private Matcher matcher = null; 9 | 10 | private static class MatcherTestClusterConfiguration extends BaseClusterConfiguration { 11 | 12 | } 13 | 14 | private static class TestParameter { 15 | 16 | private ClusterConfiguration config; 17 | private Set servers; 18 | private List offers; 19 | private List matches; 20 | 21 | public TestParameter(ClusterConfiguration config){ 22 | this.config = config; 23 | this.servers = new HashSet<>(); 24 | this.offers = new ArrayList<>(); 25 | this.matches = new ArrayList<>(); 26 | } 27 | 28 | public TestParameter addMaster(){ 29 | this.servers.add(ServerUtils.newServer(ServerType.MASTER)); 30 | return this; 31 | } 32 | 33 | public TestParameter addTabletServer(){ 34 | this.servers.add(ServerUtils.newServer(ServerType.TABLET_SERVER)); 35 | return this; 36 | } 37 | 38 | public TestParameter addTabletServers(int numServers){ 39 | for( int ii = 0; ii < numServers; ii++ ){ 40 | this.addTabletServer(); 41 | } 42 | return this; 43 | } 44 | 45 | public TestParameter addSimpleOffer(double cpus, double ram){ 46 | this.offers.add(makeSimpleOffer(cpus, ram)); 47 | return this; 48 | } 49 | 50 | } 51 | 52 | private static final TestParameter param0 = 53 | new TestParameter(new MatcherTestClusterConfiguration()); 54 | 55 | static { 56 | 57 | param0.config.setMinTservers(3); 58 | param0.config.setTserverCpus(2); 59 | param0.config.setMinTserverMem(1024); 60 | param0.config.setMinMasterCpus(2); 61 | param0.config.setMinMasterMem(1024); 62 | 63 | param0.addMaster() 64 | .addTabletServers(3) 65 | .addSimpleOffer(2.0, 10240.0) 66 | .addSimpleOffer(3.0 ,10240.0) 67 | .addSimpleOffer(4.0 ,10240.0) 68 | .addSimpleOffer(5.0 ,10240.0); 69 | 70 | 71 | // TODO create class to package config, servers, offers, matches 72 | matches0 = Arrays.asList(new Match[] { 73 | new Match(Servers) 74 | }); 75 | 76 | } 77 | 78 | 79 | 80 | @Parameters(name = "{index}: {4}") 81 | public static Collection data(){ 82 | 83 | return Arrays.asList(new Object[][]{ 84 | {config0, servers0, offers0, matches0, "Simple Test Setup"} 85 | }); 86 | } 87 | 88 | @Parameter 89 | public ClusterConfiguration inputConfig; 90 | 91 | @Parameter 92 | 
public Set inputServers; 93 | 94 | @Parameter 95 | public List inputOffers; 96 | 97 | @Parameter 98 | public List expectedMatches; 99 | 100 | @Parameter 101 | public String testText; 102 | 103 | @Before 104 | public void setup() { 105 | // Auto-setup of number of tservers 106 | int tservers = 0; 107 | for( AccumuloServer server : inputServers){ 108 | if( server.getType() == ServerType.TABLET_SERVER){ 109 | tservers++; 110 | } 111 | } 112 | inputConfig.setMinTservers(tservers); 113 | } 114 | 115 | @Test 116 | public void testMatchOffers() throws Exception { 117 | matcher = new MinCpuMinRamFIFOMatcher(inputConfig); 118 | List actualMatches = matcher.matchOffers(inputServers, inputOffers); 119 | for( Match match : expectedMatches ){ 120 | assertTrue(testText, actualMatches.contains(match)); 121 | } 122 | } 123 | 124 | private static Protos.Resource makeScalarResource(String name, Double value){ 125 | return Protos.Resource.newBuilder() 126 | .setName(name) 127 | .setScalar(Protos.Value.Scalar.newBuilder() 128 | .setValue(value) 129 | .build() 130 | ) 131 | .build(); 132 | } 133 | 134 | private static Protos.Offer makeSimpleOffer(Double cpus, Double mem){ 135 | return Protos.Offer.newBuilder() 136 | .addResources(makeScalarResource("cpus", cpus)) 137 | .addResources(makeScalarResource("mem", mem)) 138 | .build(); 139 | } 140 | */ 141 | 142 | } -------------------------------------------------------------------------------- /dev/.gitignore: -------------------------------------------------------------------------------- 1 | **/dist 2 | **/provision/tarballs 3 | 4 | 5 | -------------------------------------------------------------------------------- /dev/README.md: -------------------------------------------------------------------------------- 1 | /dev contains scripts and config used to develop and test the framework. 2 | 3 | The main Vagrantfile reads scripts from the provision directory to provision 4 | a cluster running Mesos, ZooKeeper, and HDFS. 5 | 6 | The dist directory contains archives to be uploaded to HDFS (Accumulo, executor, etc.). 7 | 8 | The scripts here are meant to be run from inside the Vagrant machines (i.e. `vagrant ssh master`, then `cd /vagrant/dev`, then run the desired script). 9 | 10 | To populate /etc/hosts on all running machines, install and run the vagrant-hostmanager plugin:
11 | vagrant plugin install vagrant-hostmanager 12 | vagrant hostmanager 13 | 14 | -------------------------------------------------------------------------------- /dev/aws/AWS_cluster.json: -------------------------------------------------------------------------------- 1 | { 2 | "instance": "TEST_10092015_1", 3 | "rootUser": "jimbo", 4 | "rootPassword": "jimbopassword", 5 | "zkServers": "172.31.0.11:2181", 6 | "executorMemory": 128, 7 | "tarballUri": "hdfs://172.31.1.11:54310/user/klucar/accumulo-1.7.0-bin.tar.gz", 8 | "hdfsUri": "hdfs://172.31.1.11:54310/user/klucar/accumulo-mesos", 9 | "nativeLibUri": "hdfs://172.31.1.11:54310/user/klucar/libaccumulo.so", 10 | "siteXml": "", 11 | "servers": [ 12 | { 13 | "count": 10, 14 | "profile":{ 15 | "name":"BasicTserver", 16 | "description":"Basic Tserver setup", 17 | "type":"tserver", 18 | "cpus":2.0, 19 | "mem":12288, 20 | "user": "" 21 | } 22 | }, 23 | { 24 | "count": 1, 25 | "profile": { 26 | "name": "BasicMaster", 27 | "description": "Basic Master setup", 28 | "type": "master", 29 | "cpus": 2.0, 30 | "mem": 2048, 31 | "user": "" 32 | } 33 | }, 34 | { 35 | "count": 1, 36 | "profile": { 37 | "name": "Monitor", 38 | "description": "Basic Monitor setup", 39 | "type": "monitor", 40 | "cpus": 1.0, 41 | "mem": 1024, 42 | "user": "" 43 | } 44 | }, 45 | { 46 | "count": 1, 47 | "profile": { 48 | "name": "BasicGC", 49 | "description": "Basic Garbage Collector setup", 50 | "type": "gc", 51 | "cpus": 2.0, 52 | "mem": 2048, 53 | "user": "" 54 | } 55 | } 56 | ] 57 | } 58 | -------------------------------------------------------------------------------- /dev/aws/AWS_framework.json: -------------------------------------------------------------------------------- 1 | { 2 | "bindAddress": "0.0.0.0", 3 | "httpPort": "18192", 4 | "mesosMaster": "172.31.1.11:5050", 5 | "name":"accumulo-mesos-aws-test-1", 6 | "tarballUri": "hdfs://172.31.1.11:54310/user/klucar/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz", 7 | "zkServers": "172.31.0.11:2181" 8 | } 9 | -------------------------------------------------------------------------------- /dev/aws/init_framework.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export LOG=/tmp/accumulo-framework.log 4 | 5 | export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 6 | export ACCUMULO_HOME=/home/ubuntu/klucar/accumulo-1.7.0 7 | export ACCUMULO_CLIENT_CONF_PATH=$ACCUMULO_HOME/conf 8 | export HADOOP_PREFIX=/usr/lib/hadoop 9 | export HADOOP_CONF_DIR=/etc/hadoop 10 | export ZOOKEEPER_HOME=/etc/zookeeper 11 | 12 | java -jar /home/ubuntu/klucar/accumulo-mesos-dist-0.2.0-SNAPSHOT/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar \ 13 | -i -fc /home/ubuntu/klucar/AWS_framework.json -cc /home/ubuntu/klucar/AWS_cluster.json \ 14 | | tee $LOG 15 | -------------------------------------------------------------------------------- /dev/aws/start_framework.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export LOG=/tmp/accumulo-framework.log 4 | 5 | export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 6 | export ACCUMULO_HOME=/home/ubuntu/klucar/accumulo-1.7.0 7 | export ACCUMULO_CLIENT_CONF_PATH=$ACCUMULO_HOME/conf 8 | export HADOOP_PREFIX=/usr/lib/hadoop 9 | export HADOOP_CONF_DIR=/etc/hadoop 10 | export ZOOKEEPER_HOME=/etc/zookeeper 11 | 12 | java -jar /home/ubuntu/klucar/accumulo-mesos-dist-0.2.0-SNAPSHOT/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar \ 13 | -master 172.31.1.11:5050 \ 14 | 
-zookeepers 172.31.0.11:2181 \ 15 | -name $1 \ 16 | | tee $LOG 17 | 18 | 19 | # "bindAddress": "172.16.0.100", 20 | # "httpPort": "8192", 21 | # "mesosMaster": "172.16.0.100:5050", 22 | # "name":"accumulo-mesos-test", 23 | # "id": "", 24 | # "tarballUri": "hdfs://172.16.0.100:9000/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz", 25 | # "zkServers": "172.16.0.100:2181" 26 | -------------------------------------------------------------------------------- /dev/aws/upload_hdfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | echo "Uploading files to HDFS" 5 | hadoop fs -copyFromLocal -f /home/ubuntu/klucar/*.gz /user/klucar/. 6 | hadoop fs -copyFromLocal -f /home/ubuntu/klucar/libaccumulo.so /user/klucar/. 7 | -------------------------------------------------------------------------------- /dev/config/AWS_framework.json: -------------------------------------------------------------------------------- 1 | { 2 | "bindAddress": "0.0.0.0", 3 | "httpPort": "8192", 4 | "mesosMaster": "172.31.1.11:5050", 5 | "name":"accumulo-mesos-aws-test-102", 6 | "id": "", 7 | "tarballUri": "hdfs://172.31.1.11:54310/user/klucar/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz", 8 | "zkServers": "172.31.0.11:2181", 9 | "cluster":{ 10 | "instance": "TEST_09172015_102", 11 | "rootUser": "root", 12 | "rootPassword": "secret", 13 | "zkServers": "172.31.0.11:2181", 14 | "executorMemory": 512, 15 | "tarballUri": "hdfs://172.31.1.11:54310/user/klucar/accumulo-1.7.0-bin.tar.gz", 16 | "hdfsUri": "hdfs://172.31.1.11:54310/accumulo-mesos", 17 | "siteXml": "", 18 | "servers": [ 19 | { 20 | "count": 10, 21 | "profile":{ 22 | "name":"BasicTserver", 23 | "description":"Basic Tserver setup", 24 | "type":"tserver", 25 | "cpus":2.0, 26 | "mem":12288, 27 | "user": "" 28 | } 29 | }, 30 | { 31 | "count": 1, 32 | "profile": { 33 | "name": "BasicMaster", 34 | "description": "Basic Master setup", 35 | "type": "master", 36 | "cpus": 2.0, 37 | "mem": 2048, 38 | "user": "" 39 | } 40 | }, 41 | { 42 | "count": 1, 43 | "profile": { 44 | "name": "Monitor", 45 | "description": "Basic Monitor setup", 46 | "type": "monitor", 47 | "cpus": 1.0, 48 | "mem": 1024, 49 | "user": "" 50 | } 51 | }, 52 | { 53 | "count": 1, 54 | "profile": { 55 | "name": "BasicGC", 56 | "description": "Basic Garbage Collector setup", 57 | "type": "gc", 58 | "cpus": 2.0, 59 | "mem": 2048, 60 | "user": "" 61 | } 62 | } 63 | ] 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /dev/config/FrameworkAndAccumulo.json: -------------------------------------------------------------------------------- 1 | { 2 | "bindAddress": "172.16.0.100", 3 | "httpPort": "8192", 4 | "mesosMaster": "172.16.0.100:5050", 5 | "name":"accumulo-mesos-test", 6 | "id": "", 7 | "tarballUri": "hdfs://172.16.0.100:9000/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz", 8 | "zkServers": "172.16.0.100:2181", 9 | "cluster":{ 10 | "instance": "TEST_09162015_1", 11 | "rootUser": "jimbo", 12 | "rootPassword": "jimbopassword", 13 | "zkServers": "172.16.0.100:2181", 14 | "executorMemory": 128, 15 | "tarballUri": "hdfs://172.16.0.100:9000/dist/accumulo-1.7.0-bin.tar.gz", 16 | "hdfsUri": "hdfs://172.16.0.100:9000/accumulo-mesos", 17 | "siteXml": "", 18 | "servers": [ 19 | { 20 | "count": 3, 21 | "profile":{ 22 | "name":"BasicTserver", 23 | "description":"Basic Tserver setup", 24 | "type":"tserver", 25 | "cpus":1.0, 26 | "mem":1024, 27 | "user": "" 28 | } 29 | }, 30 | { 31 | "count": 1, 32 | "profile": { 33 | "name": 
"BasicMaster", 34 | "description": "Basic Master setup", 35 | "type": "master", 36 | "cpus": 1.0, 37 | "mem": 512, 38 | "user": "" 39 | } 40 | }, 41 | { 42 | "count": 1, 43 | "profile": { 44 | "name": "Monitor", 45 | "description": "Basic Monitor setup", 46 | "type": "monitor", 47 | "cpus": 1.0, 48 | "mem": 256, 49 | "user": "" 50 | } 51 | }, 52 | { 53 | "count": 1, 54 | "profile": { 55 | "name": "BasicGC", 56 | "description": "Basic Garbage Collector setup", 57 | "type": "gc", 58 | "cpus": 1.0, 59 | "mem": 256, 60 | "user": "" 61 | } 62 | } 63 | ] 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /dev/config/cluster.json: -------------------------------------------------------------------------------- 1 | { 2 | "instance": "TEST_10092015_3", 3 | "rootUser": "jimbo", 4 | "rootPassword": "jimbopassword", 5 | "zkServers": "172.16.0.100:2181", 6 | "executorMemory": 128, 7 | "tarballUri": "hdfs://172.16.0.100:9000/dist/accumulo-1.7.0-bin.tar.gz", 8 | "hdfsUri": "hdfs://172.16.0.100:9000/accumulo-mesos", 9 | "nativeLibUri": "hdfs://172.16.0.100:9000/dist/libaccumulo.so", 10 | "siteXml": "", 11 | "servers": [ 12 | { 13 | "count": 3, 14 | "profile":{ 15 | "name":"BasicTserver", 16 | "description":"Basic Tserver setup", 17 | "type":"tserver", 18 | "cpus":1.0, 19 | "mem":1024, 20 | "user": "" 21 | } 22 | }, 23 | { 24 | "count": 1, 25 | "profile": { 26 | "name": "BasicMaster", 27 | "description": "Basic Master setup", 28 | "type": "master", 29 | "cpus": 1.0, 30 | "mem": 512, 31 | "user": "" 32 | } 33 | }, 34 | { 35 | "count": 1, 36 | "profile": { 37 | "name": "Monitor", 38 | "description": "Basic Monitor setup", 39 | "type": "monitor", 40 | "cpus": 1.0, 41 | "mem": 256, 42 | "user": "" 43 | } 44 | }, 45 | { 46 | "count": 1, 47 | "profile": { 48 | "name": "BasicGC", 49 | "description": "Basic Garbage Collector setup", 50 | "type": "gc", 51 | "cpus": 1.0, 52 | "mem": 256, 53 | "user": "" 54 | } 55 | } 56 | ] 57 | } 58 | -------------------------------------------------------------------------------- /dev/config/framework.json: -------------------------------------------------------------------------------- 1 | { 2 | "bindAddress": "172.16.0.100", 3 | "httpPort": "8192", 4 | "mesosMaster": "172.16.0.100:5050", 5 | "name":"accumulo-mesos-test-4", 6 | "id": "", 7 | "tarballUri": "hdfs://172.16.0.100:9000/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz", 8 | "zkServers": "172.16.0.100:2181" 9 | } 10 | -------------------------------------------------------------------------------- /dev/init_framework.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export LOG=/tmp/accumulo-framework.log 4 | 5 | export ACCUMULO_HOME=/vagrant/dev/dist/accumulo-1.7.0 6 | export ACCUMULO_CLIENT_CONF_PATH=$ACCUMULO_HOME/conf 7 | export HADOOP_PREFIX=/usr/local/hadoop 8 | export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop 9 | export ZOOKEEPER_HOME=/etc/zookeeper 10 | 11 | java -jar /vagrant/dev/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar \ 12 | -i -fc /vagrant/dev/config/framework.json -cc /vagrant/dev/config/cluster.json \ 13 | | tee $LOG 14 | -------------------------------------------------------------------------------- /dev/provision/.gitignore: -------------------------------------------------------------------------------- 1 | **/tarballs 2 | 3 | -------------------------------------------------------------------------------- 
/dev/provision/format_namenode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | set -e 4 | 5 | # Format NameNode 6 | sudo -u hduser sh -c 'yes Y | /usr/local/hadoop/bin/hdfs namenode -format' 7 | -------------------------------------------------------------------------------- /dev/provision/install_compiler.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | 4 | set -e 5 | 6 | 7 | apt-get -y install g++ 8 | apt-get -y install make 9 | 10 | -------------------------------------------------------------------------------- /dev/provision/install_default_jdk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | 4 | PREFIX="JAVA Default JDK Provisioner:" 5 | set -e 6 | 7 | # For installing Java 8 8 | apt-get -y update 9 | apt-get -y install default-jdk 10 | 11 | if $(test -e /usr/lib/libjvm.so); then 12 | rm /usr/lib/libjvm.so 13 | fi 14 | 15 | ln -s /usr/lib/jvm/default-java/jre/lib/amd64/server/libjvm.so /usr/lib/libjvm.so 16 | 17 | -------------------------------------------------------------------------------- /dev/provision/install_default_jre_headless.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | 4 | PREFIX="JAVA Default JDK Provisioner:" 5 | set -e 6 | 7 | # For installing Java 8 8 | apt-get -y update 9 | apt-get -y install default-jre-headless 10 | 11 | if $(test -e /usr/lib/libjvm.so); then 12 | rm /usr/lib/libjvm.so 13 | fi 14 | 15 | ln -s /usr/lib/jvm/default-java/jre/lib/amd64/server/libjvm.so /usr/lib/libjvm.so 16 | 17 | -------------------------------------------------------------------------------- /dev/provision/install_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | set -e 4 | 5 | #Install docker 6 | echo "deb http://http.debian.net/debian jessie-backports main" >> /etc/apt/sources.list 7 | apt-get update 8 | apt-get -y install docker.io 9 | -------------------------------------------------------------------------------- /dev/provision/install_hadoop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | # $1 is ip of namenode, $2 is ip of slave 4 | 5 | set -e 6 | 7 | HADOOP_VER=2.5.2 8 | 9 | apt-get update 10 | 11 | apt-get install -y openssh-server 12 | apt-get install -y tar 13 | apt-get install -y gzip 14 | 15 | # Add hduser user and hadoop group 16 | 17 | if [ `/bin/egrep -i "^hadoop:" /etc/group` ]; then 18 | echo "Group hadoop already exists" 19 | else 20 | echo "Adding hadoop group" 21 | addgroup hadoop 22 | fi 23 | 24 | 25 | if [ `/bin/egrep -i "^hduser:" /etc/passwd` ]; then 26 | echo "User hduser already exists" 27 | else 28 | echo "creating hduser in group hadoop" 29 | adduser --ingroup hadoop --disabled-password --gecos "" --home /home/hduser hduser 30 | adduser hduser sudo 31 | fi 32 | 33 | 34 | # Setup password-less auth 35 | sudo -u hduser sh -c "mkdir /home/hduser/.ssh" 36 | sudo -u hduser sh -c "chmod 700 /home/hduser/.ssh" 37 | sudo -u hduser sh -c "yes | ssh-keygen -t rsa -N '' -f /home/hduser/.ssh/id_rsa" 38 | sudo -u hduser sh -c 'cat /home/hduser/.ssh/id_rsa.pub >> /home/hduser/.ssh/authorized_keys' 39 | sudo -u hduser sh -c "ssh-keyscan -H $1 >> /home/hduser/.ssh/known_hosts" 40 | sudo -u hduser sh -c "ssh-keyscan -H localhost >> /home/hduser/.ssh/known_hosts" 41 | sudo -u hduser sh -c 
"ssh-keyscan -H $2 >> /home/hduser/.ssh/known_hosts" 42 | 43 | # Download Hadoop 44 | # I've decided to provide this under the vagrant directory 45 | #cd ~ 46 | #if [ ! -f hadoop-${HADOOP_VER}.tar.gz ]; then 47 | # wget http://apache.osuosl.org/hadoop/common/hadoop-${HADOOP_VER}/hadoop-${HADOOP_VER}.tar.gz 48 | #fi 49 | 50 | sudo tar ixzf /vagrant/dev/provision/tarballs/hadoop-${HADOOP_VER}.tar.gz -C /usr/local 51 | cd /usr/local 52 | rm -rf hadoop 53 | sudo mv -f hadoop-${HADOOP_VER} hadoop 54 | sudo chown -R hduser:hadoop hadoop 55 | 56 | # Init bashrc with hadoop env variables 57 | sudo sh -c 'echo export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 >> /home/hduser/.bashrc' 58 | sudo sh -c 'echo export HADOOP_INSTALL=/usr/local/hadoop >> /home/hduser/.bashrc' 59 | sudo sh -c 'echo export PATH=\$PATH:\$HADOOP_INSTALL/bin >> /home/hduser/.bashrc' 60 | sudo sh -c 'echo export PATH=\$PATH:\$HADOOP_INSTALL/sbin >> /home/hduser/.bashrc' 61 | sudo sh -c 'echo export HADOOP_MAPRED_HOME=\$HADOOP_INSTALL >> /home/hduser/.bashrc' 62 | sudo sh -c 'echo export HADOOP_COMMON_HOME=\$HADOOP_INSTALL >> /home/hduser/.bashrc' 63 | sudo sh -c 'echo export HADOOP_HDFS_HOME=\$HADOOP_INSTALL >> /home/hduser/.bashrc' 64 | sudo sh -c 'echo export YARN_HOME=\$HADOOP_INSTALL >> /home/hduser/.bashrc' 65 | sudo sh -c 'echo export HADOOP_COMMON_LIB_NATIVE_DIR=\$\{HADOOP_INSTALL\}/lib/native >> /home/hduser/.bashrc' 66 | sudo sh -c 'echo export HADOOP_OPTS=\"-Djava.library.path=\$HADOOP_INSTALL/lib\" >> /home/hduser/.bashrc' 67 | # hit the vagrant user with the same thing 68 | sudo sh -c 'echo export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 >> /home/vagrant/.bashrc' 69 | sudo sh -c 'echo export HADOOP_INSTALL=/usr/local/hadoop >> /home/vagrant/.bashrc' 70 | sudo sh -c 'echo export PATH=\$PATH:\$HADOOP_INSTALL/bin >> /home/vagrant/.bashrc' 71 | sudo sh -c 'echo export PATH=\$PATH:\$HADOOP_INSTALL/sbin >> /home/vagrant/.bashrc' 72 | sudo sh -c 'echo export HADOOP_MAPRED_HOME=\$HADOOP_INSTALL >> /home/vagrant/.bashrc' 73 | sudo sh -c 'echo export HADOOP_COMMON_HOME=\$HADOOP_INSTALL >> /home/vagrant/.bashrc' 74 | sudo sh -c 'echo export HADOOP_HDFS_HOME=\$HADOOP_INSTALL >> /home/vagrant/.bashrc' 75 | sudo sh -c 'echo export YARN_HOME=\$HADOOP_INSTALL >> /home/vagrant/.bashrc' 76 | sudo sh -c 'echo export HADOOP_COMMON_LIB_NATIVE_DIR=\$\{HADOOP_INSTALL\}/lib/native >> /home/vagrant/.bashrc' 77 | sudo sh -c 'echo export HADOOP_OPTS=\"-Djava.library.path=\$HADOOP_INSTALL/lib\" >> /home/vagrant/.bashrc' 78 | 79 | 80 | # Modify JAVA_HOME in hadoop-env 81 | cd /usr/local/hadoop/etc/hadoop 82 | sudo -u hduser sed -i.bak s=\${JAVA_HOME}=//usr/lib/jvm/java-7-openjdk-amd64/=g hadoop-env.sh 83 | pwd 84 | 85 | /usr/local/hadoop/bin/hadoop version 86 | 87 | # Update configuration 88 | #sudo -u hduser sed -i.bak 's==\\fs\.default\.name\\hdfs://localhost:9000\\=g' core-site.xml 89 | sudo -u hduser sed -i.bak 's==\\fs\.default\.name\\hdfs://'"$1"':9000\\=g' core-site.xml 90 | sudo -u hduser sed -i.bak 's==\\yarn\.nodemanager\.aux-services\mapreduce_shuffle\\\yarn.nodemanager.aux-services.mapreduce.shuffle.class\org\.apache\.hadoop\.mapred\.ShuffleHandler\=g' yarn-site.xml 91 | 92 | sudo -u hduser cp mapred-site.xml.template mapred-site.xml 93 | sudo -u hduser sed -i.bak 's==\\mapreduce\.framework\.name\yarn\=g' mapred-site.xml 94 | 95 | cd ~ 96 | sudo -u hduser sh -c 'mkdir -p ~hduser/mydata/hdfs/namenode' 97 | sudo -u hduser sh -c 'mkdir -p ~hduser/mydata/hdfs/datanode' 98 | sudo chown -R hduser:hadoop ~hduser/mydata 99 | 100 | cd 
101 | sudo -u hduser sed -i.bak 's=<configuration>=<configuration>\n<property>\n<name>dfs.replication</name>\n<value>1</value>\n</property>\n<property>\n<name>dfs.namenode.name.dir</name>\n<value>file:/home/hduser/mydata/hdfs/namenode</value>\n</property>\n<property>\n<name>dfs.datanode.data.dir</name>\n<value>file:/home/hduser/mydata/hdfs/datanode</value>\n</property>\n<property>\n<name>dfs.namenode.datanode.registration.ip-hostname-check</name>\n<value>false</value>\n</property>=g' hdfs-site.xml 102 | 103 | 104 | 105 | 106 | 107 | --------------------------------------------------------------------------------
/dev/provision/install_java_8.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | 4 | PREFIX="JAVA 8 Provisioner:" 5 | set -e 6 | 7 | # For installing Java 8 8 | add-apt-repository ppa:webupd8team/java 9 | apt-get -y update 10 | echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections 11 | # apt-get -y install default-jdk 12 | apt-get -y install oracle-java8-installer 13 | apt-get -y install oracle-java8-set-default 14 | 15 | if [ -e /usr/lib/libjvm.so ]; then 16 | rm /usr/lib/libjvm.so 17 | fi 18 | ln -s /usr/lib/jvm/java-8-oracle/jre/lib/amd64/server/libjvm.so /usr/lib/libjvm.so 19 | 20 | --------------------------------------------------------------------------------
/dev/provision/install_mesos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | PREFIX="Mesos Provisioner: " 4 | set -e 5 | 6 | echo "${PREFIX}Installing pre-reqs..." 7 | # For Mesos 8 | apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF 9 | DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') 10 | CODENAME=$(lsb_release -cs) 11 | echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | sudo tee /etc/apt/sources.list.d/mesosphere.list 12 | apt-get -y update 13 | 14 | apt-get -y install libcurl3 15 | apt-get -y install zookeeperd 16 | apt-get -y install aria2 17 | apt-get -y install ssh 18 | apt-get -y install rsync 19 | 20 | 21 | MESOS_VERSION="0.22.1" 22 | echo "${PREFIX}Installing mesos version: ${MESOS_VERSION}..." 23 | apt-get -y install mesos # note: installs the newest packaged mesos from the Mesosphere repo; pin the package if you need exactly ${MESOS_VERSION} --------------------------------------------------------------------------------
/dev/provision/start_datanode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | set -e 4 | 5 | # Start DataNode on this node only (hadoop-daemons.sh would ssh to every host in the slaves file) 6 | sudo -u hduser sh -c '/usr/local/hadoop/sbin/hadoop-daemon.sh start datanode' 7 | 8 | 9 | --------------------------------------------------------------------------------
/dev/provision/start_mesos_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | # $1 is the master ip 4 | 5 | echo "Starting master!! $1" > startmaster.log
$1" > startmaster.log 6 | 7 | echo "zk://$1:2181/mesos" > /etc/mesos/zk 8 | #echo "$@ master" >> /etc/hosts 9 | #echo "192.168.50.102 slave" >> /etc/hosts 10 | echo $1 | sudo tee /etc/mesos-master/ip 11 | echo $1 | sudo tee /etc/mesos-master/hostname 12 | 13 | echo "export HADOOP_HOME=/usr/local/hadoop" >> /root/.bashrc 14 | echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> /root/.bashrc 15 | echo "export PATH=$PATH:$HADOOP_HOME/bin:$JAVA_HOME/bin" >> /root/.bashrc 16 | 17 | # keep mesos slave from starting here 18 | echo manual | sudo tee /etc/init/mesos-slave.override 19 | 20 | sudo start mesos-master 21 | -------------------------------------------------------------------------------- /dev/provision/start_mesos_master_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | # $1 is the master ip 4 | 5 | echo "Starting master!! $1" > startmaster.log 6 | 7 | echo "zk://$1:2181/mesos" > /etc/mesos/zk 8 | #echo "$@ master" >> /etc/hosts 9 | #echo "192.168.50.102 slave" >> /etc/hosts 10 | echo $1 | sudo tee /etc/mesos-master/ip 11 | echo $1 | sudo tee /etc/mesos-master/hostname 12 | 13 | echo "export HADOOP_HOME=/usr/local/hadoop" >> /root/.bashrc 14 | echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> /root/.bashrc 15 | echo "export PATH=$PATH:$HADOOP_HOME/bin:$JAVA_HOME/bin" >> /root/.bashrc 16 | 17 | # keep mesos slave from starting here 18 | echo manual | sudo tee /etc/init/mesos-slave.override 19 | 20 | sudo service mesos-master start 21 | -------------------------------------------------------------------------------- /dev/provision/start_mesos_slave.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | # args => $1 = ip of slave, $2 = ip of mesos master, $3 = hostname of slave 4 | 5 | set -e 6 | 7 | echo "Starting slave $@" > startslave.log 8 | echo "$1" > /etc/mesos-slave/ip 9 | echo "cgroups/cpu,cgroups/mem" > /etc/mesos-slave/isolation 10 | echo "mesos" > /etc/mesos-slave/containerizers 11 | echo "/usr/local/hadoop" > /etc/mesos-slave/hadoop_home 12 | 13 | echo "export HADOOP_HOME=/usr/local/hadoop" >> /root/.bashrc 14 | echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> /root/.bashrc 15 | 16 | echo "zk://$2:2181/mesos" | sudo tee /etc/mesos/zk 17 | echo $1 | sudo tee /etc/mesos-slave/hostname 18 | echo "cpus:2;mem:2048" | sudo tee /etc/mesos-slave/resources 19 | echo manual | sudo tee /etc/init/mesos-master.override 20 | echo manual | sudo tee /etc/init/zookeeper.override 21 | 22 | 23 | start mesos-slave 24 | -------------------------------------------------------------------------------- /dev/provision/start_mesos_slave_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | # args => $1 = ip of slave, $2 = ip of mesos master, $3 = hostname of slave 4 | 5 | set -e 6 | 7 | echo "Starting slave $@" > startslave.log 8 | echo "$1" > /etc/mesos-slave/ip 9 | echo "cgroups/cpu,cgroups/mem" > /etc/mesos-slave/isolation 10 | echo "mesos" > /etc/mesos-slave/containerizers 11 | echo "/usr/local/hadoop" > /etc/mesos-slave/hadoop_home 12 | 13 | echo "export HADOOP_HOME=/usr/local/hadoop" >> /root/.bashrc 14 | echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> /root/.bashrc 15 | 16 | echo "zk://$2:2181/mesos" | sudo tee /etc/mesos/zk 17 | echo $1 | sudo tee /etc/mesos-slave/hostname 18 | echo "cpus:2;mem:2048" | sudo tee /etc/mesos-slave/resources 19 | echo manual | sudo 
20 | echo manual | sudo tee /etc/init/zookeeper.override 21 | 22 | 23 | sudo service mesos-slave start 24 | --------------------------------------------------------------------------------
/dev/provision/start_namenode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | 3 | set -e 4 | 5 | # Start NameNode 6 | sudo -u hduser sh -c '/usr/local/hadoop/sbin/hadoop-daemon.sh start namenode' 7 | 8 | sudo -u hduser sh -c "/usr/local/hadoop/bin/hadoop fs -chmod 777 /" --------------------------------------------------------------------------------
/dev/start_aws_framework.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export LOG=/tmp/accumulo-framework.log 4 | 5 | export ACCUMULO_HOME=/home/ubuntu/klucar/accumulo-1.7.0 6 | export ACCUMULO_CLIENT_CONF_PATH=$ACCUMULO_HOME/conf 7 | export HADOOP_PREFIX=/usr/lib/hadoop 8 | export HADOOP_CONF_DIR=/etc/hadoop 9 | export ZOOKEEPER_HOME=/etc/zookeeper 10 | 11 | java -jar /home/ubuntu/klucar/accumulo-mesos-dist-0.2.0-SNAPSHOT/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar \ 12 | -f /home/ubuntu/klucar/AWS_framework.json \ 13 | | tee $LOG 14 | 15 | --------------------------------------------------------------------------------
/dev/start_framework.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export LOG=/tmp/accumulo-framework.log 4 | 5 | export ACCUMULO_HOME=/vagrant/dev/dist/accumulo-1.7.0 6 | export ACCUMULO_CLIENT_CONF_PATH=$ACCUMULO_HOME/conf 7 | export HADOOP_PREFIX=/usr/local/hadoop 8 | export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop 9 | export ZOOKEEPER_HOME=/etc/zookeeper 10 | 11 | java -jar /vagrant/dev/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar \ 12 | -master 172.16.0.100:5050 \ 13 | -zookeepers 172.16.0.100:2181 \ 14 | -name accumulo-mesos-test-1 \ 15 | | tee $LOG 16 | 17 | 18 | # "bindAddress": "172.16.0.100", 19 | # "httpPort": "8192", 20 | # "mesosMaster": "172.16.0.100:5050", 21 | # "name":"accumulo-mesos-test", 22 | # "id": "", 23 | # "tarballUri": "hdfs://172.16.0.100:9000/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz", 24 | # "zkServers": "172.16.0.100:2181" --------------------------------------------------------------------------------
/dev/upload_hdfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Cleaning local files" 4 | rm -rf /vagrant/dev/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT 5 | rm -f /vagrant/dev/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz 6 | echo "Copying archives from build, expanding" 7 | cp /vagrant/accumulo-mesos-dist/target/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz /vagrant/dev/dist/. 8 | tar xzf /vagrant/dev/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz -C /vagrant/dev/dist 9 | 10 | TEST=`hadoop fs -ls /dist 2>/dev/null` 11 | if [ -z "$TEST" ]; then 12 | echo "Creating dist directory" 13 | hadoop fs -mkdir /dist 14 | else 15 | echo "/dist already exists... skipping" 16 | fi 17 | 18 | echo "Uploading files to HDFS" 19 | hadoop fs -copyFromLocal -f /vagrant/dev/dist/*.gz /dist/. 20 | hadoop fs -copyFromLocal -f /vagrant/dev/dist/libaccumulo.so /dist/. 21 | --------------------------------------------------------------------------------
/docker/accumulo.dockerfile: -------------------------------------------------------------------------------- 1 | # Pull base image.
2 | FROM ubuntu 3 | 4 | RUN apt-get update 5 | RUN apt-get install -y software-properties-common gpgv 6 | RUN apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF 7 | #RUN add-apt-repository ppa:openjdk-r/ppa 8 | RUN apt-get update 9 | 10 | RUN \ 11 | echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \ 12 | add-apt-repository -y ppa:webupd8team/java && \ 13 | apt-get update && \ 14 | apt-get install -y oracle-java8-installer && \ 15 | rm -rf /var/lib/apt/lists/* && \ 16 | rm -rf /var/cache/oracle-jdk8-installer 17 | 18 | 19 | RUN apt-get install -y wget lsb-release 20 | RUN DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') CODENAME=$(lsb_release -cs) && echo "deb http://repos.mesosphere.com/${DISTRO} ${CODENAME} main" | tee /etc/apt/sources.list.d/mesosphere.list 21 | RUN apt-get -y update 22 | RUN apt-get install -y zookeeper mesos 23 | 24 | ADD bin/install-accumulo.sh / 25 | RUN chmod +x /install-accumulo.sh 26 | RUN /install-accumulo.sh 27 | 28 | ADD bin/start-framework.sh / 29 | RUN chmod +x /start-framework.sh 30 | 31 | CMD ["/start-framework.sh"] 32 | --------------------------------------------------------------------------------
/docker/bin/install-accumulo.sh: -------------------------------------------------------------------------------- 1 | ## 2 | # Hadoop and Accumulo installer script (adapted from the Apache Myriad YARN installer) 3 | ## 4 | HADOOP_VER="2.7.1" 5 | HADOOP_TARBALL_URL=http://apache.osuosl.org/hadoop/common/hadoop-${HADOOP_VER}/hadoop-${HADOOP_VER}.tar.gz 6 | ACCUMULO_TARBALL_URL=http://download.nextag.com/apache/accumulo/1.7.0/accumulo-1.7.0-bin.tar.gz 7 | 8 | 9 | echo "Installing Hadoop...." 10 | if [ ! -z "$1" ];then 11 | HADOOP_TARBALL_URL=$1 12 | echo "Deleting previous hadoop home" 13 | rm -rf ${HADOOP_HOME} 14 | fi 15 | 16 | # Download the tarball 17 | wget -O /opt/hadoop.tgz ${HADOOP_TARBALL_URL} 18 | HADOOP_BASENAME=`basename ${HADOOP_TARBALL_URL} .tar.gz` 19 | 20 | # Put in env defaults if they are missing 21 | export HADOOP_GROUP=${HADOOP_GROUP:='hadoop'} 22 | export HADOOP_USER=${HADOOP_USER:='accumulo'} 23 | export HADOOP_HOME=${HADOOP_HOME:='/usr/local/hadoop'} 24 | export USER_UID=${USER_UID:='113'} 25 | export GROUP_UID=${GROUP_UID:='112'} 26 | 27 | # Add hduser user 28 | groupadd $HADOOP_GROUP -g ${GROUP_UID} 29 | useradd $HADOOP_USER -g $HADOOP_GROUP -u ${USER_UID} -s /bin/bash -m 30 | #mkdir /home/${HADOOP_USER} 31 | chown -R $HADOOP_USER:$HADOOP_GROUP /home/${HADOOP_USER} 32 | 33 | # Extract Hadoop 34 | tar vxzf /opt/hadoop.tgz -C /tmp 35 | #mv /tmp/hadoop-${HADOOP_VER} ${HADOOP_HOME} 36 | echo "Moving /tmp/${HADOOP_BASENAME} to ${HADOOP_HOME}" 37 | mv /tmp/${HADOOP_BASENAME} ${HADOOP_HOME} 38 | ls -lath ${HADOOP_HOME} 39 | 40 | mkdir -p /home/$HADOOP_USER 41 | chown -R ${HADOOP_USER}:${HADOOP_GROUP} ${HADOOP_HOME} 42 | 43 | # Init bashrc with hadoop env variables 44 | sh -c 'echo export JAVA_HOME=/usr >> /home/${HADOOP_USER}/.bashrc' 45 | sh -c 'echo export HADOOP_HOME=${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc' # expand now so .bashrc records the concrete path 46 | sh -c 'echo export PATH=\$PATH:\${HADOOP_HOME}/bin >> /home/${HADOOP_USER}/.bashrc' 47 | sh -c 'echo export PATH=\$PATH:\${HADOOP_HOME}/sbin >> /home/${HADOOP_USER}/.bashrc' 48 | sh -c 'echo export HADOOP_MAPRED_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc' 49 | sh -c 'echo export HADOOP_COMMON_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc' 50 | sh -c 'echo export HADOOP_HDFS_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc' 51 | sh -c 'echo export YARN_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc'
52 | sh -c 'echo export HADOOP_COMMON_LIB_NATIVE_DIR=\$\{HADOOP_HOME\}/lib/native >> /home/${HADOOP_USER}/.bashrc' 53 | sh -c 'echo export HADOOP_OPTS=\"-Djava.library.path=\${HADOOP_HOME}/lib\" >> /home/${HADOOP_USER}/.bashrc' 54 | 55 | # Link Mesos Libraries 56 | touch ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh 57 | echo "export JAVA_HOME=/usr" >> ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh 58 | echo "export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so" >> ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh 59 | 60 | # Ensure the hadoop-env is executable 61 | chmod +x ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh 62 | 63 | 64 | 65 | # install accumulo 66 | ACCUMULO_HOME=/opt/accumulo 67 | 68 | echo "Installing accumulo..." 69 | if [ ! -z "$2" ];then 70 | ACCUMULO_TARBALL_URL=$2 # second argument overrides the accumulo tarball ($1 already overrides the hadoop tarball above) 71 | echo "Deleting previous accumulo home" 72 | rm -rf ${ACCUMULO_HOME} 73 | fi 74 | 75 | wget -O /opt/accumulo.tgz ${ACCUMULO_TARBALL_URL} 76 | ACCUMULO_BASENAME=`basename ${ACCUMULO_TARBALL_URL} -bin.tar.gz` # e.g. accumulo-1.7.0; the bin tarball unpacks to this directory 77 | # Extract Accumulo 78 | tar vxzf /opt/accumulo.tgz -C /tmp 79 | #mv /tmp/accumulo-${HADOOP_VER} ${ACCUMULO_HOME} 80 | echo "Moving /tmp/${ACCUMULO_BASENAME} to ${ACCUMULO_HOME}" 81 | mv /tmp/${ACCUMULO_BASENAME} ${ACCUMULO_HOME} 82 | ls -lath ${ACCUMULO_HOME} 83 | chown -R ${HADOOP_USER}:${HADOOP_GROUP} ${ACCUMULO_HOME} 84 | --------------------------------------------------------------------------------
/docker/bin/start-framework.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CLUSTER_CONFIG=${CLUSTER_CONFIG:-/accumulo-config/cluster.json} # defaults match the docker-compose config mount 3 | FRAMEWORK_CONFIG=${FRAMEWORK_CONFIG:-/accumulo-config/framework.json} 4 | export ACCUMULO_HOME=/opt/accumulo/accumulo-1.7.0 5 | export ACCUMULO_CLIENT_CONF_PATH=$ACCUMULO_HOME/conf 6 | export HADOOP_PREFIX=/usr/local/hadoop 7 | export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop 8 | export ZOOKEEPER_HOME=/etc/zookeeper 9 | export LOG=/tmp/accumulo-framework.log 10 | export JAVA_HOME=/usr 11 | HADOOP_HOME=/usr/local/hadoop 12 | HADOOP_NAMENODE=${HADOOP_NAMENODE:-172.31.45.229:54310} 13 | ACCUMULO_TAR="accumulo-1.7.0.tar.gz" 14 | ACCUMULO_NATIVE_LIB="libaccumulo.so" 15 | ACCUMULO_DIST="accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz" 16 | 17 | FOUND_ACCUMULO=`${HADOOP_HOME}/bin/hadoop fs -ls hdfs://${HADOOP_NAMENODE}/dist | grep ${ACCUMULO_TAR}` 18 | FOUND_ACCUMULO_NATIVE_LIB=`${HADOOP_HOME}/bin/hadoop fs -ls hdfs://${HADOOP_NAMENODE}/dist | grep ${ACCUMULO_NATIVE_LIB}` 19 | FOUND_ACCUMULO_DIST=`${HADOOP_HOME}/bin/hadoop fs -ls hdfs://${HADOOP_NAMENODE}/dist | grep ${ACCUMULO_DIST}` 20 | 21 | # Fix broken packages 22 | #apt-get -f install 23 | 24 | # Create accumulo-mesos folder 25 | ${HADOOP_HOME}/bin/hadoop fs -mkdir -p hdfs://${HADOOP_NAMENODE}/accumulo-mesos 26 | 27 | # Ensure config files are present 28 | if [ ! -f ${CLUSTER_CONFIG} ]; then 29 | echo "[FATAL] ${CLUSTER_CONFIG} not found. Did you mount your volumes?" 30 | exit 1 31 | fi 32 | if [ ! -f ${FRAMEWORK_CONFIG} ]; then 33 | echo "[FATAL] ${FRAMEWORK_CONFIG} not found. Did you mount your volumes?" 34 | exit 1 35 | fi 36 | 37 | # Ensure libraries are mounted 38 | if [ ! -f /accumulo-lib/${ACCUMULO_DIST} ]; then 39 | echo "[FATAL] ${ACCUMULO_DIST} not found in /accumulo-lib. Did you mount your volumes?" 40 | ls -lath /accumulo-lib/ 41 | exit 1 42 | fi 43 | 44 | 45 | # Ensure env vars are present 46 | if [ -z $HADOOP_NAMENODE ]; then 47 | echo "[FATAL] HADOOP_NAMENODE env variable not set!"
48 | exit 1 49 | fi 50 | 51 | 52 | # Look for accumulo tar 53 | if [ -n "${FOUND_ACCUMULO}" ]; then 54 | echo "[FOUND] ${ACCUMULO_TAR}" 55 | else 56 | echo "[MISSING] ${ACCUMULO_TAR}" 57 | mv /opt/accumulo.tgz /opt/${ACCUMULO_TAR} 58 | ${HADOOP_HOME}/bin/hadoop fs -copyFromLocal /opt/${ACCUMULO_TAR} hdfs://${HADOOP_NAMENODE}/dist/ 59 | echo "[COPIED] ${ACCUMULO_TAR} to HDFS" 60 | fi 61 | 62 | # Look for accumulo dist 63 | if [ -n "${FOUND_ACCUMULO_DIST}" ]; then 64 | echo "[FOUND] ${ACCUMULO_DIST}" 65 | else 66 | echo "[MISSING] ${ACCUMULO_DIST}" 67 | # the dist tarball is mounted at /accumulo-lib, so no local move is needed 68 | ${HADOOP_HOME}/bin/hadoop fs -copyFromLocal /accumulo-lib/${ACCUMULO_DIST} hdfs://${HADOOP_NAMENODE}/dist/ 69 | echo "[COPIED] ${ACCUMULO_DIST} to HDFS" 70 | fi 71 | 72 | 73 | # Look for accumulo native library in HDFS 74 | if [ -n "${FOUND_ACCUMULO_NATIVE_LIB}" ]; then 75 | echo "[FOUND] ${ACCUMULO_NATIVE_LIB}" 76 | else 77 | echo "[MISSING] ${ACCUMULO_NATIVE_LIB} Compiling..." 78 | apt-get clean 79 | mv /var/lib/apt/lists /tmp 80 | mkdir -p /var/lib/apt/lists/partial 81 | apt-get clean 82 | apt-get update 83 | rm /etc/apt/sources.list.d/webupd8team-java-trusty.list && apt-get update 84 | apt-get install -y build-essential g++ gcc 85 | ls -lath /usr/lib/jvm/java-8-oracle/include 86 | # build_native_library.sh performs the actual compile against the JDK headers listed above 87 | . ${ACCUMULO_HOME}/bin/build_native_library.sh 88 | mv ${ACCUMULO_HOME}/lib/native/libaccumulo.so /accumulo-lib/ 89 | ${HADOOP_HOME}/bin/hadoop fs -copyFromLocal /accumulo-lib/${ACCUMULO_NATIVE_LIB} hdfs://${HADOOP_NAMENODE}/dist/ 90 | echo "[COPIED] ${ACCUMULO_NATIVE_LIB} to HDFS" 91 | 92 | fi 93 | 94 | # Look for dist library in HDFS 95 | if [ -n "${FOUND_ACCUMULO_DIST}" ]; then 96 | echo "[FOUND] ${ACCUMULO_DIST}" 97 | else 98 | echo "[MISSING] ${ACCUMULO_DIST}" 99 | 100 | fi 101 | 102 | 103 | ${HADOOP_HOME}/bin/hadoop fs -rm -r hdfs://${HADOOP_NAMENODE}/accumulo-mesos/* 104 | 105 | #init accumulo 106 | java -jar /accumulo-lib/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar \ 107 | -i -fc ${FRAMEWORK_CONFIG} -cc ${CLUSTER_CONFIG} \ 108 | | tee $LOG 109 | 110 | 111 | 112 | MESOS_MASTER="172.31.45.229:5050" 113 | ZOOKEEPERS="172.31.20.165:2181" 114 | 115 | java -jar /accumulo-lib/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar \ 116 | -master $MESOS_MASTER \ 117 | -zookeepers $ZOOKEEPERS \ 118 | -name accumulo-mesos-test-4 \ 119 | | tee $LOG 120 | --------------------------------------------------------------------------------
/docker/build-framwork.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd .. 3 | mvn package 4 | 5 | cd docker 6 | if [ ! -d "lib" ]; then 7 | mkdir lib 8 | fi 9 | echo "Copying libraries..."
10 | cp ../accumulo-mesos-framework/target/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar lib/ 11 | cp ../accumulo-mesos-dist/target/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz lib/ 12 | 13 | -------------------------------------------------------------------------------- /docker/config/cluster.json-sample: -------------------------------------------------------------------------------- 1 | { 2 | "instance": "TEST_10092015_3", 3 | "rootUser": "jimbo", 4 | "rootPassword": "jimbopassword", 5 | "zkServers": "172.31.20.165:2181", 6 | "executorMemory": 128, 7 | "tarballUri": "hdfs://172.31.45.229:54310/dist/accumulo-1.7.0-bin.tar.gz", 8 | "hdfsUri": "hdfs://172.31.45.229:54310/accumulo-mesos", 9 | "nativeLibUri": "hdfs://172.31.45.229:54310/dist/libaccumulo.so", 10 | "siteXml": "", 11 | "servers": [ 12 | { 13 | "count": 3, 14 | "profile":{ 15 | "name":"BasicTserver", 16 | "description":"Basic Tserver setup", 17 | "type":"tserver", 18 | "cpus":1.0, 19 | "mem":1024, 20 | "user": "" 21 | } 22 | }, 23 | { 24 | "count": 1, 25 | "profile": { 26 | "name": "BasicMaster", 27 | "description": "Basic Master setup", 28 | "type": "master", 29 | "cpus": 1.0, 30 | "mem": 512, 31 | "user": "" 32 | } 33 | }, 34 | { 35 | "count": 1, 36 | "profile": { 37 | "name": "Monitor", 38 | "description": "Basic Monitor setup", 39 | "type": "monitor", 40 | "cpus": 1.0, 41 | "mem": 256, 42 | "user": "" 43 | } 44 | }, 45 | { 46 | "count": 1, 47 | "profile": { 48 | "name": "BasicGC", 49 | "description": "Basic Garbage Collector setup", 50 | "type": "gc", 51 | "cpus": 1.0, 52 | "mem": 256, 53 | "user": "" 54 | } 55 | } 56 | ] 57 | } 58 | -------------------------------------------------------------------------------- /docker/config/framework.json-sample: -------------------------------------------------------------------------------- 1 | { 2 | "bindAddress": "0.0.0.0", 3 | "httpPort": "8192", 4 | "mesosMaster": "172.31.45.229:5050", 5 | "name":"accumulo-mesos-test-4", 6 | "id": "", 7 | "tarballUri": "hdfs://172.31.45.229:54310/dist/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz", 8 | "zkServers": "172.31.20.165:2181" 9 | } 10 | -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | accumulo: 2 | build: . 3 | dockerfile: accumulo.dockerfile 4 | net: host 5 | volumes: 6 | - ./lib:/accumulo-lib:rw 7 | - ./config:/accumulo-config 8 | --------------------------------------------------------------------------------
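Note: the docker/ pieces above compose into an end-to-end bring-up roughly like the sketch below. The sequence is an assumption pieced together from build-framwork.sh, the *-sample configs, and docker-compose.yml (which mounts ./lib at /accumulo-lib and ./config at /accumulo-config); the cp of the sample files and the compose invocations are illustrative, not part of the repo:

```bash
#!/bin/bash
# Hypothetical walk-through of the docker/ workflow (assumptions noted above).
cd docker

# Build the framework and stage the jars into ./lib
# (build-framwork.sh runs `mvn package` one level up, then copies the artifacts).
./build-framwork.sh

# start-framework.sh expects framework.json and cluster.json under
# /accumulo-config, i.e. ./config on the host; start from the samples
# and edit them for your Mesos master, ZooKeeper, and HDFS URIs.
cp config/framework.json-sample config/framework.json
cp config/cluster.json-sample config/cluster.json

# Build the image from accumulo.dockerfile and run it with the volume
# mounts defined in docker-compose.yml.
docker-compose build accumulo
docker-compose up accumulo
```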