├── .gitignore ├── LICENSE ├── README.md ├── executors ├── dotnet-executor │ ├── .DS_Store │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── build.sbt │ ├── docker-build-push.sh │ ├── install-coreclr.sh │ ├── no_sudo_install-corecler.sh │ ├── project │ │ ├── build.properties │ │ └── plugins.sbt │ └── src │ │ └── main │ │ └── scala │ │ └── com │ │ └── galacticfog │ │ └── lambda │ │ └── executor │ │ └── DotNetExecutor.scala ├── java-executor │ ├── .DS_Store │ ├── .gitignore │ ├── LICENSE │ ├── README │ ├── build.sbt │ ├── docker-build-push.sh │ ├── project │ │ ├── build.properties │ │ └── plugins.sbt │ └── src │ │ └── main │ │ └── scala │ │ └── com │ │ └── galacticfog │ │ └── lambda │ │ └── executor │ │ └── JavaExecutor.scala └── js-executor │ ├── .DS_Store │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── build.sbt │ ├── docker-build-push.sh │ ├── project │ ├── build.properties │ └── plugins.sbt │ └── src │ └── main │ ├── resources │ ├── console.js │ └── npm-js.js │ └── scala │ └── com │ └── galacticfog │ └── lambda │ └── executor │ ├── ClasspathFileResolver.java │ ├── JSWorker.scala │ └── JavaScrptExecutor.scala ├── lambda-io ├── .gitignore ├── README.md ├── build.sbt ├── project │ ├── plugins.sbt │ └── scalikejdbc.properties └── src │ ├── main │ ├── resources │ │ ├── application.conf │ │ ├── db │ │ │ └── migration │ │ │ │ ├── V1__create_reference_tables.sql │ │ │ │ ├── V2__create_tables.sql │ │ │ │ ├── V3__load_data.sql │ │ │ │ └── beforeClean.sql │ │ └── logback.xml │ └── scala │ │ └── com │ │ └── galacticfog │ │ └── gestalt │ │ └── lambda │ │ ├── io │ │ ├── JdbcConnectionInfo.scala │ │ ├── PostgresJdbcInfo.scala │ │ ├── ScalikePostgresInfo.scala │ │ ├── domain │ │ │ ├── LambdaDao.scala │ │ │ ├── LambdaEvent.scala │ │ │ ├── LambdaResult.scala │ │ │ └── ResultDao.scala │ │ └── model │ │ │ ├── LambdaRepository.scala │ │ │ └── ResultRepository.scala │ │ └── utils │ │ └── SecureIdGenerator.scala │ └── test │ └── scala │ └── com │ └── galacticfog │ └── gestalt │ ├── lambda │ └── io │ │ └── model │ │ ├── LambdaRepositorySpec.scala │ │ └── ResultRepositorySpec.scala │ └── vertx │ ├── com │ └── galacticfog │ │ └── gestalt │ │ └── vertx │ │ └── io │ │ └── model │ │ └── PolicyRepositorySpec.scala │ └── io │ └── model │ ├── LambdaRepositorySpec.scala │ ├── PolicyRepositorySpec.scala │ ├── PolicyTypeRepositorySpec.scala │ └── VertxRepositorySpec.scala ├── lambda-plugin ├── .gitignore ├── README.md ├── build.sbt ├── project │ └── plugins.sbt └── src │ └── main │ ├── resources │ ├── application.conf │ └── logback.xml │ └── scala │ └── com │ └── galacticfog │ └── gestalt │ └── lambda │ └── plugin │ └── LambdaAdapter.scala ├── lambda-scheduler ├── .gitignore ├── LICENSE ├── README ├── activator ├── activator.bat ├── app │ ├── com │ │ └── galacticfog │ │ │ └── lambda │ │ │ └── scheduler │ │ │ ├── Global.scala │ │ │ ├── LambdaScheduler.scala │ │ │ └── TaskUtils.scala │ ├── controllers │ │ └── Application.scala │ └── views │ │ ├── index.scala.html │ │ └── main.scala.html ├── build.sbt ├── conf │ ├── application.conf │ └── routes ├── project │ ├── build.properties │ └── plugins.sbt ├── public │ ├── images │ │ └── favicon.png │ ├── javascripts │ │ └── hello.js │ └── stylesheets │ │ └── main.css └── test │ ├── ApplicationSpec.scala │ └── IntegrationSpec.scala ├── lambda ├── .gitignore ├── Dockerfile ├── LICENSE ├── README ├── activator ├── activator.bat ├── api.raml ├── app │ ├── com │ │ └── galacticfog │ │ │ └── gestalt │ │ │ └── lambda │ │ │ ├── Global.scala │ │ │ ├── LambdaFramework.scala │ │ │ ├── 
actor │ │ │ ├── EnvironmentActor.scala │ │ │ ├── FactoryActor.scala │ │ │ ├── InvokeActor.scala │ │ │ ├── LambdaMessages.scala │ │ │ ├── LookupActor.scala │ │ │ └── UnhandledMessageActor.scala │ │ │ ├── adapters │ │ │ └── FakeLambdaAdapter.scala │ │ │ ├── config │ │ │ └── DatabaseConfig.scala │ │ │ └── util │ │ │ ├── SecureIdGenerator.scala │ │ │ └── WebClient.scala │ ├── controllers │ │ └── Application.scala │ └── views │ │ ├── index.scala.html │ │ └── main.scala.html ├── build.sbt ├── conf │ ├── application-logger.xml │ ├── application.conf │ ├── logback.xml │ ├── newrelic.yml │ └── routes ├── create_test_db.sh ├── deleteAllLambdas.sh ├── docker-build-push.sh ├── gestalt-security.conf ├── gestalt.conf ├── local_config.json ├── localrc ├── payloads │ ├── alt.dotnet.create.json │ ├── aws.create.json │ ├── aws.event.json │ ├── aws.hello_world.json │ ├── dotnet.call.create.json │ ├── dotnet.create.json │ ├── event.json │ ├── java.call.create.json │ ├── java.create.json │ ├── java.sleep.create.json │ ├── js.call.create.json │ ├── js.hello_world.create.json │ ├── js.inline.hello.create.json │ ├── kill.framework.json │ ├── lambda.create.json │ ├── lambda_marathon.json │ ├── lambda_marathon_aqr.json │ ├── lambda_marathon_dev.json │ ├── lambda_marathon_test.json │ ├── laser.event.json │ ├── remote_config.json │ ├── remote_config_dev.json │ ├── sleep.event.json │ ├── telnet_test.json │ ├── unreserve.payload.json │ └── watercoins_config.json ├── project │ ├── build.properties │ └── plugins.sbt ├── public │ ├── images │ │ └── favicon.png │ ├── javascripts │ │ └── hello.js │ └── stylesheets │ │ └── main.css ├── remote_config.json └── test │ ├── ApplicationSpec.scala │ ├── GlobalWithoutMeta.scala │ ├── IntegrationSpec.scala │ └── LambdaServiceSpec.scala ├── mesos_http ├── .gitignore ├── build.sbt ├── project │ └── plugins.sbt └── src │ └── main │ ├── resources │ └── META-INF │ │ └── services │ │ └── com.galacticfog.gestalt.lambda.plugin.LambdaAdapter │ └── scala │ └── com │ └── galacticfog │ └── gestalt │ └── mesos │ └── http │ ├── GestaltSchedulerDriver.scala │ ├── InternalSchedulerDriver.scala │ ├── JsonUtil.scala │ ├── SubscribedResposne.scala │ └── package.scala ├── plugins ├── lambda-aws-plugin │ ├── .gitignore │ ├── META-INF │ │ └── services │ │ │ └── com.galacticfog.gestalt.lambda.plugin.LambdaAdapter │ ├── README.md │ ├── build.sbt │ ├── project │ │ └── plugins.sbt │ └── src │ │ └── main │ │ └── scala │ │ └── com │ │ └── galacticfog │ │ └── gestalt │ │ └── lambda │ │ └── impl │ │ └── AWSLambdaAdapter.scala ├── lambda-gfi-plugin │ ├── .gitignore │ ├── README.md │ ├── build.sbt │ ├── newrelic │ │ ├── CHANGELOG │ │ ├── LICENSE │ │ ├── README.txt │ │ ├── extension-example.xml │ │ ├── extension.xsd │ │ ├── newrelic.yml │ │ └── nrcerts │ ├── project │ │ └── plugins.sbt │ └── src │ │ └── main │ │ ├── resources │ │ └── META-INF │ │ │ └── services │ │ │ └── com.galacticfog.gestalt.lambda.plugin.LambdaAdapter │ │ └── scala │ │ └── com │ │ └── galacticfog │ │ └── gestalt │ │ └── lambda │ │ └── impl │ │ ├── EnvironmentCache.scala │ │ ├── ExecutorCache.scala │ │ ├── GFILambdaAdapter.scala │ │ └── actor │ │ ├── GFIMessages.scala │ │ ├── OfferActor.scala │ │ ├── SchedulerActor.scala │ │ └── TaskActor.scala └── lambda-vertx-plugin │ ├── .gitignore │ ├── Dockerfile │ ├── README │ ├── activator │ ├── activator.bat │ ├── api.raml │ ├── build.sbt │ ├── conf │ └── langs.properties │ ├── examples │ ├── test-mod │ │ ├── .gitignore │ │ ├── build.sbt │ │ ├── project │ │ │ └── plugins.sbt │ │ └── src │ │ │ └── main │ │ 
│ ├── resources │ │ │ ├── hello-verticle.js │ │ │ ├── logback.xml │ │ │ └── vatomic.js │ │ │ └── scala │ │ │ └── com │ │ │ └── galacticfog │ │ │ └── gestalt │ │ │ └── vertx │ │ │ └── FactoryVerticle.scala │ └── test-post │ │ ├── .gitignore │ │ ├── build.sbt │ │ ├── project │ │ └── plugins.sbt │ │ └── src │ │ └── main │ │ ├── resources │ │ ├── hello-verticle.js │ │ └── logback.xml │ │ └── scala │ │ └── com │ │ └── galacticfog │ │ └── gestalt │ │ └── vertx │ │ └── FactoryVerticle.scala │ ├── gestalt-security.conf │ ├── langs.properties │ ├── notifier.patch │ ├── project │ ├── build.properties │ └── plugins.sbt │ ├── public │ ├── images │ │ └── favicon.png │ ├── javascripts │ │ └── hello.js │ └── stylesheets │ │ └── main.css │ ├── record.post │ ├── src │ └── main │ │ ├── resources │ │ └── META-INF │ │ │ └── services │ │ │ └── com.galacticfog.gestalt.lambda.plugin.LambdaAdapter │ │ └── scala │ │ └── com │ │ └── galacticfog │ │ ├── TestVerticle.class │ │ └── gestalt │ │ └── lambda │ │ └── impl │ │ ├── DynLoader.scala │ │ ├── FactoryActor.scala │ │ ├── PolicyReturnEvent.scala │ │ └── Utility.scala │ └── test │ ├── ApplicationSpec.scala │ └── IntegrationSpec.scala ├── test-harness ├── .gitignore ├── README.md ├── build.sbt ├── project │ └── plugins.sbt └── src │ └── main │ ├── resources │ └── logback.xml │ └── scala │ └── com │ └── galacticfog │ └── gestalt │ └── lambda │ └── test │ └── TestHarnessApp.scala └── worker ├── .gitignore ├── README.md ├── build.sbt ├── project └── plugins.sbt └── src └── main ├── resources ├── application.conf ├── console.js ├── hello_world.js ├── logback.xml └── npm-js.js └── scala └── com └── galacticfog └── gestalt └── lambda └── worker ├── ClasspathFileResolver.java ├── JSWorker.scala └── LambdaWorkerApp.scala /.gitignore: -------------------------------------------------------------------------------- 1 | *.zip 2 | *.jar 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project except in compliance with 4 | the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 5 | 6 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 7 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific 8 | language governing permissions and limitations under the License. 9 | 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This repo is deprecated! This was the 1.0 lambda engine. A lot has changed since then. For the latest version of gestalt and laser please refer to: http://docs.galacticfog.com/ 2 | 3 | 4 | 5 | #Gestalt Lambda 6 | 7 | This repository contains a version of the Gestalt Lambda service. This service provides a RESTful interface to an implementation 8 | of a lambda engine. The current adapter work supports running under Mesos as a Mesos Framework. This allows efficient use of 9 | hardware resources for one-off or seldomly occuring compute tasks. 
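The basic workflow is to register a lambda by `POST`ing a payload to the service and then invoke it with an event. The commands below are a sketch only; the host and port, the payload file, and `$LAMBDA_ID` are placeholders, and the full payload formats are documented in the READMEs under `executors/` (sample payloads also live under `lambda/payloads/`).

```sh
# Register a lambda definition from a JSON payload.
curl -X POST -H "Content-Type: application/json" \
  -d @payload.json \
  http://localhost:9000/lambdas

# Invoke it with an event; an event carries an eventName plus arbitrary JSON data.
curl -X POST -H "Content-Type: application/json" \
  -d '{ "eventName": "com.awesome.HelloWorld", "data": {} }' \
  http://localhost:9000/lambdas/$LAMBDA_ID/invoke
```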
10 | -------------------------------------------------------------------------------- /executors/dotnet-executor/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/executors/dotnet-executor/.DS_Store -------------------------------------------------------------------------------- /executors/dotnet-executor/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | project/project 3 | project/target 4 | tmp 5 | /.idea 6 | /*.iml 7 | /.idea_modules 8 | -------------------------------------------------------------------------------- /executors/dotnet-executor/LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project except in compliance with 4 | the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 5 | 6 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 7 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific 8 | language governing permissions and limitations under the License. -------------------------------------------------------------------------------- /executors/dotnet-executor/README.md: -------------------------------------------------------------------------------- 1 | #.NET Executor 2 | 3 | This is an implementation of a Mesos Executor that will execute a lambda written for the .NET Core CLR. The current implementation requires that the lambda author build and publish their dependencies into a directory and zip *both* the executable and all of its dependencies into an artifact to be inflated and executed. 4 | 5 | We're currently using a docker container built from `ubuntu:trusty`; more specifically, we're using the following base : `buildpack-deps:trusty-scm`. 6 | 7 | In order to remain consistent, lambda authors should use the following container to build and publish their executables : `microsoft/dotnet:latest`, which is also based on the docker images listed above. 8 | 9 | Here is a brief example of how to create a .NET lambda. 10 | 11 | #Hello World 12 | 13 | The simplest possible .NET lambda. Note that you must implement a `Main(string[] args)` entry point in order for your lambda to be executed by the system. 
14 | 15 | ### Program.cs 16 | ```c# 17 | using System; 18 | 19 | namespace ConsoleApplication 20 | { 21 | public class Program 22 | { 23 | public static void Main(string[] args) 24 | { 25 | Console.WriteLine("Hello World!"); 26 | } 27 | } 28 | } 29 | ``` 30 | 31 | ### project.json 32 | 33 | ```json 34 | { 35 | "version": "1.0.0-*", 36 | "compilationOptions": { 37 | "emitEntryPoint": true 38 | }, 39 | 40 | "dependencies": { 41 | "Microsoft.NETCore.Runtime": "1.0.1-beta-*", 42 | "System.IO": "4.0.11-beta-*", 43 | "System.Console": "4.0.0-beta-*", 44 | "System.Runtime": "4.0.21-beta-*" 45 | }, 46 | 47 | "frameworks": { 48 | "dnxcore50": { } 49 | } 50 | } 51 | ``` 52 | 53 | Now, while you're in the directory that contains your project, you can run the following command to build and publish your lambda : 54 | 55 | `>docker run --rm -v "$PWD":/dotnet_call -w /dotnet_call microsoft/dotnet@sha256:19ab67ce4fc80a1c1c68c45961216805c2119336e51be2132d4b2487a6a7034b /bin/bash -c "dotnet restore && dotnet build && dotnet publish"` 56 | 57 | The above command will mount your working directory as a writeable volume in the docker container, and then execute the commands necessary to pull down all the dependencies specified in your project, compile your code, and publish the executable and its dependencies into a directory for the specific runtime flavor (in this case `ubuntu:trusty`). 58 | 59 | You can then zip up the contents of your directory : `zip -r hello_world.zip *` 60 | 61 | Then you specify your lambda payload like so : 62 | 63 | ```json 64 | { 65 | "eventFilter": "com.awesome.HelloWorld", 66 | "artifactDescription": { 67 | "artifactUri": "https://s3.amazonaws.com/my.lambdas/hello_world.zip", 68 | "description": "super simple lambda for dotnet", 69 | "functionName": "doesntgetused", 70 | "handler": "bin/Debug/dnxcore50/ubuntu.14.04-x64/hello_world", 71 | "memorySize": 1024, 72 | "cpus": 0.2, 73 | "publish": false, 74 | "role": "doesntgetused", 75 | "runtime": "dotnet", 76 | "timeoutSecs": 180 77 | } 78 | } 79 | ``` 80 | 81 | Once you have your payload, you simply `POST` to the `/lambdas` endpoint for the lambda service : 82 | 83 | `POST /lambdas < payload.json` 84 | 85 | And you can invoke it like so : 86 | 87 | `POST /lambdas/{id}/invoke < event.payload.json` 88 | 89 | 90 | -------------------------------------------------------------------------------- /executors/dotnet-executor/build.sbt: -------------------------------------------------------------------------------- 1 | name := """lambda-dotnet-alt-executor""" 2 | 3 | version := "1.2.0-SNAPSHOT" 4 | 5 | mainClass in (Compile, packageBin) := Some("com.galacticfog.lambda.executor.DotNetExecutor") 6 | 7 | lazy val root = (project in file(".")) 8 | 9 | scalaVersion := "2.11.6" 10 | 11 | resolvers ++= Seq( 12 | "Mesosphere Repo" at "http://downloads.mesosphere.io/maven", 13 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository", 14 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local" 15 | ) 16 | 17 | 18 | libraryDependencies ++= Seq( 19 | "com.galacticfog" %% "gestalt-utils" % "0.0.1-SNAPSHOT" withSources(), 20 | "org.specs2" %% "specs2-core" % "3.7" % "test", 21 | "com.typesafe.play" %% "play-json" % "2.4.0-M2", 22 | "mesosphere" %% "mesos-utils" % "0.28.0" 23 | ) 24 | 25 | scalacOptions in Test ++= Seq("-Yrangepos") 26 | 27 | enablePlugins(JavaAppPackaging) 28 | enablePlugins(DockerPlugin) 29 | 30 | import com.typesafe.sbt.packager.docker._ 31 | import NativePackagerHelper._ 
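// The install-coreclr.sh script is shipped into the image under local/, and the generated Dockerfile is
// rewritten below so the gestalt-mesos-base image also gets sudo (with passwordless sudo for the daemon
// user), the LLVM/lldb 3.6 packages, and the `dotnet` package from the apt-mo.trafficmanager.net feed.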
mappings in Universal += file("install-coreclr.sh") -> "local/install-coreclr.sh" 33 | 34 | dockerBaseImage := "galacticfog.artifactoryonline.com/gestalt-mesos-base:0.0.0-d5747ff7" 35 | 36 | dockerCommands := dockerCommands.value.flatMap{ 37 | case cmd@Cmd("ADD",_) => List( 38 | cmd, 39 | Cmd("RUN","apt-get -y install sudo"), 40 | Cmd("RUN", "echo \"daemon:daemon\" | chpasswd && adduser daemon sudo"), 41 | Cmd("RUN", "echo \"daemon ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers"), 42 | Cmd("USER", "daemon"), 43 | 44 | Cmd("RUN", "sudo sh -c 'echo \"deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.6 main\" > /etc/apt/sources.list.d/llvm-toolchain.list'"), 45 | Cmd("RUN", "sudo sh -c 'echo \"deb-src http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.6 main\" > /etc/apt/sources.list.d/llvm-toolchain1.list'"), 46 | Cmd("RUN", "wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key|sudo apt-key add -"), 47 | Cmd("RUN", "sudo apt-get update && sudo apt-get -y install liblldb-3.6"), 48 | 49 | Cmd("RUN", "sudo sh -c 'echo \"deb [arch=amd64] http://apt-mo.trafficmanager.net/repos/dotnet/ trusty main\" > /etc/apt/sources.list.d/dotnetdev.list'"), 50 | Cmd("RUN", "sudo apt-key adv --keyserver apt-mo.trafficmanager.net --recv-keys 417A0893"), 51 | Cmd("RUN", "sudo apt-get update"), 52 | Cmd("RUN", "sudo apt-get install -y dotnet"), 53 | Cmd("USER", "root"), 54 | Cmd("RUN", "chown -R root:daemon /usr/sbin") 55 | ) 56 | case other => List(other) 57 | } 58 | 59 | //Cmd("ADD", "install-coreclr.sh local/"), 60 | 61 | maintainer := "Brad Futch " 62 | 63 | dockerUpdateLatest := true 64 | 65 | dockerRepository := Some("galacticfog.artifactoryonline.com") 66 | 67 | -------------------------------------------------------------------------------- /executors/dotnet-executor/docker-build-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | # set -x 5 | 6 | export IMG=galacticfog.artifactoryonline.com/lambda-dotnet-alt-executor 7 | 8 | export SHA=$(git rev-parse --short=8 HEAD) 9 | export VER=$(grep "^version" build.sbt | sed 's/.*:=[ ]*//' | sed 's/"//g') 10 | export TAG=$VER-$SHA 11 | 12 | echo "Building $TAG" 13 | 14 | echo "Creating build image..." 15 | sbt docker:stage 16 | cd target/docker/stage 17 | docker build -t $IMG:$TAG . 18 | echo "Pushing new image to artifactory..." 
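# Push the immutable $VER-$SHA tag, then re-tag the same image as $VER and push that as well.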
19 | docker push $IMG:$TAG 20 | docker tag $IMG:$TAG $IMG:$VER 21 | docker push $IMG:$VER 22 | -------------------------------------------------------------------------------- /executors/dotnet-executor/install-coreclr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #mono 4 | sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF 5 | echo "deb http://download.mono-project.com/repo/debian wheezy main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list 6 | echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | sudo tee -a /etc/apt/sources.list.d/mono-xamarin.list 7 | sudo apt-get update -y 8 | sudo apt-get install -y mono-complete 9 | 10 | #libuv 11 | sudo apt-get install -y automake libtool curl 12 | curl -sSL https://github.com/libuv/libuv/archive/v1.4.2.tar.gz | sudo tar zxfv - -C /usr/local/src 13 | cd /usr/local/src/libuv-1.4.2 14 | sudo sh autogen.sh 15 | sudo ./configure 16 | sudo make 17 | sudo make install 18 | sudo rm -rf /usr/local/src/libuv-1.4.2 && cd ~/ 19 | sudo ldconfig 20 | 21 | #dnvm 22 | sudo apt-get install -y unzip 23 | curl -sSL https://raw.githubusercontent.com/aspnet/Home/dev/dnvminstall.sh | DNX_BRANCH=dev sh && source /usr/sbin/.dnx/dnvm/dnvm.sh 24 | 25 | #.net core dependencies 26 | sudo apt-get install -y libunwind8 libssl-dev 27 | mozroots --import --sync 28 | 29 | #core runtime 30 | dnvm upgrade -u 31 | dnvm install latest -r coreclr -u -p 32 | 33 | #additional 34 | sudo apt-get install -y libcurl4-openssl-dev 35 | sudo apt-get install -y libcurl4-gnutls-dev 36 | -------------------------------------------------------------------------------- /executors/dotnet-executor/no_sudo_install-corecler.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #mono 4 | apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF 5 | echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list 6 | echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list 7 | apt-get update -y 8 | apt-get install -y mono-complete 9 | 10 | #libuv 11 | apt-get install -y automake libtool curl 12 | curl -sSL https://github.com/libuv/libuv/archive/v1.4.2.tar.gz | tar zxfv - -C /usr/local/src 13 | cd /usr/local/src/libuv-1.4.2 14 | sh autogen.sh 15 | ./configure 16 | make 17 | make install 18 | rm -rf /usr/local/src/libuv-1.4.2 && cd ~/ 19 | ldconfig 20 | 21 | #dnvm 22 | apt-get install -y unzip 23 | curl -sSL https://raw.githubusercontent.com/aspnet/Home/dev/dnvminstall.sh | DNX_BRANCH=dev sh && source ~/.dnx/dnvm/dnvm.sh 24 | 25 | #.net core dependencies 26 | apt-get install -y libunwind8 libssl-dev 27 | mozroots --import --sync 28 | 29 | #core runtime 30 | dnvm upgrade -u 31 | dnvm install latest -r coreclr -u -p 32 | 33 | #additional 34 | apt-get install -y libcurl4-openssl-dev 35 | -------------------------------------------------------------------------------- /executors/dotnet-executor/project/build.properties: -------------------------------------------------------------------------------- 1 | #Activator-generated Properties 2 | #Wed Jan 20 10:02:00 PST 2016 3 | template.uuid=deacbc00-065e-4ce7-a5f7-6f227953edfa 4 | sbt.version=0.13.8 5 | -------------------------------------------------------------------------------- 
/executors/dotnet-executor/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.1.1") 2 | 3 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") 4 | -------------------------------------------------------------------------------- /executors/java-executor/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/executors/java-executor/.DS_Store -------------------------------------------------------------------------------- /executors/java-executor/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | project/project 3 | project/target 4 | tmp 5 | /.idea 6 | /*.iml 7 | /.idea_modules 8 | -------------------------------------------------------------------------------- /executors/java-executor/LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project except in compliance with 4 | the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 5 | 6 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 7 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific 8 | language governing permissions and limitations under the License. -------------------------------------------------------------------------------- /executors/java-executor/README: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/executors/java-executor/README -------------------------------------------------------------------------------- /executors/java-executor/build.sbt: -------------------------------------------------------------------------------- 1 | name := """lambda-java-executor""" 2 | 3 | version := "1.2.0-SNAPSHOT" 4 | 5 | mainClass in (Compile, packageBin) := Some("com.galacticfog.lambda.executor.JavaExecutor") 6 | 7 | lazy val root = (project in file(".")) 8 | 9 | scalaVersion := "2.11.6" 10 | 11 | resolvers ++= Seq( 12 | "Mesosphere Repo" at "http://downloads.mesosphere.io/maven", 13 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository", 14 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local" 15 | ) 16 | 17 | libraryDependencies ++= Seq( 18 | "com.galacticfog" %% "gestalt-utils" % "0.0.1-SNAPSHOT" withSources(), 19 | "org.specs2" %% "specs2-core" % "3.7" % "test", 20 | "com.typesafe.play" %% "play-json" % "2.4.0-M2", 21 | "mesosphere" %% "mesos-utils" % "0.28.0" 22 | ) 23 | 24 | scalacOptions in Test ++= Seq("-Yrangepos") 25 | 26 | enablePlugins(JavaAppPackaging) 27 | enablePlugins(DockerPlugin) 28 | 29 | import com.typesafe.sbt.packager.docker._ 30 | 31 | dockerBaseImage := "galacticfog.artifactoryonline.com/gestalt-mesos-base:0.0.0-d5747ff7" 32 | 33 | maintainer := "Brad Futch " 34 | 35 | dockerUpdateLatest := true 36 | 37 | dockerRepository := Some("galacticfog.artifactoryonline.com") 38 | 
-------------------------------------------------------------------------------- /executors/java-executor/docker-build-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | # set -x 5 | 6 | export IMG=galacticfog.artifactoryonline.com/lambda-java-executor 7 | 8 | export SHA=$(git rev-parse --short=8 HEAD) 9 | export VER=$(grep "^version" build.sbt | sed 's/.*:=[ ]*//' | sed 's/"//g') 10 | export TAG=$VER-$SHA 11 | 12 | echo "Building $TAG" 13 | 14 | echo "Creating build image..." 15 | sbt docker:stage 16 | cd target/docker/stage 17 | docker build -t $IMG:$TAG . 18 | echo "Pushing new image to artifactory..." 19 | docker push $IMG:$TAG 20 | docker tag $IMG:$TAG $IMG:$VER 21 | docker push $IMG:$VER 22 | -------------------------------------------------------------------------------- /executors/java-executor/project/build.properties: -------------------------------------------------------------------------------- 1 | #Activator-generated Properties 2 | #Wed Jan 20 10:02:00 PST 2016 3 | template.uuid=deacbc00-065e-4ce7-a5f7-6f227953edfa 4 | sbt.version=0.13.8 5 | -------------------------------------------------------------------------------- /executors/java-executor/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.0.6") 2 | 3 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") 4 | -------------------------------------------------------------------------------- /executors/js-executor/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/executors/js-executor/.DS_Store -------------------------------------------------------------------------------- /executors/js-executor/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | project/project 3 | project/target 4 | tmp 5 | /.idea 6 | /*.iml 7 | /.idea_modules 8 | -------------------------------------------------------------------------------- /executors/js-executor/LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project except in compliance with 4 | the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 5 | 6 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 7 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific 8 | language governing permissions and limitations under the License. -------------------------------------------------------------------------------- /executors/js-executor/README.md: -------------------------------------------------------------------------------- 1 | #JavaScript Executor 2 | 3 | This is an implementation of a Mesos Executor that will execute a lambda written in JavaScript. The current implementation requires that the lambda author build and publish their dependencies into a directory and zip *both* the executable and all of it's dependencies into an artifact to be inflated and executed. 
Alternatively, the author can provide inline code that is Base64 encoded in the payload. Examples of both are given below. 4 | 5 | We're currently using a docker container built from `java:latest` and executing lambdas with the Java 8 Nashorn JavaScript Engine. 6 | 7 | Here is a brief example of how to create a JavaScript lambda. 8 | 9 | #Hello World 10 | 11 | The simplest possible JavaScript lambda. Note that you must create a named function in both cases. In the artifact case, you then create a zip file that contains your scripts, and specify an artifact URI in the payload upon creation. Examples follow. 12 | 13 | ### hello_world.js 14 | ```javascript 15 | function hello(event, context) { 16 | // Call the console.log function. 17 | var parsed = JSON.parse( event ); 18 | console.log("Hello World"); 19 | console.log( "Event Name : " + parsed.eventName ); 20 | return "SUCCESS"; 21 | }; 22 | ``` 23 | 24 | Now, while you're in the directory that contains your project, you can run the following command to build an artifact for use : 25 | 26 | `zip -r hello_world.zip *` 27 | 28 | Then you specify your lambda payload like so : 29 | 30 | ### Artifact Example Payload 31 | ```json 32 | { 33 | "eventFilter": "com.awesome.HelloWorld", 34 | "artifactDescription": { 35 | "artifactUri": "https://s3.amazonaws.com/my.lambdas/hello_world.zip", 36 | "description": "super simple lambda for javascript", 37 | "functionName": "hello", 38 | "handler": "hello_world.js", 39 | "memorySize": 1024, 40 | "cpus": 0.2, 41 | "publish": false, 42 | "role": "doesntgetused", 43 | "runtime": "dotnet", 44 | "timeoutSecs": 180 45 | } 46 | } 47 | ``` 48 | 49 | ### Inline Example Payload 50 | 51 | ```json 52 | { 53 | "eventFilter": "com.awesome.HelloWorld", 54 | "artifactDescription": { 55 | "code": "ZnVuY3Rpb24gaGVsbG8oZXZlbnQsIGNvbnRleHQpIHsNCiAgLy8gQ2FsbCB0aGUgY29uc29sZS5sb2cgZnVuY3Rpb24uDQogIGNvbnNvbGUubG9nKCJIZWxsbyBXb3JsZCIpOw0KICBjb25zb2xlLmxvZyggIkV2ZW50IE5hbWUgOiAiICsgZXZlbnQuZXZlbnROYW1lICk7DQogIHJldHVybiAiU1VDQ0VTUyI7DQp9Ow==", 56 | "description": "super simple lambda for javascript", 57 | "functionName": "hello", 58 | "handler": "doesntgetused", 59 | "memorySize": 1024, 60 | "cpus": 0.2, 61 | "publish": false, 62 | "role": "doesntgetused", 63 | "runtime": "dotnet", 64 | "timeoutSecs": 180 65 | } 66 | } 67 | ``` 68 | Once you have your payload, you simply `POST` to the `/lambdas` endpoint for the lambda service : 69 | 70 | `POST /lambdas < payload.json` 71 | 72 | And you can invoke it like so : 73 | 74 | `POST /lambdas/{id}/invoke < event.payload.json` 75 | -------------------------------------------------------------------------------- /executors/js-executor/build.sbt: -------------------------------------------------------------------------------- 1 | name := """lambda-javascript-executor""" 2 | 3 | version := "1.2.0-SNAPSHOT" 4 | 5 | mainClass in (Compile, packageBin) := Some("com.galacticfog.lambda.executor.JavaScriptExecutor") 6 | 7 | lazy val root = (project in file(".")) 8 | 9 | scalaVersion := "2.11.6" 10 | 11 | resolvers ++= Seq( 12 | "Mesosphere Repo" at "http://downloads.mesosphere.io/maven", 13 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository" 14 | ) 15 | 16 | libraryDependencies ++= Seq( 17 | "org.specs2" %% "specs2-core" % "3.7" % "test", 18 | "org.slf4j" % "slf4j-api" % "1.7.10", 19 | "ch.qos.logback" % "logback-classic" % "1.1.2", 20 | "com.typesafe.play" %% "play-json" % "2.4.0-M2", 21 | "mesosphere" %% "mesos-utils" % "0.28.0", 22 | "com.groupon.mesos" % "jesos" % "1.5.4-SNAPSHOT" withSources() 
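// jesos (above) is a pure-Java implementation of the Mesos scheduler/executor drivers, so no native libmesos bindings are required.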
23 | ) 24 | 25 | scalacOptions in Test ++= Seq("-Yrangepos") 26 | 27 | enablePlugins(JavaAppPackaging) 28 | enablePlugins(DockerPlugin) 29 | 30 | import com.typesafe.sbt.packager.docker._ 31 | 32 | dockerBaseImage := "galacticfog.artifactoryonline.com/gestalt-mesos-base:0.0.0-d5747ff7" 33 | 34 | maintainer := "Brad Futch " 35 | 36 | dockerUpdateLatest := true 37 | 38 | dockerRepository := Some("galacticfog.artifactoryonline.com") 39 | -------------------------------------------------------------------------------- /executors/js-executor/docker-build-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | # set -x 5 | 6 | export IMG=galacticfog.artifactoryonline.com/lambda-javascript-executor 7 | 8 | export SHA=$(git rev-parse --short=8 HEAD) 9 | export VER=$(grep "^version" build.sbt | sed 's/.*:=[ ]*//' | sed 's/"//g') 10 | export TAG=$VER-$SHA 11 | 12 | echo "Building $TAG" 13 | 14 | echo "Creating build image..." 15 | sbt docker:stage 16 | cd target/docker/stage 17 | docker build -t $IMG:$TAG . 18 | echo "Pushing new image to artifactory..." 19 | docker push $IMG:$TAG 20 | docker tag $IMG:$TAG $IMG:$VER 21 | docker push $IMG:$VER 22 | -------------------------------------------------------------------------------- /executors/js-executor/project/build.properties: -------------------------------------------------------------------------------- 1 | #Activator-generated Properties 2 | #Wed Jan 20 10:02:00 PST 2016 3 | template.uuid=deacbc00-065e-4ce7-a5f7-6f227953edfa 4 | sbt.version=0.13.8 5 | -------------------------------------------------------------------------------- /executors/js-executor/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.0.6") 2 | 3 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") 4 | -------------------------------------------------------------------------------- /executors/js-executor/src/main/resources/console.js: -------------------------------------------------------------------------------- 1 | var stdout = java.lang.System.out; 2 | var stderr = java.lang.System.err; 3 | 4 | /** 5 | * A simple console object that can be used to print log messages 6 | * errors, and warnings. 7 | * @example 8 | * 9 | * console.log('Hello standard out'); 10 | * console.warn('Warning standard error'); 11 | * console.error('Alert! Alert!'); 12 | * 13 | */ 14 | var console = { 15 | 16 | /** 17 | * Log the msg to STDOUT. 18 | * 19 | * @param {string} msg The message to log to standard out. 20 | */ 21 | log: function(msg) { 22 | stdout.println(msg); 23 | }, 24 | 25 | /** 26 | * Log the msg to STDERR 27 | * 28 | * @param {string} msg The message to log with a warning to standard error. 29 | */ 30 | warn: function(msg) { 31 | stderr.println(msg); 32 | }, 33 | 34 | /** 35 | * Log the msg to STDERR 36 | * 37 | * @param {string} msg The message to log with a warning alert to standard error. 
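   * Note: like warn, this writes the message to STDERR as-is; no log level prefix or stack trace is added.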
38 | */ 39 | error: function(msg) { 40 | stderr.println(msg); 41 | } 42 | }; 43 | 44 | module.exports = console; 45 | -------------------------------------------------------------------------------- /lambda-io/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /lambda-io/README.md: -------------------------------------------------------------------------------- 1 | #gestalt-vertx-com.galacticfog.gestalt.vertx.io 2 | 3 | The gestalt-vertx-com.galacticfog.gestalt.vertx.io repo holds the data access project for the gestalt-vertx framework. 4 | -------------------------------------------------------------------------------- /lambda-io/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.5.0") 2 | 3 | 4 | // Driver needed here for scalike mapper. 5 | 6 | libraryDependencies += "org.postgresql" % "postgresql" % "9.3-1102-jdbc4" 7 | 8 | addSbtPlugin("org.scalikejdbc" %% "scalikejdbc-mapper-generator" % "2.2.3") 9 | 10 | 11 | // 12 | // Flyway 13 | // 14 | 15 | addSbtPlugin("org.flywaydb" % "flyway-sbt" % "3.1") 16 | 17 | resolvers += "Flyway" at "http://flywaydb.org/repo" 18 | 19 | -------------------------------------------------------------------------------- /lambda-io/project/scalikejdbc.properties: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # ScalikeJDBC Code Generator Configuration 4 | # 5 | 6 | # replace the database connection info below with the appropriate connection info for your project 7 | jdbc.driver=org.postgresql.Driver 8 | jdbc.url=jdbc:postgresql://localhost:5432/gestaltlambda 9 | jdbc.username=gestaltdev 10 | jdbc.password=M8keitw0rk 11 | jdbc.schema=public 12 | 13 | generator.packageName=com.galacticfog.gestalt.lambda.io.model 14 | 15 | # generator.lineBreak: LF/CRLF 16 | generator.lineBreak=LF 17 | 18 | # generator.template: interpolation/queryDsl 19 | generator.template=queryDsl 20 | 21 | # generator.testTemplate: specs2unit/specs2acceptance/ScalaTestFlatSpec 22 | generator.testTemplate=specs2unit 23 | generator.encoding=UTF-8 24 | 25 | # When you're using Scala 2.11 or higher, you can use case classes for tables with 22+ columns 26 | generator.caseClassOnly=true 27 | 28 | # Set AutoSession for implicit DBSession parameter's default value 29 | generator.defaultAutoSession=true 30 | 31 | # Use autoConstruct macro (default: false) 32 | generator.autoConstruct=false 33 | -------------------------------------------------------------------------------- /lambda-io/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Connection Pool settings 4 | dev.db.default.poolInitialSize=20 5 | dev.db.default.poolMaxSize=40 6 | dev.db.default.poolConnectionTimeoutMillis=7000 -------------------------------------------------------------------------------- /lambda-io/src/main/resources/db/migration/V1__create_reference_tables.sql: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/lambda-io/src/main/resources/db/migration/V1__create_reference_tables.sql -------------------------------------------------------------------------------- /lambda-io/src/main/resources/db/migration/V2__create_tables.sql: -------------------------------------------------------------------------------- 1 | -- ---------------------------------------------------------------------------- 2 | -- LAMBDA 3 | -- ---------------------------------------------------------------------------- 4 | DROP TABLE IF EXISTS lambda CASCADE; 5 | CREATE TABLE lambda( 6 | id TEXT NOT NULL, 7 | is_public boolean NOT NULL, 8 | artifact_description TEXT NOT NULL, 9 | payload TEXT, 10 | 11 | CONSTRAINT pk_lambda PRIMARY KEY (id) 12 | ); 13 | 14 | -- ---------------------------------------------------------------------------- 15 | -- RESULTS 16 | -- ---------------------------------------------------------------------------- 17 | DROP TABLE IF EXISTS result CASCADE; 18 | CREATE TABLE result( 19 | execution_id TEXT NOT NULL, 20 | lambda_id TEXT NOT NULL, 21 | execution_time timestamp with time zone NOT NULL, 22 | content_type TEXT NOT NULL, 23 | result TEXT NOT NULL, 24 | log TEXT, 25 | 26 | CONSTRAINT fk_lambda_id FOREIGN KEY (lambda_id) 27 | REFERENCES lambda (id) MATCH SIMPLE 28 | ON DELETE CASCADE, 29 | 30 | CONSTRAINT pk_result PRIMARY KEY (execution_id) 31 | ); 32 | 33 | -------------------------------------------------------------------------------- /lambda-io/src/main/resources/db/migration/V3__load_data.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/lambda-io/src/main/resources/db/migration/V3__load_data.sql -------------------------------------------------------------------------------- /lambda-io/src/main/resources/db/migration/beforeClean.sql: -------------------------------------------------------------------------------- 1 | DROP EXTENSION IF EXISTS "uuid-ossp" CASCADE; 2 | -------------------------------------------------------------------------------- /lambda-io/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | true 8 | 10 | 11 | UTF-8 12 | 13 | %-4r %highlight(%-5level) - %msg%n 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/io/JdbcConnectionInfo.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io 2 | 3 | abstract class JdbcConnectionInfo( 4 | val subprotocol: String, 5 | val driver: String, 6 | val host: String, 7 | val port: Option[Int], 8 | val database: String, 9 | val username: Option[String], 10 | val password: Option[String], 11 | val args: String*) { 12 | 13 | private[io] val dbsep = "/" 14 | private[io] val protosep = "://" 15 | 16 | def url(args: (String,String)*) = { 17 | val portstr = if(port.isDefined) s":${port.get}" else "" 18 | "jdbc:%s%s%s%s%s%s%s".format(subprotocol, protosep, host, portstr, dbsep, database, 19 | if (!args.isEmpty) mkargs(args.toMap) else "") 20 | } 21 | 22 | private[io] def mkargs(args: Map[String,String]) = args map { 23 | case (k,v) => 
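// format each extra connection argument as key=value; addString below joins them with ';' and prefixes a leading ';'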
"%s=%s".format(k, v) 24 | } addString(new StringBuilder, ";", ";", "") toString 25 | 26 | } 27 | 28 | -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/io/PostgresJdbcInfo.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io 2 | 3 | class PostgresJdbcInfo( 4 | host: String, port: Option[Int] = Some(5432), database: String, 5 | username: Option[String], password: Option[String]) 6 | extends JdbcConnectionInfo( 7 | "postgresql", "org.postgresql.Driver", 8 | host, port, database, username, password) { 9 | } 10 | 11 | object PostgresJdbcInfo { 12 | def apply(host: String, port: Option[Int] = Some(5432), database: String, 13 | username: Option[String], password: Option[String]) = { 14 | new PostgresJdbcInfo(host, port, database, username, password) 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/io/ScalikePostgresInfo.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io 2 | 3 | import scalikejdbc._ 4 | 5 | class ScalikePostgresInfo( 6 | host: String, port: Int, 7 | database: String, user: String, password: String, timeoutMs: Long = 5000L) 8 | extends PostgresJdbcInfo(host, Some(port), database, Some(user), Some(password)) { 9 | 10 | val settings = ConnectionPoolSettings( 11 | connectionTimeoutMillis = timeoutMs 12 | /*,validationQuery = "select 1 from organization;"*/) 13 | 14 | /* This magically 'opens' the connection */ 15 | Class.forName(driver) 16 | ConnectionPool.singleton(url(), user, password, settings) 17 | } 18 | 19 | object ScalikePostgresInfo { 20 | def apply(host: String, port: Int = 5432, database: String, 21 | user: String, password: String, timeoutMs: Long = 5000L) = { 22 | new ScalikePostgresInfo(host, port, database, user, password) 23 | } 24 | 25 | } -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/io/domain/LambdaDao.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io.domain 2 | 3 | import java.util.UUID 4 | 5 | import com.galacticfog.gestalt.lambda.io.model.LambdaRepository 6 | import play.api.libs.json.{JsValue, Json} 7 | import scalikejdbc._ 8 | import LambdaRepository.lr 9 | 10 | import scala.util.Success 11 | 12 | //@deprecated - eventFilter is deprecated 13 | case class LambdaDao( id : Option[String], eventFilter: Option[String] = None, public : Option[Boolean], artifactDescription : JsValue, payload : Option[String] ) { 14 | def create : LambdaDao = { 15 | LambdaDao.create( 16 | inId = id, 17 | public = public.getOrElse( false ), 18 | artifactDescription = artifactDescription, 19 | payload = payload 20 | ) 21 | } 22 | 23 | def update : LambdaDao = { 24 | 25 | val lr = LambdaRepository.find( id.get ) getOrElse { 26 | throw new Exception( "Could not find lambda with id : " + id.get ) 27 | } 28 | 29 | LambdaDao.make( 30 | lr.copy( 31 | id = id.get, 32 | isPublic = public.getOrElse( false ), 33 | artifactDescription = Json.stringify( artifactDescription ), 34 | payload = payload 35 | ).save 36 | ) 37 | } 38 | } 39 | 40 | object LambdaDao { 41 | 42 | implicit val lambdaDaoFormat = Json.format[LambdaDao] 43 | 44 | def create( 45 | inId : 
Option[String], 46 | public : Boolean, 47 | artifactDescription : JsValue, 48 | payload : Option[String] = None 49 | ) : LambdaDao = { 50 | 51 | val id = inId getOrElse UUID.randomUUID.toString 52 | 53 | make( 54 | LambdaRepository.create( 55 | id = id, 56 | isPublic = public, 57 | artifactDescription = Json.stringify( artifactDescription ), 58 | payload = payload 59 | ) 60 | ) 61 | } 62 | 63 | def make( lr : LambdaRepository ) : LambdaDao = { 64 | new LambdaDao( 65 | id = Some(lr.id), 66 | public = Some(lr.isPublic), 67 | artifactDescription = Json.parse( lr.artifactDescription ), 68 | payload = lr.payload 69 | ) 70 | } 71 | 72 | def findAll : Seq[LambdaDao] = { 73 | LambdaRepository.findAll.map(make(_)) 74 | } 75 | 76 | def delete( id : String ) : Unit = { 77 | LambdaRepository.find( id ) match { 78 | case Some(s) => s.destroy 79 | case None => { 80 | throw new Exception( s"Lambda not found with id $id" ) 81 | } 82 | } 83 | } 84 | 85 | def find( id : String ) : Option[LambdaDao] = { 86 | LambdaRepository.find(id).map(make(_)) 87 | } 88 | 89 | def findById( id : String ) : Option[LambdaDao] = { 90 | LambdaRepository.find(id).map(make(_)) 91 | } 92 | 93 | } 94 | -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/io/domain/LambdaEvent.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io.domain 2 | 3 | import play.api.libs.json.{Json, JsValue} 4 | 5 | case class LambdaEvent( eventName : String, data : JsValue ) 6 | 7 | object LambdaEvent { 8 | implicit val lambdaEventFormat = Json.format[LambdaEvent] 9 | } 10 | -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/io/domain/LambdaResult.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io.domain 2 | 3 | import com.galacticfog.gestalt.lambda.io.domain.LambdaContentType.LambdaContentType 4 | import play.api.libs.json.{Reads, Json} 5 | import play.api.libs.json._ 6 | import play.api.libs.functional.syntax._ 7 | 8 | object LambdaContentType { 9 | 10 | sealed trait LambdaContentType { 11 | def name: String 12 | } 13 | 14 | case object HTML extends LambdaContentType { 15 | val name = "text/html" 16 | } 17 | 18 | case object JS extends LambdaContentType { 19 | val name = "application/js" 20 | } 21 | 22 | case object TEXT extends LambdaContentType { 23 | val name = "text/plain" 24 | } 25 | 26 | def apply( name : String ) : LambdaContentType = { 27 | name match { 28 | case HTML.name => HTML 29 | case JS.name => JS 30 | case TEXT.name | "application/json" => TEXT 31 | //TODO : is this the right thing to do? 
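// Anything unrecognized currently falls back to TEXT rather than raising an error.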
32 | case _ => TEXT 33 | } 34 | } 35 | } 36 | 37 | case class LambdaResult( contentType : LambdaContentType, result : String ) 38 | -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/io/domain/ResultDao.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io.domain 2 | 3 | import com.galacticfog.gestalt.lambda.io.domain.LambdaContentType.LambdaContentType 4 | import com.galacticfog.gestalt.lambda.io.model.ResultRepository 5 | import com.galacticfog.gestalt.lambda.utils.SecureIdGenerator 6 | import org.joda.time.DateTime 7 | import play.api.libs.json.{Reads, JsValue, Json} 8 | import play.api.libs.json._ 9 | import play.api.libs.functional.syntax._ 10 | import scalikejdbc._ 11 | import ResultRepository.rr 12 | 13 | import scala.util.Success 14 | 15 | case class ResultDao( lambdaId : String, executionId : String, contentType : LambdaContentType, result : String, log : Option[String], executionTime : Option[DateTime] ) 16 | 17 | object ResultDao { 18 | 19 | //implicit val resultDaoFormat = Json.format[ResultDao] 20 | implicit val resultDaoReads : Reads[ResultDao] = 21 | ( 22 | ( __ \ "lambdaId" ).read[String] and 23 | ( __ \ "executionId" ).read[String] and 24 | ( __ \ "contentType" ).read[String].map( LambdaContentType(_) ) and 25 | ( __ \ "result" ).read[String] and 26 | ( __ \ "log" ).readNullable[String] and 27 | ( __ \ "executionTime" ).readNullable[DateTime] 28 | )(ResultDao.apply _) 29 | 30 | implicit val resultDaoWrites : Writes[ResultDao] = new Writes[ResultDao] { 31 | def writes( result : ResultDao ) : JsValue = { 32 | 33 | var obj = Json.obj( 34 | "lambdaId" -> result.lambdaId, 35 | "executionId" -> result.executionId, 36 | "contentType" -> result.contentType.name, 37 | "result" -> result.result 38 | ) 39 | 40 | if( result.log.isDefined ) 41 | { 42 | obj = obj + ("log" -> Json.toJson( result.log.get) ) 43 | } 44 | if( result.executionTime.isDefined ) 45 | { 46 | obj = obj + ("executionTime" -> Json.toJson( result.executionTime.get ) ) 47 | } 48 | 49 | obj 50 | } 51 | } 52 | 53 | private val ID_LENGTH = 24 54 | 55 | def create( 56 | lambdaId : String, 57 | executionId : String, 58 | result : String, 59 | contentType : LambdaContentType, 60 | log : Option[String] = None 61 | ) : ResultDao = { 62 | 63 | val id = SecureIdGenerator.genId62( ID_LENGTH ) 64 | make( 65 | ResultRepository.create( 66 | lambdaId = lambdaId, 67 | executionId = executionId, 68 | executionTime = DateTime.now, 69 | contentType = contentType.name, 70 | result = result, 71 | log = log 72 | ) 73 | ) 74 | } 75 | 76 | def make( lr : ResultRepository ) : ResultDao = { 77 | new ResultDao( 78 | lambdaId = lr.lambdaId, 79 | executionId = lr.executionId, 80 | contentType = LambdaContentType( lr.contentType ), 81 | result = lr.result, 82 | executionTime = Some(lr.executionTime), 83 | log = lr.log 84 | ) 85 | } 86 | 87 | def findAll : Seq[ResultDao] = { 88 | ResultRepository.findAll.map(make(_)) 89 | } 90 | 91 | def delete( id : String ) : Unit = { 92 | ResultRepository.find( id ) match { 93 | case Some(s) => s.destroy 94 | case None => { 95 | throw new Exception( s"Result not found with id $id" ) 96 | } 97 | } 98 | } 99 | 100 | //TODO : this may need to return a list at some point depending on how we handle the metaContext issue 101 | def find( executionId : String )(implicit session : DBSession = AutoSession) : Option[ResultDao] = { 102 | withSQL { 103 | 
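// fetch the single result row recorded for this execution id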
select.from(ResultRepository as rr).where.eq(rr.executionId, executionId) 104 | }.map(ResultRepository(rr)).single.apply().map(make(_)) 105 | } 106 | 107 | def findById( id : String ) : Option[ResultDao] = { 108 | ResultRepository.find(id).map(make(_)) 109 | } 110 | 111 | } 112 | -------------------------------------------------------------------------------- /lambda-io/src/main/scala/com/galacticfog/gestalt/lambda/utils/SecureIdGenerator.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.utils 2 | 3 | import java.security.SecureRandom 4 | 5 | object SecureIdGenerator { 6 | 7 | val random = new SecureRandom() 8 | 9 | val alpha62 = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 10 | val alpha64 = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/+" 11 | 12 | def genId62(len: Int) = { 13 | val id = new StringBuilder(len) 14 | (1 to len) foreach(_ => id += alpha62.charAt(random.nextInt(alpha62.size))) 15 | id.toString 16 | } 17 | 18 | def genId64(len: Int) = { 19 | val id = new StringBuilder(len) 20 | (1 to len) foreach(_ => id += alpha64.charAt(random.nextInt(alpha64.size))) 21 | id.toString 22 | } 23 | 24 | def main(args: Array[String]) { 25 | val num = if (args.size > 0) args(0).toInt else 10 26 | for (i <- 1 to num) println(genId62(24)) 27 | } 28 | 29 | } 30 | -------------------------------------------------------------------------------- /lambda-io/src/test/scala/com/galacticfog/gestalt/lambda/io/model/LambdaRepositorySpec.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io.model 2 | 3 | import scalikejdbc.specs2.mutable.AutoRollback 4 | import org.specs2.mutable._ 5 | import scalikejdbc._ 6 | 7 | 8 | class LambdaRepositorySpec extends Specification { 9 | 10 | "LambdaRepository" should { 11 | 12 | val lr = LambdaRepository.syntax("lr") 13 | 14 | "find by primary keys" in new AutoRollback { 15 | val maybeFound = LambdaRepository.find("MyString") 16 | maybeFound.isDefined should beTrue 17 | } 18 | "find by where clauses" in new AutoRollback { 19 | val maybeFound = LambdaRepository.findBy(sqls.eq(lr.id, "MyString")) 20 | maybeFound.isDefined should beTrue 21 | } 22 | "find all records" in new AutoRollback { 23 | val allResults = LambdaRepository.findAll() 24 | allResults.size should be_>(0) 25 | } 26 | "count all records" in new AutoRollback { 27 | val count = LambdaRepository.countAll() 28 | count should be_>(0L) 29 | } 30 | "find all by where clauses" in new AutoRollback { 31 | val results = LambdaRepository.findAllBy(sqls.eq(lr.id, "MyString")) 32 | results.size should be_>(0) 33 | } 34 | "count by where clauses" in new AutoRollback { 35 | val count = LambdaRepository.countBy(sqls.eq(lr.id, "MyString")) 36 | count should be_>(0L) 37 | } 38 | "create new record" in new AutoRollback { 39 | val created = LambdaRepository.create(id = "MyString", isPublic = false, artifactDescription = "MyString") 40 | created should not beNull 41 | } 42 | "save a record" in new AutoRollback { 43 | val entity = LambdaRepository.findAll().head 44 | // TODO modify something 45 | val modified = entity 46 | val updated = LambdaRepository.save(modified) 47 | updated should not equalTo(entity) 48 | } 49 | "destroy a record" in new AutoRollback { 50 | val entity = LambdaRepository.findAll().head 51 | LambdaRepository.destroy(entity) 52 | val shouldBeNone = LambdaRepository.find("MyString") 53 | shouldBeNone.isDefined should 
beFalse 54 | } 55 | } 56 | 57 | } 58 | -------------------------------------------------------------------------------- /lambda-io/src/test/scala/com/galacticfog/gestalt/lambda/io/model/ResultRepositorySpec.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.io.model 2 | 3 | import scalikejdbc.specs2.mutable.AutoRollback 4 | import org.specs2.mutable._ 5 | import scalikejdbc._ 6 | import org.joda.time.{DateTime} 7 | 8 | 9 | class ResultRepositorySpec extends Specification { 10 | 11 | "ResultRepository" should { 12 | 13 | val rr = ResultRepository.syntax("rr") 14 | 15 | "find by primary keys" in new AutoRollback { 16 | val maybeFound = ResultRepository.find("MyString") 17 | maybeFound.isDefined should beTrue 18 | } 19 | "find by where clauses" in new AutoRollback { 20 | val maybeFound = ResultRepository.findBy(sqls.eq(rr.executionId, "MyString")) 21 | maybeFound.isDefined should beTrue 22 | } 23 | "find all records" in new AutoRollback { 24 | val allResults = ResultRepository.findAll() 25 | allResults.size should be_>(0) 26 | } 27 | "count all records" in new AutoRollback { 28 | val count = ResultRepository.countAll() 29 | count should be_>(0L) 30 | } 31 | "find all by where clauses" in new AutoRollback { 32 | val results = ResultRepository.findAllBy(sqls.eq(rr.executionId, "MyString")) 33 | results.size should be_>(0) 34 | } 35 | "count by where clauses" in new AutoRollback { 36 | val count = ResultRepository.countBy(sqls.eq(rr.executionId, "MyString")) 37 | count should be_>(0L) 38 | } 39 | "create new record" in new AutoRollback { 40 | val created = ResultRepository.create(executionId = "MyString", lambdaId = "MyString", executionTime = DateTime.now, contentType = "MyString", result = "MyString") 41 | created should not beNull 42 | } 43 | "save a record" in new AutoRollback { 44 | val entity = ResultRepository.findAll().head 45 | // TODO modify something 46 | val modified = entity 47 | val updated = ResultRepository.save(modified) 48 | updated should not equalTo(entity) 49 | } 50 | "destroy a record" in new AutoRollback { 51 | val entity = ResultRepository.findAll().head 52 | ResultRepository.destroy(entity) 53 | val shouldBeNone = ResultRepository.find("MyString") 54 | shouldBeNone.isDefined should beFalse 55 | } 56 | } 57 | 58 | } 59 | -------------------------------------------------------------------------------- /lambda-io/src/test/scala/com/galacticfog/gestalt/vertx/com/galacticfog/gestalt/vertx/io/model/PolicyRepositorySpec.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.vertx.com.galacticfog.gestalt.vertx.io.model 2 | 3 | import scalikejdbc.specs2.mutable.AutoRollback 4 | import org.specs2.mutable._ 5 | import scalikejdbc._ 6 | 7 | 8 | class PolicyRepositorySpec extends Specification { 9 | 10 | "PolicyRepository" should { 11 | 12 | val pr = PolicyRepository.syntax("pr") 13 | 14 | "find by primary keys" in new AutoRollback { 15 | val maybeFound = PolicyRepository.find("MyString") 16 | maybeFound.isDefined should beTrue 17 | } 18 | "find by where clauses" in new AutoRollback { 19 | val maybeFound = PolicyRepository.findBy(sqls.eq(pr.id, "MyString")) 20 | maybeFound.isDefined should beTrue 21 | } 22 | "find all records" in new AutoRollback { 23 | val allResults = PolicyRepository.findAll() 24 | allResults.size should be_>(0) 25 | } 26 | "count all records" in new AutoRollback { 27 | val count = PolicyRepository.countAll() 28 | 
count should be_>(0L) 29 | } 30 | "find all by where clauses" in new AutoRollback { 31 | val results = PolicyRepository.findAllBy(sqls.eq(pr.id, "MyString")) 32 | results.size should be_>(0) 33 | } 34 | "count by where clauses" in new AutoRollback { 35 | val count = PolicyRepository.countBy(sqls.eq(pr.id, "MyString")) 36 | count should be_>(0L) 37 | } 38 | "create new record" in new AutoRollback { 39 | val created = PolicyRepository.create(id = "MyString", handlerId = "MyString", policyName = "MyString", artifactName = "MyString", eventFilter = "MyString") 40 | created should not beNull 41 | } 42 | "save a record" in new AutoRollback { 43 | val entity = PolicyRepository.findAll().head 44 | // TODO modify something 45 | val modified = entity 46 | val updated = PolicyRepository.save(modified) 47 | updated should not equalTo(entity) 48 | } 49 | "destroy a record" in new AutoRollback { 50 | val entity = PolicyRepository.findAll().head 51 | PolicyRepository.destroy(entity) 52 | val shouldBeNone = PolicyRepository.find("MyString") 53 | shouldBeNone.isDefined should beFalse 54 | } 55 | } 56 | 57 | } 58 | -------------------------------------------------------------------------------- /lambda-io/src/test/scala/com/galacticfog/gestalt/vertx/io/model/LambdaRepositorySpec.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.vertx.io.model 2 | 3 | import com.galacticfog.gestalt.lambda.io.model.LambdaRepository 4 | import scalikejdbc.specs2.mutable.AutoRollback 5 | import org.specs2.mutable._ 6 | import scalikejdbc._ 7 | 8 | 9 | class LambdaRepositorySpec extends Specification { 10 | 11 | "LambdaRepository" should { 12 | 13 | val lr = LambdaRepository.syntax("lr") 14 | 15 | "find by primary keys" in new AutoRollback { 16 | val maybeFound = LambdaRepository.find("MyString") 17 | maybeFound.isDefined should beTrue 18 | } 19 | "find by where clauses" in new AutoRollback { 20 | val maybeFound = LambdaRepository.findBy(sqls.eq(lr.id, "MyString")) 21 | maybeFound.isDefined should beTrue 22 | } 23 | "find all records" in new AutoRollback { 24 | val allResults = LambdaRepository.findAll() 25 | allResults.size should be_>(0) 26 | } 27 | "count all records" in new AutoRollback { 28 | val count = LambdaRepository.countAll() 29 | count should be_>(0L) 30 | } 31 | "find all by where clauses" in new AutoRollback { 32 | val results = LambdaRepository.findAllBy(sqls.eq(lr.id, "MyString")) 33 | results.size should be_>(0) 34 | } 35 | "count by where clauses" in new AutoRollback { 36 | val count = LambdaRepository.countBy(sqls.eq(lr.id, "MyString")) 37 | count should be_>(0L) 38 | } 39 | "create new record" in new AutoRollback { 40 | val created = LambdaRepository.create(id = "MyString", eventFilter = "MyString", artifactDescription = "MyString", metaContext = "MyString") 41 | created should not beNull 42 | } 43 | "save a record" in new AutoRollback { 44 | val entity = LambdaRepository.findAll().head 45 | // TODO modify something 46 | val modified = entity 47 | val updated = LambdaRepository.save(modified) 48 | updated should not equalTo(entity) 49 | } 50 | "destroy a record" in new AutoRollback { 51 | val entity = LambdaRepository.findAll().head 52 | LambdaRepository.destroy(entity) 53 | val shouldBeNone = LambdaRepository.find("MyString") 54 | shouldBeNone.isDefined should beFalse 55 | } 56 | } 57 | 58 | } 59 | -------------------------------------------------------------------------------- 
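Note on the generated repository specs above: each one leaves the "save a record" case with a "// TODO modify something" placeholder, so `save` is exercised on an unmodified copy and `updated should not equalTo(entity)` has nothing real to verify. A minimal sketch of how that case could be filled in, assuming the scalikejdbc-generated entity is a case class (so `copy` is available) and using the `artifactDescription` column that the create test already references; the "UpdatedDescription" value is arbitrary:

package com.galacticfog.gestalt.lambda.io.model

import scalikejdbc.specs2.mutable.AutoRollback
import org.specs2.mutable._

class LambdaRepositorySaveSpec extends Specification {

  "LambdaRepository" should {

    "persist a real change on save" in new AutoRollback {
      val entity = LambdaRepository.findAll().head
      // fill in the generated "TODO modify something": change an actual column first,
      // otherwise save() writes back an identical row and the inequality check proves nothing
      val modified = entity.copy(artifactDescription = "UpdatedDescription")
      val updated = LambdaRepository.save(modified)
      updated should not equalTo(entity)
    }
  }
}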
/lambda-io/src/test/scala/com/galacticfog/gestalt/vertx/io/model/PolicyRepositorySpec.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.vertx.io.model 2 | 3 | import scalikejdbc.specs2.mutable.AutoRollback 4 | import org.specs2.mutable._ 5 | import scalikejdbc._ 6 | 7 | 8 | class PolicyRepositorySpec extends Specification { 9 | 10 | "PolicyRepository" should { 11 | 12 | val pr = PolicyRepository.syntax("pr") 13 | 14 | "find by primary keys" in new AutoRollback { 15 | val maybeFound = PolicyRepository.find("MyString") 16 | maybeFound.isDefined should beTrue 17 | } 18 | "find by where clauses" in new AutoRollback { 19 | val maybeFound = PolicyRepository.findBy(sqls.eq(pr.id, "MyString")) 20 | maybeFound.isDefined should beTrue 21 | } 22 | "find all records" in new AutoRollback { 23 | val allResults = PolicyRepository.findAll() 24 | allResults.size should be_>(0) 25 | } 26 | "count all records" in new AutoRollback { 27 | val count = PolicyRepository.countAll() 28 | count should be_>(0L) 29 | } 30 | "find all by where clauses" in new AutoRollback { 31 | val results = PolicyRepository.findAllBy(sqls.eq(pr.id, "MyString")) 32 | results.size should be_>(0) 33 | } 34 | "count by where clauses" in new AutoRollback { 35 | val count = PolicyRepository.countBy(sqls.eq(pr.id, "MyString")) 36 | count should be_>(0L) 37 | } 38 | "create new record" in new AutoRollback { 39 | val created = PolicyRepository.create(id = "MyString", handlerId = "MyString", policyName = "MyString", artifactName = "MyString", eventFilter = "MyString", policyTypeId = 1L, timeOut = 1L) 40 | created should not beNull 41 | } 42 | "save a record" in new AutoRollback { 43 | val entity = PolicyRepository.findAll().head 44 | // TODO modify something 45 | val modified = entity 46 | val updated = PolicyRepository.save(modified) 47 | updated should not equalTo(entity) 48 | } 49 | "destroy a record" in new AutoRollback { 50 | val entity = PolicyRepository.findAll().head 51 | PolicyRepository.destroy(entity) 52 | val shouldBeNone = PolicyRepository.find("MyString") 53 | shouldBeNone.isDefined should beFalse 54 | } 55 | } 56 | 57 | } 58 | -------------------------------------------------------------------------------- /lambda-io/src/test/scala/com/galacticfog/gestalt/vertx/io/model/PolicyTypeRepositorySpec.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.vertx.io.model 2 | 3 | import scalikejdbc.specs2.mutable.AutoRollback 4 | import org.specs2.mutable._ 5 | import scalikejdbc._ 6 | 7 | 8 | class PolicyTypeRepositorySpec extends Specification { 9 | 10 | "PolicyTypeRepository" should { 11 | 12 | val ptr = PolicyTypeRepository.syntax("ptr") 13 | 14 | "find by primary keys" in new AutoRollback { 15 | val maybeFound = PolicyTypeRepository.find(1L) 16 | maybeFound.isDefined should beTrue 17 | } 18 | "find by where clauses" in new AutoRollback { 19 | val maybeFound = PolicyTypeRepository.findBy(sqls.eq(ptr.id, 1L)) 20 | maybeFound.isDefined should beTrue 21 | } 22 | "find all records" in new AutoRollback { 23 | val allResults = PolicyTypeRepository.findAll() 24 | allResults.size should be_>(0) 25 | } 26 | "count all records" in new AutoRollback { 27 | val count = PolicyTypeRepository.countAll() 28 | count should be_>(0L) 29 | } 30 | "find all by where clauses" in new AutoRollback { 31 | val results = PolicyTypeRepository.findAllBy(sqls.eq(ptr.id, 1L)) 32 | results.size should be_>(0) 33 | } 34 | 
"count by where clauses" in new AutoRollback { 35 | val count = PolicyTypeRepository.countBy(sqls.eq(ptr.id, 1L)) 36 | count should be_>(0L) 37 | } 38 | "create new record" in new AutoRollback { 39 | val created = PolicyTypeRepository.create(name = "MyString") 40 | created should not beNull 41 | } 42 | "save a record" in new AutoRollback { 43 | val entity = PolicyTypeRepository.findAll().head 44 | // TODO modify something 45 | val modified = entity 46 | val updated = PolicyTypeRepository.save(modified) 47 | updated should not equalTo(entity) 48 | } 49 | "destroy a record" in new AutoRollback { 50 | val entity = PolicyTypeRepository.findAll().head 51 | PolicyTypeRepository.destroy(entity) 52 | val shouldBeNone = PolicyTypeRepository.find(1L) 53 | shouldBeNone.isDefined should beFalse 54 | } 55 | } 56 | 57 | } 58 | -------------------------------------------------------------------------------- /lambda-io/src/test/scala/com/galacticfog/gestalt/vertx/io/model/VertxRepositorySpec.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.vertx.io.model 2 | 3 | import com.galacticfog.gestalt.lambda.io.model.VertxRepository 4 | import scalikejdbc.specs2.mutable.AutoRollback 5 | import org.specs2.mutable._ 6 | import scalikejdbc._ 7 | 8 | 9 | class VertxRepositorySpec extends Specification { 10 | 11 | "VertxRepository" should { 12 | 13 | val vr = VertxRepository.syntax("vr") 14 | 15 | "find by primary keys" in new AutoRollback { 16 | val maybeFound = VertxRepository.find("MyString") 17 | maybeFound.isDefined should beTrue 18 | } 19 | "find by where clauses" in new AutoRollback { 20 | val maybeFound = VertxRepository.findBy(sqls.eq(vr.id, "MyString")) 21 | maybeFound.isDefined should beTrue 22 | } 23 | "find all records" in new AutoRollback { 24 | val allResults = VertxRepository.findAll() 25 | allResults.size should be_>(0) 26 | } 27 | "count all records" in new AutoRollback { 28 | val count = VertxRepository.countAll() 29 | count should be_>(0L) 30 | } 31 | "find all by where clauses" in new AutoRollback { 32 | val results = VertxRepository.findAllBy(sqls.eq(vr.id, "MyString")) 33 | results.size should be_>(0) 34 | } 35 | "count by where clauses" in new AutoRollback { 36 | val count = VertxRepository.countBy(sqls.eq(vr.id, "MyString")) 37 | count should be_>(0L) 38 | } 39 | "create new record" in new AutoRollback { 40 | val created = VertxRepository.create(id = "MyString", verticleName = "MyString", artifactName = "MyString", eventFilter = "MyString", timeOut = 1L) 41 | created should not beNull 42 | } 43 | "save a record" in new AutoRollback { 44 | val entity = VertxRepository.findAll().head 45 | // TODO modify something 46 | val modified = entity 47 | val updated = VertxRepository.save(modified) 48 | updated should not equalTo(entity) 49 | } 50 | "destroy a record" in new AutoRollback { 51 | val entity = VertxRepository.findAll().head 52 | VertxRepository.destroy(entity) 53 | val shouldBeNone = VertxRepository.find("MyString") 54 | shouldBeNone.isDefined should beFalse 55 | } 56 | } 57 | 58 | } 59 | -------------------------------------------------------------------------------- /lambda-plugin/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 
20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /lambda-plugin/README.md: -------------------------------------------------------------------------------- 1 | #com.galacticfog.gestalt.lambda.plugin 2 | 3 | This holds the plugin interface that must be implemented for Gestalt-Lambda 4 | -------------------------------------------------------------------------------- /lambda-plugin/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-lambda-plugin""" 2 | 3 | organization := "com.galacticfog" 4 | 5 | version := "0.2.1-SNAPSHOT" 6 | 7 | scalaVersion := "2.11.5" 8 | 9 | publishTo := Some("Artifactory Realm" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local/") 10 | 11 | credentials += Credentials(Path.userHome / ".ivy2" / ".credentials") 12 | 13 | resolvers ++= Seq( 14 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 15 | "snapshots" at "http://scala-tools.org/repo-snapshots", 16 | "releases" at "http://scala-tools.org/repo-releases") 17 | 18 | credentials ++= { 19 | (for { 20 | realm <- sys.env.get("GESTALT_RESOLVER_REALM") 21 | username <- sys.env.get("GESTALT_RESOLVER_USERNAME") 22 | resolverUrlStr <- sys.env.get("GESTALT_RESOLVER_URL") 23 | resolverUrl <- scala.util.Try{url(resolverUrlStr)}.toOption 24 | password <- sys.env.get("GESTALT_RESOLVER_PASSWORD") 25 | } yield { 26 | Seq(Credentials(realm, resolverUrl.getHost, username, password)) 27 | }) getOrElse(Seq()) 28 | } 29 | 30 | resolvers ++= { 31 | sys.env.get("GESTALT_RESOLVER_URL") map { 32 | url => Seq("gestalt-resolver" at url) 33 | } getOrElse(Seq()) 34 | } 35 | 36 | // 37 | // Adds project name to prompt like in a Play project 38 | // 39 | shellPrompt in ThisBuild := { state => "\033[0;36m" + Project.extract(state).currentRef.project + "\033[0m] " } 40 | 41 | libraryDependencies ++= Seq ( 42 | "com.galacticfog" %% "gestalt-utils" % "0.0.1-SNAPSHOT" withSources(), 43 | "com.galacticfog" %% "gestalt-lambda-io" % "0.2.0-SNAPSHOT" withSources(), 44 | "org.slf4j" % "slf4j-api" % "1.7.10", 45 | "ch.qos.logback" % "logback-classic" % "1.1.2", 46 | "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0" 47 | ) 48 | 49 | -------------------------------------------------------------------------------- /lambda-plugin/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.5.0") 2 | 3 | -------------------------------------------------------------------------------- /lambda-plugin/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Connection Pool settings 4 | dev.db.default.poolInitialSize=20 5 | dev.db.default.poolMaxSize=40 6 | dev.db.default.poolConnectionTimeoutMillis=7000 -------------------------------------------------------------------------------- /lambda-plugin/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | true 8 | 10 | 11 | UTF-8 12 | 13 | %-4r %highlight(%-5level) - %msg%n 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /lambda-plugin/src/main/scala/com/galacticfog/gestalt/lambda/plugin/LambdaAdapter.scala: 
-------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.plugin 2 | 3 | import com.galacticfog.gestalt.lambda.io.domain.{LambdaResult, LambdaEvent, LambdaDao} 4 | import com.galacticfog.gestalt.utils.servicefactory.GestaltPlugin 5 | 6 | import scala.concurrent.{ExecutionContext, Future} 7 | 8 | trait LambdaAdapter extends GestaltPlugin { 9 | 10 | //the return should be a stringified json payload that will 11 | //contain the adapter specific data for invoking lambdas 12 | def createLambda( data : LambdaDao ) : String 13 | 14 | //this will either complete or throw an exception, no need for a return 15 | def deleteLambda( data : LambdaDao ) : Unit 16 | 17 | def invokeLambda( data : LambdaDao, event : LambdaEvent, env : Future[Map[String,String]], creds : Option[String] = None )(implicit context : ExecutionContext ) : Future[LambdaResult] 18 | 19 | //this means that the environment has changed and the next invocation should re-pull the environment variables 20 | def invalidateCache( data : LambdaDao ) : Unit 21 | } 22 | -------------------------------------------------------------------------------- /lambda-scheduler/.gitignore: -------------------------------------------------------------------------------- 1 | logs 2 | project/project 3 | project/target 4 | target 5 | tmp 6 | .history 7 | dist 8 | /.idea 9 | /*.iml 10 | /out 11 | /.idea_modules 12 | /.classpath 13 | /.project 14 | /RUNNING_PID 15 | /.settings 16 | -------------------------------------------------------------------------------- /lambda-scheduler/LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project except in compliance with 4 | the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 5 | 6 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 7 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific 8 | language governing permissions and limitations under the License. -------------------------------------------------------------------------------- /lambda-scheduler/README: -------------------------------------------------------------------------------- 1 | This is your new Play application 2 | ================================= 3 | 4 | This file will be packaged with your application, when using `activator dist`.
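The LambdaAdapter trait above is the extension point the lambda-plugin README describes: concrete backends (see the plugins/ modules and their META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter descriptors) implement it and are loaded as a GestaltPlugin. A minimal sketch of an implementation, modelled on the FakeLambdaAdapter that appears later under lambda/app/.../adapters; the LoggingLambdaAdapter name, its package, and its payload string are illustrative only:

package com.example.lambda.impl   // hypothetical package for the sketch

import com.galacticfog.gestalt.lambda.io.domain.{LambdaContentType, LambdaDao, LambdaEvent, LambdaResult}
import com.galacticfog.gestalt.lambda.plugin.LambdaAdapter

import scala.concurrent.{ExecutionContext, Future}

class LoggingLambdaAdapter extends LambdaAdapter {

  def getPluginName : String = "LoggingLambdaAdapter"

  //return the adapter-specific invocation payload as a stringified JSON document
  def createLambda( data : LambdaDao ) : String = """{"adapter":"logging"}"""

  //completes or throws; nothing to return
  def deleteLambda( data : LambdaDao ) : Unit = ()

  //the next invocation should re-pull the environment variables
  def invalidateCache( data : LambdaDao ) : Unit = ()

  def invokeLambda( data : LambdaDao, event : LambdaEvent, env : Future[Map[String,String]], creds : Option[String] = None )(implicit context : ExecutionContext ) : Future[LambdaResult] = {
    env.map { vars =>
      new LambdaResult( LambdaContentType.TEXT, s"invoked with ${vars.size} environment variables" )
    }
  }
}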
5 | -------------------------------------------------------------------------------- /lambda-scheduler/app/com/galacticfog/lambda/scheduler/Global.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.lambda.scheduler 2 | 3 | import java.util.UUID 4 | 5 | import org.apache.mesos.{Protos, MesosSchedulerDriver} 6 | import org.apache.mesos.Protos.FrameworkInfo 7 | import play.api.{Logger, Play, Application, GlobalSettings} 8 | import scala.collection.mutable 9 | import scala.concurrent.Future 10 | import scala.sys.SystemProperties 11 | import scala.concurrent.duration._ 12 | import Play.current 13 | import play.api.libs.concurrent.Execution.Implicits._ 14 | import scala.util.Try 15 | 16 | case class CmdTask(uuid: UUID, handlerMethod: String, jarUrl: String) 17 | 18 | object Global extends GlobalSettings { 19 | 20 | lazy val driver: Try[MesosSchedulerDriver] = Try { 21 | Logger.info("creating LambdaScheduler") 22 | val scheduler = new LambdaScheduler 23 | 24 | val master = current.configuration.getString("master") getOrElse "localhost:5050" 25 | Logger.info(s"registering with mesos-master: ${master}") 26 | 27 | val schedulerHostname = current.configuration.getString("hostname") getOrElse java.net.InetAddress.getLocalHost.getHostName 28 | Logger.info(s"scheduler on: ${schedulerHostname}") 29 | 30 | val frameworkInfoBuilder = FrameworkInfo.newBuilder() 31 | .setName("gestalt-lambda-scheduler") 32 | .setFailoverTimeout(60.seconds.toMillis) 33 | .setUser("") 34 | .setCheckpoint(true) 35 | .setHostname(schedulerHostname) 36 | 37 | val frameworkInfo = frameworkInfoBuilder.build() 38 | Logger.info(s"scheduler on: ${schedulerHostname}") 39 | val implicitAcknowledgements = false 40 | 41 | new MesosSchedulerDriver( scheduler, frameworkInfo, master, implicitAcknowledgements ) 42 | } 43 | 44 | val taskQueue = new mutable.Queue[CmdTask] 45 | 46 | override def onStart(app: Application): Unit = { 47 | Logger.info("onStart") 48 | driver foreach { d => 49 | Logger.info("starting driver") 50 | Future { d.run } map println 51 | } 52 | } 53 | 54 | override def onStop(app: Application): Unit = { 55 | Logger.info("onStop") 56 | driver foreach { 57 | Logger.info("stopping driver") 58 | _.stop() 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /lambda-scheduler/app/com/galacticfog/lambda/scheduler/TaskUtils.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.lambda.scheduler 2 | 3 | import java.io.File 4 | 5 | import com.google.protobuf.ByteString 6 | import org.apache.mesos.Protos.{ContainerInfo, CommandInfo} 7 | import org.apache.mesos.Protos.ContainerInfo.DockerInfo 8 | import org.apache.mesos._ 9 | 10 | import scala.collection.JavaConverters._ 11 | 12 | /* 13 | A lot of this was ripped off from the scala-based scheduler for RENDLER 14 | They didn't use containers and I haven't tested it yet, so all bets are off on this working as is. 
15 | Also, lots of refactoring will be necessary due to the fact that our lambdas 16 | - don't all have the same container (i.e., the same executor) 17 | - don't all have the same CPU and Memory requirement 18 | */ 19 | 20 | trait TaskUtils { 21 | 22 | val TASK_CPUS = 0.1 23 | val TASK_MEM = 32.0 24 | 25 | lazy val echoExecutorContainer = 26 | Protos.CommandInfo.ContainerInfo.newBuilder() 27 | .setImage("galacticfog.artifactoryonline.com/lambda-echo-executor") 28 | .build() 29 | 30 | def makeTaskPrototype(id: String, offer: Protos.Offer): Protos.TaskInfo = 31 | Protos.TaskInfo.newBuilder 32 | .setTaskId(Protos.TaskID.newBuilder.setValue(id)) 33 | .setName("") 34 | .setSlaveId(offer.getSlaveId) 35 | .addAllResources( 36 | Seq( 37 | scalarResource("cpus", TASK_CPUS), 38 | scalarResource("mem", TASK_MEM) 39 | ).asJava 40 | ) 41 | .build 42 | 43 | protected def scalarResource(name: String, value: Double): Protos.Resource = 44 | Protos.Resource.newBuilder 45 | .setType(Protos.Value.Type.SCALAR) 46 | .setName(name) 47 | .setScalar(Protos.Value.Scalar.newBuilder.setValue(value)) 48 | .build 49 | 50 | def echoExecutor(jarUrl: String): Protos.ExecutorInfo = { 51 | val command = Protos.CommandInfo.newBuilder 52 | .setContainer(echoExecutorContainer) 53 | .addAllUris(Seq( 54 | CommandInfo.URI.newBuilder.setValue(jarUrl).build 55 | ).asJava) 56 | Protos.ExecutorInfo.newBuilder 57 | .setExecutorId(Protos.ExecutorID.newBuilder.setValue("crawl-executor")) 58 | .setName("Crawler") 59 | .setCommand(command) 60 | .build 61 | } 62 | 63 | def makeEchoTask(id: String, 64 | handlerMethod: String, 65 | jarUrl: String, 66 | offer: Protos.Offer): Protos.TaskInfo = 67 | makeTaskPrototype(id, offer).toBuilder 68 | .setName(s"echo_$id") 69 | .setExecutor(echoExecutor(jarUrl)) 70 | .setData(ByteString.copyFromUtf8(handlerMethod)) 71 | .build 72 | 73 | def maxTasksForOffer( 74 | offer: Protos.Offer, 75 | cpusPerTask: Double = TASK_CPUS, 76 | memPerTask: Double = TASK_MEM): Int = { 77 | var count = 0 78 | var cpus = 0.0 79 | var mem = 0.0 80 | 81 | for (resource <- offer.getResourcesList.asScala) { 82 | resource.getName match { 83 | case "cpus" => cpus = resource.getScalar.getValue 84 | case "mem" => mem = resource.getScalar.getValue 85 | case _ => () 86 | } 87 | } 88 | 89 | while (cpus >= TASK_CPUS && mem >= TASK_MEM) { 90 | count = count + 1 91 | cpus = cpus - TASK_CPUS 92 | mem = mem - TASK_MEM 93 | } 94 | 95 | count 96 | } 97 | 98 | def isTerminal(state: Protos.TaskState): Boolean = { 99 | import Protos.TaskState._ 100 | state match { 101 | case TASK_FINISHED | TASK_FAILED | TASK_KILLED | TASK_LOST => 102 | true 103 | case _ => 104 | false 105 | } 106 | } 107 | 108 | } 109 | -------------------------------------------------------------------------------- /lambda-scheduler/app/controllers/Application.scala: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import java.util.UUID 4 | 5 | import com.galacticfog.lambda.scheduler.{CmdTask, Global} 6 | import play.api._ 7 | import play.api.mvc._ 8 | 9 | class Application extends Controller { 10 | 11 | def launch() = Action { request => 12 | // currently, this adds a task to the queue 13 | // instead, it needs to Ask some actor to launch the appropriate lambda 14 | Global.taskQueue.enqueue(CmdTask(UUID.randomUUID(),request.body.asText getOrElse "helloWorld","http:://someJar.com/theJar.jar")) 15 | Accepted("") 16 | } 17 | 18 | def health() = Action{ Ok("alive") } 19 | } 20 | 
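The launch() action above notes that enqueueing a CmdTask on a shared mutable queue is a stopgap and that the controller should instead ask an actor to launch the appropriate lambda. A sketch of that shape using Akka's ask pattern and the injected-controller style this module's build.sbt already enables; the LaunchController name and the launcherActor reference (and whatever that actor replies with) are assumptions, since no such actor exists in the scheduler yet:

package controllers

import java.util.UUID

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.galacticfog.lambda.scheduler.CmdTask
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc._

import scala.concurrent.duration._

class LaunchController( launcherActor: ActorRef ) extends Controller {

  implicit val askTimeout: Timeout = 5.seconds

  //ask the launcher actor instead of pushing onto Global.taskQueue
  def launch() = Action.async { request =>
    val task = CmdTask( UUID.randomUUID(),
                        request.body.asText getOrElse "helloWorld",
                        "http://someJar.com/theJar.jar" )   //placeholder jar URL, as in the original
    (launcherActor ? task) map { _ => Accepted("") }
  }

  def health() = Action { Ok("alive") }
}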
-------------------------------------------------------------------------------- /lambda-scheduler/app/views/index.scala.html: -------------------------------------------------------------------------------- 1 | @(message: String) 2 | 3 | @main("Welcome to Play") { 4 | 5 | @play20.welcome(message) 6 | 7 | } 8 | -------------------------------------------------------------------------------- /lambda-scheduler/app/views/main.scala.html: -------------------------------------------------------------------------------- 1 | @(title: String)(content: Html) 2 | 3 | 4 | 5 | 6 | 7 | @title 8 | 9 | 10 | 11 | 12 | 13 | @content 14 | 15 | 16 | -------------------------------------------------------------------------------- /lambda-scheduler/build.sbt: -------------------------------------------------------------------------------- 1 | import com.typesafe.sbt.packager.docker._ 2 | 3 | name := """lambda-scheduler""" 4 | 5 | version := "1.0-SNAPSHOT" 6 | 7 | maintainer in Docker := "Chris Baker " 8 | 9 | dockerUpdateLatest := true 10 | 11 | // dockerRepository := Some("galacticfog.artifactoryonline.com") 12 | 13 | dockerRepository := Some("192.168.200.20:5000") 14 | 15 | lazy val root = (project in file(".")).enablePlugins(PlayScala) 16 | 17 | scalaVersion := "2.11.6" 18 | 19 | resolvers += "Mesosphere Repo" at "http://downloads.mesosphere.io/maven" 20 | 21 | libraryDependencies += "mesosphere" %% "mesos-utils" % "0.26.0" withJavadoc() 22 | 23 | libraryDependencies ++= Seq( 24 | ws, 25 | specs2 % Test 26 | ) 27 | 28 | resolvers += "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases" 29 | 30 | // Play provides two styles of routers, one expects its actions to be injected, the 31 | // other, legacy style, accesses its actions statically. 32 | routesGenerator := InjectedRoutesGenerator 33 | -------------------------------------------------------------------------------- /lambda-scheduler/conf/application.conf: -------------------------------------------------------------------------------- 1 | # This is the main configuration file for the application. 2 | # ~~~~~ 3 | 4 | # Secret key 5 | # ~~~~~ 6 | # The secret key is used to secure cryptographics functions. 7 | # 8 | # This must be changed for production, but we recommend not changing it in this file. 9 | # 10 | # See http://www.playframework.com/documentation/latest/ApplicationSecret for more details. 11 | application.secret="Z]Dr4Z7pV59ga4Zyrrt@P2Lir/vcW3l=0[8yh7>/lc]sk/963k 3 | RUN echo 'deb http://repos.mesosphere.io/ubuntu/ trusty main' > /etc/apt/sources.list.d/mesosphere.list 4 | RUN apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF 5 | RUN apt-get -y update 6 | RUN apt-get -y install mesos=0.28.1-2.0.20.ubuntu1404 7 | -------------------------------------------------------------------------------- /lambda/LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this project except in compliance with 4 | the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. 5 | 6 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 7 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific 8 | language governing permissions and limitations under the License. 
-------------------------------------------------------------------------------- /lambda/README: -------------------------------------------------------------------------------- 1 | This is your new Play application 2 | ================================= 3 | 4 | This file will be packaged with your application, when using `activator dist`. 5 | -------------------------------------------------------------------------------- /lambda/app/com/galacticfog/gestalt/lambda/Global.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda 2 | 3 | import com.galacticfog.gestalt.lambda.config.DatabaseConfig 4 | import com.galacticfog.gestalt.lambda.io.ScalikePostgresInfo 5 | import org.apache.commons.dbcp.BasicDataSource 6 | import org.flywaydb.core.Flyway 7 | import play.api.Play.current 8 | import play.api.libs.json.{JsSuccess, Json, JsError} 9 | import play.api.{Application, GlobalSettings, Logger => log} 10 | import play.libs.Akka 11 | import scalikejdbc.{GlobalSettings, LoggingSQLAndTimeSettings} 12 | 13 | import scala.util.{Success, Failure} 14 | 15 | object Global extends GlobalSettings { 16 | 17 | GlobalSettings.loggingSQLAndTime = LoggingSQLAndTimeSettings( 18 | enabled = false, 19 | singleLineMode = false, 20 | printUnprocessedStackTrace = false, 21 | stackTraceDepth= 15, 22 | logLevel = 'connection, 23 | warningEnabled = false, 24 | warningThresholdMillis = 3000L, 25 | warningLogLevel = 'warn 26 | ) 27 | 28 | override def onStart( app : Application ) : Unit = { 29 | 30 | val databaseConfig = getDBConfig( "database" ) 31 | val connection = initDB( databaseConfig ) 32 | 33 | //now check if we're doing migration and cleaning 34 | //TODO : FIX 35 | val bClean = current.configuration.getBoolean( "database.clean" ) getOrElse false 36 | val bMigrate = current.configuration.getBoolean( "database.migrate" ) getOrElse false 37 | if( bMigrate ) migrate( connection, bClean, databaseConfig.username, databaseConfig.password ) 38 | 39 | sys.addShutdownHook( akkaShutdown ) 40 | 41 | log.debug( "app started" ) 42 | } 43 | 44 | def akkaShutdown = { 45 | LambdaFramework.shutdown() 46 | Akka.system.shutdown() 47 | Akka.system.awaitTermination() 48 | log.debug( "Akka actorsystem ::shutdown()" ) 49 | } 50 | 51 | override def onStop( app : Application ): Unit = { 52 | akkaShutdown 53 | } 54 | 55 | def getDBConfig( name : String ) : DatabaseConfig = { 56 | log.debug( s"getDBConfig( $name )") 57 | 58 | val hostname = sys.env.getOrElse( "LAMBDA_DATABASE_HOSTNAME", "localhost" ) 59 | val port = sys.env.getOrElse( "LAMBDA_DATABASE_PORT", "5432" ).toInt 60 | val dbName = sys.env.getOrElse( "LAMBDA_DATABASE_NAME", "gestaltlambda" ) 61 | val dbUser = sys.env.getOrElse( "LAMBDA_DATABASE_USER", "gestaltdev" ) 62 | val dbPassword = sys.env.getOrElse( "LAMBDA_DATABASE_PASSWORD", "M8keitw0rk" ) 63 | 64 | println( "Database Connection Info : " ) 65 | println( "\t hostname : " + hostname ) 66 | println( "\t port : " + port ) 67 | println( "\t dbName : " + dbName ) 68 | println( "\t dbUser : " + dbUser ) 69 | //println( "\t dbPass : " + dbPassword ) 70 | 71 | new DatabaseConfig( hostname, port, dbName, dbUser, dbPassword ) 72 | } 73 | 74 | def initDB( dbConfig : DatabaseConfig ) : ScalikePostgresInfo = { 75 | log.debug( "initDB()" ) 76 | new ScalikePostgresInfo( dbConfig.host, dbConfig.port, dbConfig.db_name, dbConfig.username, dbConfig.password ) 77 | } 78 | 79 | def getDataSource( connection : ScalikePostgresInfo ) = { 80 | val ds = new BasicDataSource() 
81 | ds.setDriverClassName(connection.driver) 82 | ds.setUsername(connection.username.get) 83 | ds.setPassword(connection.password.get) 84 | ds.setUrl(connection.url()) 85 | log.debug("url: " + ds.getUrl) 86 | ds 87 | } 88 | 89 | def migrate( connection : ScalikePostgresInfo, bClean : Boolean, username : String, password : String ) = { 90 | log.debug( "migrate()" ) 91 | val fly = new Flyway() 92 | val dataSource = getDataSource( connection ) 93 | fly.setDataSource( dataSource ) 94 | if( bClean ) fly.clean() 95 | fly.migrate() 96 | } 97 | 98 | } 99 | 100 | 101 | 102 | -------------------------------------------------------------------------------- /lambda/app/com/galacticfog/gestalt/lambda/actor/EnvironmentActor.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.actor 2 | 3 | import akka.actor.{ActorRef, Props, ActorLogging, Actor} 4 | import akka.event.LoggingReceive 5 | import com.galacticfog.gestalt.lambda.io.domain.LambdaDao 6 | import com.galacticfog.gestalt.lambda.actor.LambdaMessages._ 7 | import com.galacticfog.gestalt.lambda.util.WebClient 8 | import com.galacticfog.gestalt.lambda.utils.SecureIdGenerator 9 | import play.api.Logger 10 | 11 | import scala.concurrent.{Await, Promise} 12 | import scala.concurrent.duration._ 13 | 14 | class EnvironmentActor( id : String ) extends Actor with ActorLogging { 15 | 16 | val ID_LENGTH = 24 17 | val actorMap = scala.collection.mutable.Map[String,ActorRef]() 18 | var count = 0 19 | 20 | def receive = LoggingReceive { handleRequests } 21 | 22 | val handleRequests : Receive = { 23 | 24 | case LookupVariables( lambda, promise ) => { 25 | Logger.debug( s"LookupVariables( ${lambda.id} )" ) 26 | 27 | val id = SecureIdGenerator.genId62( ID_LENGTH ) 28 | val actor = newVariableFetchActor(count, id) 29 | count += 1 30 | actorMap += (id -> actor) 31 | 32 | actor ! 
LookupVariables( lambda, promise ) 33 | } 34 | 35 | case StopActor( id ) => { 36 | Logger.debug( s"StopActor( ${id} )" ) 37 | val actor = actorMap.get( id ).get 38 | context.system.stop( actor ) 39 | actorMap -= id 40 | } 41 | 42 | } 43 | 44 | def newVariableFetchActor( n : Int, id : String ) = { 45 | Logger.debug( s"newVariableFetchActor(( $n )" ) 46 | context.actorOf( VariableFetchActor.props( id ), name = s"variable-fetch-$n" ) 47 | } 48 | } 49 | 50 | object EnvironmentActor { 51 | def props( id : String ) : Props = Props( new LookupActor( id ) ) 52 | } 53 | 54 | case class MetaClientConfig( protocol : String, host : String, port : Int, user : String, password : String ) 55 | 56 | class VariableFetchActor( id : String ) extends Actor with ActorLogging { 57 | 58 | def receive = LoggingReceive { handleRequests } 59 | 60 | def getClientConfig : MetaClientConfig = { 61 | val protocol = sys.env.getOrElse( "META_PROTOCOL", "http" ) 62 | val host = sys.env.getOrElse( "META_HOSTNAME", "meta.dev2.galacticfog.com" ) 63 | val port = sys.env.getOrElse( "META_PORT", "80" ).toInt 64 | val user = sys.env.getOrElse( "META_USER", "root" ) 65 | val password = sys.env.getOrElse( "META_PASSWORD", "letmein" ) 66 | 67 | new MetaClientConfig( protocol, host, port, user, password ) 68 | } 69 | 70 | val handleRequests : Receive = { 71 | 72 | case LookupVariables( lambda, env ) => { 73 | Logger.debug( s"LookupVariables( ${lambda.id} )" ) 74 | 75 | val promise = env.asInstanceOf[Promise[Map[String,String]]] 76 | 77 | val builder = new (com.ning.http.client.AsyncHttpClientConfig.Builder)() 78 | val client = new play.api.libs.ws.ning.NingWSClient(builder.build()) 79 | val config = getClientConfig 80 | 81 | val wc = new WebClient( client, config.protocol, config.host, config.port, config.user, config.password ) 82 | 83 | try { 84 | 85 | val response = wc.get( s"/lambdas/${lambda.id.get}/env" ) 86 | val result = Await.result( response, 3 seconds ).validate[Map[String,String]].get 87 | 88 | promise.success( result ) 89 | 90 | } catch { 91 | case ex : Exception => { 92 | ex.printStackTrace() 93 | log.debug( "Variables timed out" ) 94 | promise.success( Map[String,String]()) 95 | } 96 | } 97 | 98 | context.parent ! 
StopActor( id ) 99 | } 100 | } 101 | } 102 | 103 | object VariableFetchActor { 104 | def props( id : String ) : Props = Props( new VariableFetchActor( id ) ) 105 | } 106 | -------------------------------------------------------------------------------- /lambda/app/com/galacticfog/gestalt/lambda/actor/LambdaMessages.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.actor 2 | 3 | import akka.actor.{ActorPath, ActorRef} 4 | import com.galacticfog.gestalt.lambda.io.domain.{LambdaEvent, LambdaDao} 5 | import com.galacticfog.gestalt.lambda.plugin.LambdaAdapter 6 | import scala.concurrent.Promise 7 | import play.api.libs.json.JsValue 8 | 9 | import scala.concurrent.{Future, Promise} 10 | 11 | object LambdaMessages { 12 | 13 | sealed trait LambdaMessage 14 | 15 | case class IncomingEvent( eventFilter : String, event : LambdaEvent, executionId : String ) extends LambdaMessage 16 | case class IncomingInvoke( lambdaId : String, event : LambdaEvent, executionId : String, creds : Option[String] ) extends LambdaMessage 17 | case class IncomingInvokeSync( lambdaId : String, event : LambdaEvent, executionId : String, creds : Option[String] ) extends LambdaMessage 18 | case class LookupLambda( lambdaAdapter : LambdaAdapter, eventName : Option[String], lambdaId : Option[String], event : LambdaEvent, executionId : String, syncActor : Option[ActorRef] = None, creds : Option[String] = None ) extends LambdaMessage 19 | case class LookupVariables( lambda : LambdaDao, env : AnyRef ) extends LambdaMessage 20 | case class InvokeLambda( lambdaAdapter : LambdaAdapter, lambda : LambdaDao, event : LambdaEvent, executionId : String, syncActor : Option[ActorRef] = None, creds : Option[String] = None ) extends LambdaMessage 21 | case class StopActor( id : String ) extends LambdaMessage 22 | 23 | case object LambdaShutdown extends LambdaMessage 24 | 25 | } 26 | -------------------------------------------------------------------------------- /lambda/app/com/galacticfog/gestalt/lambda/actor/LookupActor.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.actor 2 | 3 | import akka.actor.{ActorRef, Props, ActorLogging, Actor} 4 | import akka.event.LoggingReceive 5 | import com.galacticfog.gestalt.lambda.io.domain.LambdaDao 6 | import com.galacticfog.gestalt.lambda.actor.LambdaMessages._ 7 | import play.api.Logger 8 | 9 | class LookupActor( id : String ) extends Actor with ActorLogging { 10 | 11 | def receive = LoggingReceive { handleRequests } 12 | 13 | val handleRequests : Receive = { 14 | 15 | case LookupLambda( lambdaAdapter, eventName, lambdaId, event, executionId, syncActor, creds ) => { 16 | Logger.debug( s"LookupLambda( $eventName )" ) 17 | 18 | //TODO : this should be offloaded to the actor system 19 | val optionLambda = if( lambdaId.isDefined ) LambdaDao.findById( lambdaId.get ) else LambdaDao.find( eventName.get ) 20 | optionLambda match { 21 | case Some(s) => { 22 | 23 | if( syncActor.isDefined ) 24 | { 25 | Logger.debug( s"LookupLambda passing syncActor to Invoke : " + syncActor.get.path.toSerializationFormat ) 26 | } 27 | 28 | //TODO : this should only forward path if defined 29 | context.parent ! 
InvokeLambda( lambdaAdapter, s, event, executionId, syncActor = syncActor, creds ) 30 | } 31 | case None => { 32 | log.debug( s"no lambda handler found for eventname : ${eventName}" ) 33 | } 34 | } 35 | 36 | Logger.debug( s"Stopping LookupActor( $id )" ) 37 | context.parent ! StopActor( id ) 38 | } 39 | } 40 | } 41 | 42 | object LookupActor { 43 | def props( id : String ) : Props = Props( new LookupActor( id ) ) 44 | } 45 | -------------------------------------------------------------------------------- /lambda/app/com/galacticfog/gestalt/lambda/actor/UnhandledMessageActor.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.actor 2 | 3 | import akka.actor.{Actor, ActorLogging, Props, UnhandledMessage} 4 | import akka.event.LoggingReceive 5 | import play.api.{Logger => log} 6 | 7 | class UnhandledMessageActor extends Actor with ActorLogging { 8 | 9 | def receive = LoggingReceive { handleRequests } 10 | 11 | def handleRequests : Receive = { 12 | case message: UnhandledMessage => { 13 | log.debug(s"CRITICAL! No actors found for message ${message.getMessage}") 14 | } 15 | 16 | /* 17 | if (!Environment.isProduction) { 18 | // Fail fast, fail LOUD 19 | logger.error("Shutting application down") 20 | System.exit(-1) 21 | } 22 | */ 23 | } 24 | } 25 | 26 | object UnhandledMessageActor { 27 | def props() : Props = Props( new UnhandledMessageActor ) 28 | } 29 | 30 | -------------------------------------------------------------------------------- /lambda/app/com/galacticfog/gestalt/lambda/adapters/FakeLambdaAdapter.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.adapters 2 | 3 | import com.galacticfog.gestalt.lambda.io.domain.{LambdaContentType, LambdaResult, LambdaEvent, LambdaDao} 4 | import com.galacticfog.gestalt.lambda.plugin.LambdaAdapter 5 | import play.api.Logger 6 | 7 | import scala.concurrent.{ExecutionContext, Future} 8 | 9 | class FakeLambdaAdapter extends LambdaAdapter { 10 | 11 | def getPluginName : String = "FakeLambdaAdapter" 12 | 13 | def createLambda( data : LambdaDao ) : String = { 14 | Logger.debug("FakeLambdaAdapter::createLambda") 15 | "returned_payload" 16 | } 17 | 18 | def deleteLambda( data : LambdaDao ) : Unit = { 19 | Logger.debug("FakeLambdaAdapter::deleteLambda") 20 | } 21 | 22 | def invalidateCache( data : LambdaDao ) : Unit = { 23 | Logger.debug( "FakeLambdaAdapter::invalidateCache" ) 24 | } 25 | 26 | def invokeLambda( data : LambdaDao, event : LambdaEvent, env : Future[Map[String,String]], creds : Option[String] )(implicit context : ExecutionContext ) : Future[LambdaResult] = { 27 | Logger.debug("FakeLambdaAdapter::invokeLambda") 28 | Future { 29 | new LambdaResult( LambdaContentType.TEXT, "Done" ) 30 | } 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /lambda/app/com/galacticfog/gestalt/lambda/config/DatabaseConfig.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.config 2 | 3 | import play.api.libs.json.Json 4 | 5 | case class 6 | DatabaseConfig( 7 | host : String, 8 | port : Int, 9 | db_name : String, 10 | username : String, 11 | password : String 12 | ) 13 | 14 | object DatabaseConfig { 15 | implicit val dbConfigFormat = Json.format[DatabaseConfig] 16 | } 17 | 18 | -------------------------------------------------------------------------------- 
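DatabaseConfig above derives a Play JSON Format, and the local_config.json later in this repository carries a "database" object with exactly these fields. A small sketch of reading that block into the case class; the inlined JSON simply repeats the sample config values:

import com.galacticfog.gestalt.lambda.config.DatabaseConfig
import play.api.libs.json.{JsError, JsSuccess, Json}

object DatabaseConfigExample extends App {

  //the "database" block from local_config.json, inlined for the example
  val raw = """{ "host" : "localhost", "port" : 5432, "db_name" : "gestaltlambda", "username" : "gestaltdev", "password" : "M8keitw0rk" }"""

  Json.parse( raw ).validate[DatabaseConfig] match {
    case JsSuccess( cfg, _ ) => println( s"connecting to ${cfg.host}:${cfg.port}/${cfg.db_name}" )
    case JsError( errors )   => println( s"bad database config : ${errors}" )
  }
}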
/lambda/app/com/galacticfog/gestalt/lambda/util/SecureIdGenerator.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.util 2 | 3 | import java.security.SecureRandom 4 | 5 | object SecureIdGenerator { 6 | 7 | val random = new SecureRandom() 8 | 9 | val alpha62 = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 10 | val alpha64 = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/+" 11 | 12 | def genId62(len: Int) = { 13 | val id = new StringBuilder(len) 14 | (1 to len) foreach(_ => id += alpha62.charAt(random.nextInt(alpha62.size))) 15 | id.toString 16 | } 17 | 18 | def genId64(len: Int) = { 19 | val id = new StringBuilder(len) 20 | (1 to len) foreach(_ => id += alpha64.charAt(random.nextInt(alpha64.size))) 21 | id.toString 22 | } 23 | 24 | def main(args: Array[String]) { 25 | val num = if (args.size > 0) args(0).toInt else 10 26 | for (i <- 1 to num) println(genId62(24)) 27 | } 28 | 29 | } 30 | -------------------------------------------------------------------------------- /lambda/app/views/index.scala.html: -------------------------------------------------------------------------------- 1 | @(message: String) 2 | 3 | @main("Welcome to Play") { 4 | 5 | @play20.welcome(message) 6 | 7 | } 8 | -------------------------------------------------------------------------------- /lambda/app/views/main.scala.html: -------------------------------------------------------------------------------- 1 | @(title: String)(content: Html) 2 | 3 | 4 | 5 | 6 | 7 | @title 8 | 9 | 10 | 11 | 12 | 13 | @content 14 | 15 | 16 | -------------------------------------------------------------------------------- /lambda/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-lambda""" 2 | 3 | version := "1.0.4-SNAPSHOT" 4 | 5 | lazy val root = (project in file(".")).enablePlugins(PlayScala) 6 | 7 | enablePlugins(DockerPlugin) 8 | enablePlugins(NewRelic) 9 | 10 | import com.typesafe.sbt.packager.docker._ 11 | 12 | dockerBaseImage := "galacticfog.artifactoryonline.com/gestalt-mesos-base:0.0.0-1cbe9134" 13 | 14 | maintainer in Docker := "Brad Futch " 15 | 16 | dockerUpdateLatest := true 17 | 18 | dockerExposedPorts in Docker := Seq(9000) 19 | 20 | dockerRepository := Some("galacticfog.artifactoryonline.com") 21 | 22 | scalaVersion := "2.11.7" 23 | 24 | libraryDependencies ++= Seq( 25 | jdbc, 26 | cache, 27 | ws, 28 | "com.amazonaws" % "aws-java-sdk" % "1.10.44", 29 | "com.typesafe.play" %% "play-json" % "2.4.0-M2", 30 | "com.newrelic.agent.java" % "newrelic-api" % "3.29.0", 31 | "com.galacticfog" %% "gestalt-lambda-io" % "0.3.0-SNAPSHOT" withSources(), 32 | "com.galacticfog" %% "gestalt-lambda-plugin" % "0.2.1-SNAPSHOT" withSources(), 33 | "com.galacticfog" %% "gestalt-security-play" % "2.2.3-SNAPSHOT" withSources(), 34 | "com.galacticfog" %% "gestalt-meta-sdk-scala" % "0.3.0-SNAPSHOT" withSources(), 35 | "com.galacticfog" %% "gestalt-utils" % "0.0.1-SNAPSHOT" withSources() 36 | ) 37 | 38 | 39 | resolvers ++= Seq( 40 | "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases", 41 | "snapshots" at "http://scala-tools.org/repo-snapshots", 42 | "releases" at "http://scala-tools.org/repo-releases", 43 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 44 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-releases-local" 45 | ) 46 | 47 | import NativePackagerHelper._ 48 | mappings in Universal ++= 
directory("plugins") 49 | 50 | val mesosLib = new File("/usr/local/lib/libmesos.dylib") 51 | mappings in Universal += mesosLib -> "lib/libmesos.dylib" 52 | 53 | newrelicConfig := (resourceDirectory in Compile).value / "newrelic.yml" 54 | -------------------------------------------------------------------------------- /lambda/conf/application-logger.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | %coloredLevel - %logger - %message%n%xException 6 | 7 | 8 | 9 | lambda.log 10 | 11 | %date %level [%thread] %logger{10} [%file:%line] %msg%n 12 | 13 | 14 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /lambda/conf/application.conf: -------------------------------------------------------------------------------- 1 | # This is the main configuration file for the application. 2 | # ~~~~~ 3 | 4 | #internal-threadpool-size=4 5 | #akka.log-config-on-start = on 6 | 7 | #play { 8 | # akka { 9 | # actor { 10 | # default-dispatcher = { 11 | # fork-join-executor { 12 | # parallelism-factor = 20.0 13 | # parallelism-max = 24 14 | # } 15 | # } 16 | # } 17 | # } 18 | #} 19 | 20 | 21 | # Secret key 22 | # ~~~~~ 23 | # The secret key is used to secure cryptographics functions. 24 | # 25 | # This must be changed for production, but we recommend not changing it in this file. 26 | # 27 | # See http://www.playframework.com/documentation/latest/ApplicationSecret for more details. 28 | play.crypto.secret = "changeme" 29 | 30 | # The application languages 31 | # ~~~~~ 32 | play.i18n.langs = [ "en" ] 33 | 34 | application.global=com.galacticfog.gestalt.lambda.Global 35 | 36 | database.clean = false 37 | database.clean = ${?LAMBDA_FLYWAY_CLEAN} 38 | database.migrate = false 39 | database.migrate = ${?LAMBDA_FLYWAY_MIGRATE} 40 | 41 | # Router 42 | # ~~~~~ 43 | # Define the Router object to use for this application. 44 | # This router will be looked up first when the application is starting up, 45 | # so make sure this is the entry point. 46 | # Furthermore, it's assumed your route file is named properly. 47 | # So for an application router like `my.application.Router`, 48 | # you may need to define a router file `conf/my.application.routes`. 49 | # Default to Routes in the root package (and conf/routes) 50 | # play.http.router = my.application.Routes 51 | 52 | # Database configuration 53 | # ~~~~~ 54 | # You can declare as many datasources as you want. 
55 | # By convention, the default datasource is named `default` 56 | # 57 | # db.default.driver=org.h2.Driver 58 | # db.default.url="jdbc:h2:mem:play" 59 | # db.default.username=sa 60 | # db.default.password="" 61 | 62 | # Evolutions 63 | # ~~~~~ 64 | # You can disable evolutions if needed 65 | # play.evolutions.enabled=false 66 | 67 | # You can disable evolutions for a specific datasource if necessary 68 | # play.evolutions.db.default.enabled=false 69 | 70 | #akka 71 | akka.loglevel=DEBUG 72 | akka.debug.receive=ON 73 | -------------------------------------------------------------------------------- /lambda/conf/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | %coloredLevel - %logger - %message%n%xException 8 | 9 | 10 | 11 | 15 | 16 | 17 | 18 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /lambda/conf/routes: -------------------------------------------------------------------------------- 1 | # Routes 2 | # This file defines all application routes (Higher priority routes first) 3 | # ~~~~ 4 | 5 | # Home page 6 | GET /health controllers.Application.getHealth 7 | 8 | 9 | GET /lambdas/:id controllers.Application.getLambda( id : String ) 10 | GET /lambdas controllers.Application.searchLambdas 11 | POST /lambdas controllers.Application.createLambda 12 | PUT /lambdas/:id controllers.Application.updateLambda( id : String ) 13 | DELETE /lambdas/:id controllers.Application.deleteLambda( id : String ) 14 | 15 | POST /lambdas/:id/invoke controllers.Application.invokeLambda( id : String ) 16 | GET /lambdas/:id/invoke controllers.Application.invokeLambdaSyncNoBody( id : String ) 17 | # TODO : browse should not cache this request - 18 | 19 | POST /lambdas/:id/invokeSync controllers.Application.invokeLambdaSync( id : String ) 20 | GET /lambdas/:id/invokeSync controllers.Application.invokeLambdaSyncNoBody( id : String ) 21 | 22 | POST /lambdas/:id/invalidate controllers.Application.invalidateCache( id : String ) 23 | 24 | GET /results/:id controllers.Application.getResult( id : String ) 25 | 26 | # Map static resources from the /public folder to the /assets URL path 27 | GET /assets/*file controllers.Assets.versioned(path="/public", file: Asset) 28 | -------------------------------------------------------------------------------- /lambda/create_test_db.sh: -------------------------------------------------------------------------------- 1 | docker run --name lambdadb -d -p 5432:5432 -e DB_NAME="gestaltlambdatestdb" -e DB_USER=gestaltdev -e DB_PASS=M8keitw0rk galacticfog.artifactoryonline.com/centos7postgresql944:latest 2 | -------------------------------------------------------------------------------- /lambda/deleteAllLambdas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | http lambda.dev.galacticfog.com/lambdas | jq ".[] | .id" | sed -e 's/"//g' | xargs -I '{}' http DELETE lambda.dev.galacticfog.com/lambdas/'{}' 4 | -------------------------------------------------------------------------------- /lambda/docker-build-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | # set -x 5 | 6 | 7 | export IMG=galacticfog.artifactoryonline.com/gestalt-lambda 8 | 9 | export SHA=$(git rev-parse --short=8 HEAD) 10 | export VER=$(grep "^version" build.sbt | sed 's/.*:=[ ]*//' | sed 's/"//g') 11 | export TAG=$VER-$SHA 12 | 13 | echo "Building 
$TAG" 14 | 15 | echo "Creating build image..." 16 | sbt docker:stage 17 | cd target/docker/docker/stage 18 | docker build -t $IMG:$TAG . 19 | echo "Pushing new image to artifactory..." 20 | docker push $IMG:$TAG 21 | docker tag $IMG:$TAG $IMG:$VER 22 | docker push $IMG:$VER 23 | -------------------------------------------------------------------------------- /lambda/gestalt-security.conf: -------------------------------------------------------------------------------- 1 | { 2 | "protocol": "http", 3 | "hostname": "localhost", 4 | "port": 9455, 5 | "apiKey": "root", 6 | "appId": "95bf63d1-f285-4ece-a9ef-34f4999afe65", 7 | "apiSecret": "letmein" 8 | } 9 | -------------------------------------------------------------------------------- /lambda/gestalt.conf: -------------------------------------------------------------------------------- 1 | { 2 | "meta": "http://localhost:14374", 3 | "org": "com.galacticfog", 4 | "id": "316", 5 | "version": "0.1.0-SNAPSHOT", 6 | "node_id": 9, 7 | "env": { 8 | "name": "Module-DEV", 9 | "tag": "DEV" 10 | }, 11 | "secret": "9f57a371065545e993684e2c53070b1f" 12 | } 13 | -------------------------------------------------------------------------------- /lambda/local_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "stream-listener": { 3 | "host": "events.galacticfog.com", 4 | "port": 2181, 5 | "channel": "brad_vatomic/lambda", 6 | "read_from_beginning" : false 7 | }, 8 | "vertx-adapter" : { 9 | "protocol" : "http", 10 | "host" : "localhost", 11 | "port" : 9099, 12 | "username" : "root", 13 | "password" : "letmein" 14 | }, 15 | "database" : { 16 | "host" : "localhost", 17 | "port" : 5432, 18 | "db_name" : "gestaltlambda", 19 | "username" : "gestaltdev", 20 | "password" : "M8keitw0rk" 21 | }, 22 | "gfi-adapter": { 23 | "host": "54.173.172.97", 24 | "port" : 5050, 25 | "role" : "*", 26 | "name" : "prod-lambda-scheduler" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /lambda/localrc: -------------------------------------------------------------------------------- 1 | export LAMBDA_FLYWAY_MIGRATE=true 2 | export LAMBDA_FLYWAY_CLEAN=false 3 | export LAMBDA_DATABASE_HOSTNAME=localhost 4 | export LAMBDA_DATABASE_PORT=5432 5 | export LAMBDA_DATABASE_NAME=gestaltlambda 6 | export LAMBDA_DATABASE_USER=gestaltdev 7 | export LAMBDA_DATABASE_PASSWORD=M8keitw0rk 8 | #export MESOS_MASTER_CONNECTION=zk://ec2-54-173-172-97.compute-1.amazonaws.com/mesos 9 | export MESOS_MASTER_CONNECTION=192.168.200.20:5050 10 | export MESOS_ROLE=* 11 | export SCHEDULER_NAME=lambda-scheduler 12 | 13 | #security config 14 | export GESTALT_SECURITY_PROTOCOL=https 15 | export GESTALT_SECURITY_HOSTNAME=security.test.galacticfog.com 16 | export GESTALT_SECURITY_PORT=9455 17 | export GESTALT_SECURITY_KEY=4eecb4dc-7658-432d-8504-b3fb538d91a3 18 | export GESTALT_SECURITY_SECRET=Rd8MublhWS21f5b2+BkD/yuwGvRNyypcjCDNp55c 19 | 20 | #export JAVA_OPTS=-javaagent:/Users/bradfutch/work/gfi/gestalt-lambda/lambda/newrelic/newrelic.jar 21 | -------------------------------------------------------------------------------- /lambda/payloads/alt.dotnet.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter": "com.galacticfog.dotnet.AltHelloWorld", 3 | "artifactDescription": { 4 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/alt_hello_world.zip", 5 | "description": "super simple hellow world lambda", 6 | "functionName": "dontmatter", 7 | "handler": 
"please", 8 | "memorySize": 1024, 9 | "cpus": 0.2, 10 | "publish": false, 11 | "role": "arn:aws:iam::245814043176:role/GFILambda", 12 | "runtime": "dotnet", 13 | "timeoutSecs": 180 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /lambda/payloads/aws.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter": "com.test.aws.HelloWorld", 3 | "artifactDescription": { 4 | "s3Bucket": "gfi.lambdas", 5 | "s3Name": "hello_world.zip", 6 | "description": "super simple hellow world lambda", 7 | "functionName": "hello", 8 | "handler": "hello_world.hello", 9 | "memorySize": 128, 10 | "publish": false, 11 | "role": "arn:aws:iam::245814043176:role/GFILambda", 12 | "runtime": "nodejs", 13 | "timeoutSecs": 3 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /lambda/payloads/aws.event.json: -------------------------------------------------------------------------------- 1 | { "eventName" : "com.test.aws.HelloWorld", "data" : { "eventName" : "com.test.aws.HelloWorld", "data" : { "test" : "data" } } } 2 | -------------------------------------------------------------------------------- /lambda/payloads/aws.hello_world.json: -------------------------------------------------------------------------------- 1 | { 2 | "Code": { 3 | "S3Bucket": "gfi.lambdas", 4 | "S3Key": "hello_world.zip" 5 | }, 6 | "Description": "Simple test of AWS Lambda", 7 | "FunctionName": "hello_world", 8 | "Handler": "hello_world.js", 9 | "MemorySize": 128, 10 | "Publish": true, 11 | "Role": "arn:aws:iam::245814043176:role/aws-elasticbeanstalk-ec2-role", 12 | "Runtime": "nodejs", 13 | "Timeout": 3 14 | } 15 | -------------------------------------------------------------------------------- /lambda/payloads/dotnet.call.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter": "com.galacticfog.dotnet.Call", 3 | "artifactDescription": { 4 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/dotnet_call.zip", 5 | "description": "super simple twilio call lambda for dotnet", 6 | "functionName": "dontmatter", 7 | "handler": "dotnet_call/bin/Debug/dnxcore50/ubuntu1404_x64/publish/dotnet_call", 8 | "memorySize": 512, 9 | "cpus": 0.2, 10 | "publish": false, 11 | "role": "arn:aws:iam::245814043176:role/GFILambda", 12 | "runtime": "dotnet", 13 | "timeoutSecs": 180 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /lambda/payloads/dotnet.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter": "com.galacticfog.dotnet.HelloWorld", 3 | "artifactDescription": { 4 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/dotnet_hello_world.zip", 5 | "description": "super simple hellow world lambda", 6 | "functionName": "dontmatter", 7 | "handler": "bin/Debug/dnxcore50/ubuntu.14.04-x64/dotnet_call", 8 | "memorySize": 1024, 9 | "cpus": 0.2, 10 | "publish": false, 11 | "role": "arn:aws:iam::245814043176:role/GFILambda", 12 | "runtime": "dotnet", 13 | "timeoutSecs": 180 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /lambda/payloads/event.json: -------------------------------------------------------------------------------- 1 | { "eventName" : "com.vatomic.nascar.TestReactor.8675309", "data" : { "eventName" : "com.vatomic.nascar.TestReactor.8675309", "data" : { "test" : "data" } } } 
2 | -------------------------------------------------------------------------------- /lambda/payloads/java.call.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "6d401a9a-caaf-4001-b020-1a992d6ecdb8", 3 | "eventFilter": "com.test.laser.JavaCallTest", 4 | "artifactDescription": { 5 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/java-call-lambda-1.0-SNAPSHOT-fat.jar", 6 | "description": "super simple java twilio call lambda", 7 | "functionName": "call", 8 | "handler": "com.galacticfog.test.JavaTestLambda", 9 | "memorySize": 1024, 10 | "cpus": 0.2, 11 | "publish": false, 12 | "role": "arn:aws:iam::245814043176:role/GFILambda", 13 | "runtime": "java", 14 | "timeoutSecs": 180 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /lambda/payloads/java.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter": "com.vatomic.nascar.TestReactor", 3 | "artifactDescription": { 4 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/test-reactor-1.0-SNAPSHOT-fat.jar", 5 | "description": "super simple hellow world lambda", 6 | "functionName": "react", 7 | "handler": "com.vatomic.nascar.TestReactor", 8 | "memorySize": 1024, 9 | "cpus": 0.2, 10 | "publish": false, 11 | "role": "arn:aws:iam::245814043176:role/GFILambda", 12 | "runtime": "java", 13 | "timeoutSecs": 180 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /lambda/payloads/java.sleep.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter": "com.test.laser.JavaSleepTest", 3 | "id" : "sleep", 4 | "artifactDescription": { 5 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/java-sleep-lambda-1.0-SNAPSHOT-fat.jar", 6 | "description": "super simple wimple java sleep lambda", 7 | "functionName": "call", 8 | "handler": "com.galacticfog.test.JavaTestLambda", 9 | "memorySize": 256, 10 | "cpus": 0.2, 11 | "publish": false, 12 | "role": "arn:aws:iam::245814043176:role/GFILambda", 13 | "runtime": "java", 14 | "timeoutSecs": 180 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /lambda/payloads/js.call.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "78ec41f9-119e-4c6d-bfbf-72808113c4d5", 3 | "eventFilter": "com.test.laser.CallTest", 4 | "artifactDescription": { 5 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/js_call.zip", 6 | "description": "super simple twilio call lambda", 7 | "functionName": "call", 8 | "handler": "js_call.js", 9 | "memorySize": 1024, 10 | "cpus": 0.2, 11 | "publish": false, 12 | "role": "arn:aws:iam::245814043176:role/GFILambda", 13 | "runtime": "nodejs", 14 | "timeoutSecs": 180, 15 | "headers" : [ 16 | { "key" : "Accept", 17 | "value" : "text/html" 18 | } 19 | ] 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /lambda/payloads/js.hello_world.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter": "com.test.laser.HelloWorld", 3 | "artifactDescription": { 4 | "artifactUri": "https://s3.amazonaws.com/gfi.lambdas/hello_world.zip", 5 | "description": "super simple hellow world lambda", 6 | "functionName": "hello", 7 | "handler": "hello_world.js", 8 | "memorySize": 1024, 9 | "cpus": 0.2, 10 | "publish": false, 11 | "role": 
"arn:aws:iam::245814043176:role/GFILambda", 12 | "runtime": "nodejs", 13 | "timeoutSecs": 180, 14 | "headers" : [ { "key" : "Accept", "value" : "text/html" } ] 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /lambda/payloads/js.inline.hello.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "d6483459-ce3e-43ca-8068-83183a42f9ea", 3 | "eventFilter": "FAKE1", 4 | "public" : true, 5 | "artifactDescription": { 6 | "code": "ZnVuY3Rpb24gaGVsbG8oZXZlbnQsIGNvbnRleHQpIHsNCiAgLy8gQ2FsbCB0aGUgY29uc29sZS5sb2cgZnVuY3Rpb24uDQogIGNvbnNvbGUubG9nKCJIZWxsbyBXb3JsZCIpOw0KICB2YXIgZW52ID0gamF2YS5sYW5nLlN5c3RlbS5nZXRlbnYoKTsNCiAgdmFyIHRlc3QgPSBlbnYuZ2V0KCJNRVRBX1RFU1QiKTsNCiAgcmV0dXJuICI8aHRtbD48aGVhZD48L2hlYWQ+PGJvZHk+PGgxPjxjZW50ZXI+SEVMTE8gV09STEQgSU5MSU5FIENPREUhISAtICIgKyB0ZXN0ICsgIiA8aHI+PC9oMT48YnI+PGg0PlNlcnZlcmxlc3Mgd2VicGFnZSEhITwvaDQ+PGJyPjxibGluaz53MDB0PC9ibGluaz48L2NlbnRlcj48L2JvZHk+PC9odG1sPiI7DQp9Ow==", 7 | "description": "super simple twilio call lambda", 8 | "functionName": "hello", 9 | "handler": "hello.js", 10 | "memorySize": 511, 11 | "cpus": 0.2, 12 | "publish": false, 13 | "role": "arn:aws:iam::245814043176:role/GFILambda", 14 | "runtime": "nodejs", 15 | "timeoutSecs": 180, 16 | "headers" : [ 17 | { 18 | "key" : "Accept", 19 | "value" : "text/html" 20 | } 21 | ] 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /lambda/payloads/kill.framework.json: -------------------------------------------------------------------------------- 1 | frameworkId=108df187-4ee6-41fb-9c06-a02cbdbdfd7c-0142 2 | -------------------------------------------------------------------------------- /lambda/payloads/lambda.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventFilter" : "lambda.com.vatomic.nascar.TestReactor.8675309", 3 | "artifactDescription" : { 4 | "verticleName": "lambda.com.vatomic.nascar.TestReactor", 5 | "artifactUri": "file:///Users/bradfutch/work/gfi/test-reactor/target/test-reactor-1.0-SNAPSHOT-fat.jar", 6 | "timeOut" : 5000, 7 | "eventFilter": "vertx.vatomic.com.nascar.stickerpack.gift", 8 | "policyConfig": { 9 | "metaConfig" : { 10 | "host" : "localhost", 11 | "port" : 14374, 12 | "protocol" : "http", 13 | "user" : "root", 14 | "password" : "letmein" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /lambda/payloads/lambda_marathon.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "gestalt-lambda", 3 | "args": ["-Dhttp.port=31899", "-J-Xmx1024m"], 4 | "env": { 5 | "GESTALT_VERSION": "1.0", 6 | "GESTALT_SECURITY_APPID": "bd96d05a-7065-4fa2-bea2-98beebe8ebe4", 7 | "GESTALT_ENV": "appliance; DEV", 8 | "GESTALT_SECURITY_PORT": "9455", 9 | "GESTALT_SECURITY_HOSTNAME": "v2.watercoins.io", 10 | "GESTALT_SECURITY_SECRET": "M8keitw0rk", 11 | "MESOS_NATIVE_JAVA_LIBRARY": "/usr/lib/libmesos.so", 12 | "GESTALT_SECURITY_PROTOCOL": "http", 13 | "GESTALT_NODE_ID": "0", 14 | "GESTALT_META": "http://wrong:1234", 15 | "GESTALT_SECURITY_KEY": "admin", 16 | "GESTALT_LOCAL": "/opt/docker/conf/local_config.json", 17 | "GESTALT_SECRET": "secret", 18 | "GESTALT_ID": "bd96d05a-7065-4fa2-bea2-98beebe8ebe4", 19 | "GESTALT_ORG": "com.galacticfog", 20 | "LAMBDA_FLYWAY_MIGRATE": "true", 21 | "CACHE_CHECK_SECONDS" : "30", 22 | "CACHE_EXPIRE_SECONDS" : "900" 23 | }, 24 | "instances": 1, 25 | 
"cpus": 0.2, 26 | "mem": 1024, 27 | "ports": [ 31899 ], 28 | "requirePorts": true, 29 | "container": { 30 | "type": "DOCKER", 31 | "volumes": [ 32 | { 33 | "containerPath": "/opt/docker/conf/local_config.json", 34 | "hostPath": "/home/centos/LambdaConfig.json", 35 | "mode": "RO" 36 | } 37 | ], 38 | "docker": { 39 | "image": "galacticfog.artifactoryonline.com/gestalt-lambda:1.0-SNAPSHOT-1b8514d4", 40 | "network": "HOST", 41 | "forcePullImage": true 42 | } 43 | }, 44 | "healthChecks": [ 45 | { 46 | "path": "/health", 47 | "protocol": "HTTP", 48 | "portIndex": 0, 49 | "gracePeriodSeconds": 300, 50 | "intervalSeconds": 60, 51 | "timeoutSeconds": 20, 52 | "maxConsecutiveFailures": 3, 53 | "ignoreHttp1xx": false 54 | } 55 | ], 56 | "labels": { 57 | "HAPROXY_0_VHOST": "lambda.galacticfog.com", 58 | "HAPROXY_0_PORT": "7777" 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /lambda/payloads/lambda_marathon_aqr.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "gestalt-framework/lambda", 3 | "args": [ 4 | "-Dhttp.port=31901", 5 | "-Dlogger.file=/opt/docker/conf/logback.xml", 6 | "-J-Xmx1024m" 7 | ], 8 | "env": { 9 | "GESTALT_SECURITY_PROTOCOL": "https", 10 | "GESTALT_SECURITY_PORT": "443", 11 | "GESTALT_SECURITY_HOSTNAME": "security.aqr.galacticfog.com", 12 | "GESTALT_SECURITY_KEY" : "00b80df2-42e7-45c2-b53d-f29bcaa248cd", 13 | "GESTALT_SECURITY_SECRET" : "DkYvvpwNmrCTWX1/XxfeLODexm3BZqW52uqFvZJO", 14 | "MESOS_NATIVE_JAVA_LIBRARY": "/usr/lib/libmesos.so", 15 | "MESOS_NATIVE_LIBRARY": "/usr/lib/libmesos.so", 16 | "LAMBDA_FLYWAY_MIGRATE": "true", 17 | "LAMBDA_FLYWAY_CLEAN": "true", 18 | "LAMBDA_DATABASE_HOSTNAME": "aqr-poc.cocpivpulpxy.us-east-1.rds.amazonaws.com", 19 | "LAMBDA_DATABASE_PORT": "5432", 20 | "LAMBDA_DATABASE_NAME": "lambda", 21 | "LAMBDA_DATABASE_USER": "aqrdev", 22 | "LAMBDA_DATABASE_PASSWORD": "CT2YVFtLStzznj7u3iVx", 23 | "META_PROTOCOL": "http", 24 | "META_HOSTNAME": "10.0.0.3", 25 | "META_PORT": "80", 26 | "META_USER" : "00b80df2-42e7-45c2-b53d-f29bcaa248cd", 27 | "META_PASSWORD" : "DkYvvpwNmrCTWX1/XxfeLODexm3BZqW52uqFvZJO", 28 | "MESOS_MASTER_CONNECTION": "zk://master.mesos:2181/mesos", 29 | "MESOS_ROLE": "*", 30 | "SCHEDULER_NAME": "lambda-aqr-scheduler", 31 | "CACHE_EXPIRE_SECONDS": "900", 32 | "NEW_RELIC_LICENSE_KEY" : "64300aae4a006efc6fa13ab9f88386f186707003", 33 | "OFFER_TTL" : "5", 34 | "MAX_LAMBDAS_PER_OFFER" : "6" 35 | }, 36 | "instances": 1, 37 | "cpus": 0.5, 38 | "mem": 1300, 39 | "ports": [ 40 | 31901 41 | ], 42 | "requirePorts": true, 43 | "container": { 44 | "type": "DOCKER", 45 | "volumes": [], 46 | "docker": { 47 | "image": "galacticfog.artifactoryonline.com/gestalt-lambda:1.0.3-SNAPSHOT-33c6837c", 48 | "network": "HOST", 49 | "forcePullImage": true 50 | } 51 | }, 52 | "healthChecks": [ 53 | { 54 | "path": "/health", 55 | "protocol": "HTTP", 56 | "portIndex": 0, 57 | "gracePeriodSeconds": 300, 58 | "intervalSeconds": 60, 59 | "timeoutSeconds": 20, 60 | "maxConsecutiveFailures": 3, 61 | "ignoreHttp1xx": false 62 | } 63 | ], 64 | "labels": { 65 | "HAPROXY_0_VHOST": "lambda.aqr.galacticfog.com", 66 | "HAPROXY_0_PORT" : "11111", 67 | "HAPROXY_GROUP" : "external" 68 | }, 69 | "portDefinitions": [ 70 | { 71 | "port": 31901, 72 | "protocol": "tcp", 73 | "name": "service", 74 | "labels": { 75 | "VIP_0": "10.0.20.1:80" 76 | } 77 | } 78 | ] 79 | } 80 | -------------------------------------------------------------------------------- /lambda/payloads/lambda_marathon_dev.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "id" : "dev/gestalt-lambda", 3 | "args": ["-Dhttp.port=31900","-J-Xmx1024m"], 4 | "env": { 5 | "GESTALT_VERSION": "1.0", 6 | "GESTALT_SECURITY_APPID": "bd96d05a-7065-4fa2-bea2-98beebe8ebe4", 7 | "GESTALT_ENV": "appliance; DEV", 8 | "GESTALT_SECURITY_PORT": "9455", 9 | "GESTALT_SECURITY_HOSTNAME": "v2.watercoins.io", 10 | "GESTALT_SECURITY_SECRET": "M8keitw0rk", 11 | "MESOS_NATIVE_JAVA_LIBRARY": "/usr/lib/libmesos.so", 12 | "GESTALT_SECURITY_PROTOCOL": "http", 13 | "GESTALT_NODE_ID": "0", 14 | "GESTALT_META": "http://wrong:1234", 15 | "GESTALT_SECURITY_KEY": "admin", 16 | "GESTALT_LOCAL": "/opt/docker/conf/local_config.json", 17 | "GESTALT_SECRET": "secret", 18 | "GESTALT_ID": "bd96d05a-7065-4fa2-bea2-98beebe8ebe4", 19 | "GESTALT_ORG": "com.galacticfog", 20 | "LAMBDA_FLYWAY_MIGRATE": "true", 21 | "CACHE_CHECK_SECONDS" : "30", 22 | "CACHE_EXPIRE_SECONDS" : "900" 23 | }, 24 | "instances": 1, 25 | "cpus": 0.2, 26 | "mem": 1024, 27 | "ports": [ 31900 ], 28 | "requirePorts": true, 29 | "container": { 30 | "type": "DOCKER", 31 | "volumes": [ 32 | { 33 | "containerPath": "/opt/docker/conf/local_config.json", 34 | "hostPath": "/home/centos/LambdaConfigDev.json", 35 | "mode": "RO" 36 | } 37 | ], 38 | "docker": { 39 | "image": "galacticfog.artifactoryonline.com/gestalt-lambda:latest", 40 | "network": "HOST", 41 | "forcePullImage": true 42 | } 43 | }, 44 | "healthChecks": [ 45 | { 46 | "path": "/health", 47 | "protocol": "HTTP", 48 | "portIndex": 0, 49 | "gracePeriodSeconds": 300, 50 | "intervalSeconds": 60, 51 | "timeoutSeconds": 20, 52 | "maxConsecutiveFailures": 3, 53 | "ignoreHttp1xx": false 54 | } 55 | ], 56 | "labels": { 57 | "HAPROXY_0_VHOST": "lambda.dev.galacticfog.com", 58 | "HAPROXY_0_PORT": "17777" 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /lambda/payloads/lambda_marathon_test.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "gestalt-lambda-test", 3 | "args": [ 4 | "-Dhttp.port=31901", 5 | "-Dlogger.file=/opt/docker/conf/logback.xml", 6 | "-J-Xmx1024m" 7 | ], 8 | "env": { 9 | "GESTALT_SECURITY_PROTOCOL": "https", 10 | "GESTALT_SECURITY_PORT": "9455", 11 | "GESTALT_SECURITY_HOSTNAME": "security.test.galacticfog.com", 12 | "GESTALT_SECURITY_KEY" : "4eecb4dc-7658-432d-8504-b3fb538d91a3", 13 | "GESTALT_SECURITY_SECRET" : "Rd8MublhWS21f5b2+BkD/yuwGvRNyypcjCDNp55c", 14 | "MESOS_NATIVE_JAVA_LIBRARY": "/usr/lib/libmesos.so", 15 | "MESOS_NATIVE_LIBRARY": "/usr/lib/libmesos.so", 16 | "LAMBDA_FLYWAY_MIGRATE": "true", 17 | "LAMBDA_DATABASE_HOSTNAME": "gestalt-dev.crqimf2biim3.us-east-1.rds.amazonaws.com", 18 | "LAMBDA_DATABASE_PORT": "5432", 19 | "LAMBDA_DATABASE_NAME": "gestaltlambdatest", 20 | "LAMBDA_DATABASE_USER": "gestaltdev", 21 | "LAMBDA_DATABASE_PASSWORD": "M8keitw0rk", 22 | "META_PROTOCOL": "https", 23 | "META_HOSTNAME": "meta.test.galacticfog.com", 24 | "META_PORT": "14374", 25 | "META_USER" : "4eecb4dc-7658-432d-8504-b3fb538d91a3", 26 | "META_PASSWORD" : "Rd8MublhWS21f5b2+BkD/yuwGvRNyypcjCDNp55c", 27 | "MESOS_MASTER_CONNECTION": "zk://master.mesos:2181/mesos", 28 | "MESOS_ROLE": "*", 29 | "SCHEDULER_NAME": "lambda-test-scheduler", 30 | "CACHE_CHECK_SECONDS": "30", 31 | "CACHE_EXPIRE_SECONDS": "900", 32 | "NEW_RELIC_LICENSE_KEY" : "64300aae4a006efc6fa13ab9f88386f186707003", 33 | "OFFER_TTL" : "5", 34 | "MAX_LAMBDAS_PER_OFFER" : "6" 35 | }, 36 | "instances": 1, 37 | "cpus": 2.0, 38 | "mem": 
1300, 39 | "ports": [ 40 | 31901 41 | ], 42 | "requirePorts": true, 43 | "container": { 44 | "type": "DOCKER", 45 | "volumes": [], 46 | "docker": { 47 | "image": "galacticfog.artifactoryonline.com/gestalt-lambda:1.0.3-SNAPSHOT-3074f26d", 48 | "network": "HOST", 49 | "forcePullImage": true 50 | } 51 | }, 52 | "healthChecks": [ 53 | { 54 | "path": "/health", 55 | "protocol": "HTTP", 56 | "portIndex": 0, 57 | "gracePeriodSeconds": 300, 58 | "intervalSeconds": 60, 59 | "timeoutSeconds": 20, 60 | "maxConsecutiveFailures": 3, 61 | "ignoreHttp1xx": false 62 | } 63 | ], 64 | "labels": { 65 | "HAPROXY_0_VHOST": "lambda.test.galacticfog.com", 66 | "HAPROXY_GROUP": "external" 67 | }, 68 | "portDefinitions": [ 69 | { 70 | "port": 31901, 71 | "protocol": "tcp", 72 | "name": "service", 73 | "labels": { 74 | "VIP_0": "10.0.20.1:80" 75 | } 76 | } 77 | ] 78 | } 79 | -------------------------------------------------------------------------------- /lambda/payloads/laser.event.json: -------------------------------------------------------------------------------- 1 | { "eventName" : "com.test.laser.HelloWorld", "data" : { "eventName" : "com.test.laser.HelloWorld", "data" : { "test" : "data" } } } 2 | -------------------------------------------------------------------------------- /lambda/payloads/remote_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "stream-listener": { 3 | "host": "events.galacticfog.com", 4 | "port": 2181, 5 | "channel": "brad_vatomic/lambda", 6 | "read_from_beginning": false 7 | }, 8 | "vertx-adapter": { 9 | "protocol": "http", 10 | "host": "localhost", 11 | "port": 9099, 12 | "username": "root", 13 | "password": "letmein" 14 | }, 15 | "database": { 16 | "host": "gestalt-dev.crqimf2biim3.us-east-1.rds.amazonaws.com", 17 | "port": 5432, 18 | "db_name": "gestaltlambda", 19 | "username": "gestaltdev", 20 | "password": "M8keitw0rk" 21 | }, 22 | "gfi-adapter": { 23 | "host": "v2.galacticfog.com", 24 | "port": 5050, 25 | "role" : "prod_scheduler", 26 | "name" : "prod-lambda-scheduler" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /lambda/payloads/remote_config_dev.json: -------------------------------------------------------------------------------- 1 | { 2 | "stream-listener": { 3 | "host": "events.galacticfog.com", 4 | "port": 2181, 5 | "channel": "brad_vatomic/lambda", 6 | "read_from_beginning": false 7 | }, 8 | "vertx-adapter": { 9 | "protocol": "http", 10 | "host": "localhost", 11 | "port": 9099, 12 | "username": "root", 13 | "password": "letmein" 14 | }, 15 | "database": { 16 | "host": "gestalt-dev.crqimf2biim3.us-east-1.rds.amazonaws.com", 17 | "port": 5432, 18 | "db_name": "gestaltlambdadev", 19 | "username": "gestaltdev", 20 | "password": "M8keitw0rk" 21 | }, 22 | "gfi-adapter": { 23 | "host": "v2.galacticfog.com", 24 | "port": 5050, 25 | "role" : "dev_scheduler", 26 | "name" : "dev-lambda-scheduler" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /lambda/payloads/sleep.event.json: -------------------------------------------------------------------------------- 1 | { "eventName" : "com.test.laser.HelloWorld", "data" : { "sleepTime" : 30000 } } 2 | -------------------------------------------------------------------------------- /lambda/payloads/telnet_test.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "busybox", 3 | "cmd": "echo TEST && telnet 
gestalt-dev.crqimf2biim3.us-east-1.rds.amazonaws.com 5432", 4 | "env": { 5 | }, 6 | "instances": 1, 7 | "cpus": 0.2, 8 | "mem": 1024, 9 | "ports": [ 31900 ], 10 | "requirePorts": true, 11 | "container": { 12 | "type": "DOCKER", 13 | "volumes": [], 14 | "docker": { 15 | "image": "busybox:latest", 16 | "network": "HOST", 17 | "forcePullImage": true 18 | } 19 | }, 20 | "healthChecks": [], 21 | "labels": {} 22 | } 23 | -------------------------------------------------------------------------------- /lambda/payloads/unreserve.payload.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "cpus", 4 | "type": "SCALAR", 5 | "scalar": { 6 | "value": 1.20000000298023 7 | }, 8 | "role": "test", 9 | "reservation": { 10 | "principal": "foo" 11 | } 12 | }, 13 | { 14 | "name": "mem", 15 | "type": "SCALAR", 16 | "scalar": { 17 | "value": 1239 18 | }, 19 | "role": "test", 20 | "reservation": { 21 | "principal": "foo" 22 | } 23 | } 24 | ] 25 | -------------------------------------------------------------------------------- /lambda/payloads/watercoins_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "stream-listener": { 3 | "host": "v2.watercoins.io", 4 | "port": 2181, 5 | "channel": "brad_vatomic/lambda", 6 | "read_from_beginning" : false 7 | }, 8 | "vertx-adapter" : { 9 | "protocol" : "http", 10 | "host" : "v2.watercoins.io", 11 | "port" : 9099, 12 | "username" : "admin", 13 | "password" : "M8keitw0rk" 14 | }, 15 | "database" : { 16 | "host" : "gestalt.csf5fr9bpgp9.us-east-1.rds.amazonaws.com", 17 | "port" : 5432, 18 | "db_name" : "gestaltlambda", 19 | "username" : "gestaltdev", 20 | "password" : "GF4thewin!" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /lambda/project/build.properties: -------------------------------------------------------------------------------- 1 | #Activator-generated Properties 2 | #Mon Nov 09 09:10:00 CST 2015 3 | template.uuid=fce2d244-c545-409f-806e-7a0c3681b306 4 | sbt.version=0.13.8 5 | -------------------------------------------------------------------------------- /lambda/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | // The Play plugin 2 | addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.3.10") 3 | 4 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.0.2") 5 | 6 | addSbtPlugin("com.gilt.sbt" % "sbt-newrelic" % "0.1.4") 7 | 8 | addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.8.1") 9 | 10 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") 11 | -------------------------------------------------------------------------------- /lambda/public/images/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/lambda/public/images/favicon.png -------------------------------------------------------------------------------- /lambda/public/javascripts/hello.js: -------------------------------------------------------------------------------- 1 | if (window.console) { 2 | console.log("Welcome to your Play application's JavaScript!"); 3 | } -------------------------------------------------------------------------------- /lambda/public/stylesheets/main.css: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/lambda/public/stylesheets/main.css -------------------------------------------------------------------------------- /lambda/remote_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "stream-listener": { 3 | "host": "events.galacticfog.com", 4 | "port": 2181, 5 | "channel": "brad_vatomic/lambda", 6 | "read_from_beginning" : false 7 | }, 8 | "vertx-adapter" : { 9 | "protocol" : "http", 10 | "host" : "localhost", 11 | "port" : 9099, 12 | "username" : "root", 13 | "password" : "letmein" 14 | }, 15 | "database" : { 16 | "host" : "gestalt-dev.crqimf2biim3.us-east-1.rds.amazonaws.com", 17 | "port" : 5432, 18 | "db_name" : "gestaltlambdatest", 19 | "username" : "gestaltdev", 20 | "password" : "M8keitw0rk" 21 | }, 22 | "gfi-adapter": { 23 | "host": "54.173.172.97", 24 | "port" : 5050, 25 | "role" : "*", 26 | "name" : "prod-lambda-scheduler" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /lambda/test/ApplicationSpec.scala: -------------------------------------------------------------------------------- 1 | import org.specs2.mutable._ 2 | import org.specs2.runner._ 3 | import org.junit.runner._ 4 | 5 | import play.api.test._ 6 | import play.api.test.Helpers._ 7 | 8 | /** 9 | * Add your spec here. 10 | * You can mock out a whole application including requests, plugins etc. 11 | * For more information, consult the wiki. 12 | */ 13 | @RunWith(classOf[JUnitRunner]) 14 | class ApplicationSpec extends Specification { 15 | 16 | "Application" should { 17 | 18 | "send 404 on a bad request" in new WithApplication{ 19 | route(FakeRequest(GET, "/boum")) must beSome.which (status(_) == NOT_FOUND) 20 | } 21 | 22 | "render the index page" in new WithApplication{ 23 | val home = route(FakeRequest(GET, "/")).get 24 | 25 | status(home) must equalTo(OK) 26 | contentType(home) must beSome.which(_ == "text/html") 27 | contentAsString(home) must contain ("Your new application is ready.") 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /lambda/test/GlobalWithoutMeta.scala: -------------------------------------------------------------------------------- 1 | import com.galacticfog.gestalt.lambda.config.DatabaseConfig 2 | import com.galacticfog.gestalt.lambda.io.ScalikePostgresInfo 3 | import com.galacticfog.gestalt.meta.play.utils.GlobalMeta 4 | import org.apache.commons.dbcp.BasicDataSource 5 | import org.flywaydb.core.Flyway 6 | import play.api.Play.current 7 | import play.api.libs.json.{JsError, JsSuccess, Json} 8 | import play.api.{Application, GlobalSettings, Logger => log} 9 | import play.libs.Akka 10 | import scalikejdbc.{GlobalSettings, LoggingSQLAndTimeSettings} 11 | import scala.collection.JavaConverters._ 12 | 13 | import scala.util.{Failure, Success} 14 | 15 | object GlobalWithoutMeta extends GlobalSettings with GlobalMeta { 16 | 17 | GlobalSettings.loggingSQLAndTime = LoggingSQLAndTimeSettings( 18 | enabled = false, 19 | singleLineMode = false, 20 | printUnprocessedStackTrace = false, 21 | stackTraceDepth= 15, 22 | logLevel = 'connection, 23 | warningEnabled = false, 24 | warningThresholdMillis = 3000L, 25 | warningLogLevel = 'warn 26 | ) 27 | 28 | override def onStart( app : Application ) : Unit = { 29 | 30 | 31 | val databaseConfig = getDBConfig( "database" ) 32 | val connection = initDB( databaseConfig ) 33 | 34 | //now check if we're doing 
migration and cleaning 35 | val bClean = current.configuration.getBoolean( "database.clean" ) getOrElse false 36 | val bMigrate = current.configuration.getBoolean( "database.migrate" ) getOrElse false 37 | if( bMigrate ) migrate( connection, bClean, databaseConfig.username, databaseConfig.password ) 38 | 39 | sys.addShutdownHook( akkaShutdown ) 40 | 41 | log.debug( "app started" ) 42 | } 43 | 44 | def akkaShutdown = { 45 | } 46 | 47 | override def onStop( app : Application ): Unit = { 48 | akkaShutdown 49 | } 50 | 51 | def getDBConfig( name : String ) : DatabaseConfig = { 52 | log.debug( s"getDBConfig( $name )") 53 | 54 | current.configuration.getObject("database") match { 55 | case None => 56 | throw new RuntimeException("FATAL: Database configuration not found.") 57 | case Some(config) => { 58 | val configMap = config.unwrapped.asScala.toMap 59 | DatabaseConfig( 60 | host = configMap("host").toString, 61 | db_name = configMap("dbname").toString, 62 | port = configMap("port").toString.toInt, 63 | username = configMap("username").toString, 64 | password = configMap("password").toString) 65 | } 66 | } 67 | } 68 | 69 | def initDB( dbConfig : DatabaseConfig ) : ScalikePostgresInfo = { 70 | log.debug( "initDB()" ) 71 | new ScalikePostgresInfo( dbConfig.host, dbConfig.port, dbConfig.db_name, dbConfig.username, dbConfig.password ) 72 | } 73 | 74 | def getDataSource( connection : ScalikePostgresInfo ) = { 75 | val ds = new BasicDataSource() 76 | ds.setDriverClassName(connection.driver) 77 | ds.setUsername(connection.username.get) 78 | ds.setPassword(connection.password.get) 79 | ds.setUrl(connection.url()) 80 | log.debug("url: " + ds.getUrl) 81 | ds 82 | } 83 | 84 | def migrate( connection : ScalikePostgresInfo, bClean : Boolean, username : String, password : String ) = { 85 | log.debug( "migrate()" ) 86 | val fly = new Flyway() 87 | val dataSource = getDataSource( connection ) 88 | fly.setDataSource( dataSource ) 89 | if( bClean ) fly.clean() 90 | fly.migrate() 91 | } 92 | 93 | } 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /lambda/test/IntegrationSpec.scala: -------------------------------------------------------------------------------- 1 | import org.specs2.mutable._ 2 | import org.specs2.runner._ 3 | import org.junit.runner._ 4 | 5 | import play.api.test._ 6 | import play.api.test.Helpers._ 7 | 8 | /** 9 | * add your integration spec here. 
10 | * An integration test will fire up a whole play application in a real (or headless) browser 11 | */ 12 | @RunWith(classOf[JUnitRunner]) 13 | class IntegrationSpec extends Specification { 14 | 15 | "Application" should { 16 | 17 | "work from within a browser" in new WithBrowser { 18 | 19 | browser.goTo("http://localhost:" + port) 20 | 21 | browser.pageSource must contain("Your new application is ready.") 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /lambda/test/LambdaServiceSpec.scala: -------------------------------------------------------------------------------- 1 | import java.util.UUID 2 | 3 | import com.galacticfog.gestalt.lambda.io.domain.LambdaDao 4 | import com.galacticfog.gestalt.utils.json.JsonUtils._ 5 | import org.specs2.matcher.Matcher 6 | import play.api.libs.json._ 7 | import play.api.libs.ws.WS 8 | import play.api.test.{TestServer, FakeApplication, PlaySpecification} 9 | 10 | class LambdaServiceSpec extends PlaySpecification { 11 | 12 | val additionalConfig = Map( 13 | "database.host" -> scala.sys.env.getOrElse("TESTDB_HOST","localhost"), 14 | "database.dbname" -> scala.sys.env.getOrElse("TESTDB_DBNAME","gestaltlambdatestdb"), 15 | "database.port" -> scala.sys.env.getOrElse("TESTDB_PORT", "5432").toInt, 16 | "database.username" -> scala.sys.env.getOrElse("TESTDB_USERNAME", "testuser"), 17 | "database.password" -> scala.sys.env.getOrElse("TESTDB_PASSWORD","testpassword"), 18 | "database.migrate" -> true, 19 | "database.clean" -> true, 20 | "database.shutdownAfterMigrate" -> false 21 | ) 22 | println(additionalConfig) 23 | 24 | lazy val fakeApp = FakeApplication(additionalConfiguration = additionalConfig, withGlobal = Some(GlobalWithoutMeta)) 25 | lazy val server = TestServer(port = testServerPort, application = fakeApp) 26 | 27 | stopOnFail 28 | sequential 29 | 30 | step({ 31 | server.start() 32 | }) 33 | 34 | val client = WS.client(fakeApp) 35 | 36 | "Service" should { 37 | 38 | "return OK on /health" in { 39 | await(client.url(s"http://localhost:${testServerPort}/health").get()).status must equalTo(OK) 40 | } 41 | } 42 | 43 | lazy val lambdaId = UUID.randomUUID.toString 44 | lazy val lambdaName = "testLamba" 45 | lazy val lambdaJson = Json.obj( 46 | "id" -> lambdaId, 47 | "eventFilter" -> UUID.randomUUID.toString, 48 | "artifactDescription" -> Json.obj( 49 | "artifactUri" -> "https://s3.amazonaws.com/gfi.lambdas/hello_world.zip", 50 | "description" -> "super simple hellow world lambda", 51 | "functionName" -> "hello", 52 | "handler" -> "hello_world.js", 53 | "memorySize" -> 1024, 54 | "cpus" -> 0.2, 55 | "publish" -> false, 56 | "role" -> "arn:aws:iam::245814043176:role/GFILambda", 57 | "runtime" -> "nodejs", 58 | "timeoutSecs" -> 180 59 | ) 60 | ) 61 | 62 | lazy val lambda = parseAs[LambdaDao]( await( client.url( s"http://localhost:${testServerPort}/lambdas" ).post(lambdaJson) ).json, "Unable to create lambda" ) 63 | 64 | "Lambdas" should { 65 | 66 | "return a list of lambdas on /lambdas" in { 67 | val lambdas = await( client.url( s"http://localhost:${testServerPort}/lambdas" ).get( ) ).json 68 | lambdas.toString.compareTo( "[]" ) must_== 0 69 | } 70 | 71 | "allow creation of a lambda with arbitrary id" in { 72 | lambda.id.get must_== lambdaId 73 | } 74 | } 75 | 76 | "Deletes" should { 77 | 78 | "allow deleting a lambda" in { 79 | await(client.url(s"http://localhost:${testServerPort}/lambdas/${lambdaId}").delete()).status must equalTo(OK) 80 | } 81 | 82 | "return empty list on /lambdas after delete" in { 
83 | val lambdas = await(client.url(s"http://localhost:${testServerPort}/lambdas").get()).json 84 | lambdas.toString.compareTo( "[]" ) must_== 0 85 | } 86 | } 87 | 88 | 89 | 90 | 91 | step({ 92 | server.stop() 93 | }) 94 | 95 | } 96 | -------------------------------------------------------------------------------- /mesos_http/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /mesos_http/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-mesos-http""" 2 | 3 | organization := "com.galacticfog" 4 | 5 | version := "0.0.1-SNAPSHOT" 6 | 7 | scalaVersion := "2.11.6" 8 | 9 | credentials += Credentials(Path.userHome / ".ivy2" / ".credentials") 10 | 11 | publishTo := Some("Artifactory Realm" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local/") 12 | 13 | resolvers ++= Seq( 14 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 15 | "snapshots" at "http://scala-tools.org/repo-snapshots", 16 | "releases" at "http://scala-tools.org/repo-releases", 17 | "Akka Snapshot Repository" at "http://repo.akka.io/snapshots/", 18 | "Mesosphere Repo" at "http://downloads.mesosphere.io/maven", 19 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository", 20 | "Local Ivy Repository" at "file://"+Path.userHome.absolutePath+"/.ivy2/cache" 21 | ) 22 | 23 | credentials ++= { 24 | (for { 25 | realm <- sys.env.get("GESTALT_RESOLVER_REALM") 26 | username <- sys.env.get("GESTALT_RESOLVER_USERNAME") 27 | resolverUrlStr <- sys.env.get("GESTALT_RESOLVER_URL") 28 | resolverUrl <- scala.util.Try{url(resolverUrlStr)}.toOption 29 | password <- sys.env.get("GESTALT_RESOLVER_PASSWORD") 30 | } yield { 31 | Seq(Credentials(realm, resolverUrl.getHost, username, password)) 32 | }) getOrElse(Seq()) 33 | } 34 | 35 | resolvers ++= { 36 | sys.env.get("GESTALT_RESOLVER_URL") map { 37 | url => Seq("gestalt-resolver" at url) 38 | } getOrElse(Seq()) 39 | } 40 | 41 | // 42 | // Adds project name to prompt like in a Play project 43 | // 44 | shellPrompt in ThisBuild := { state => "\033[0;36m" + Project.extract(state).currentRef.project + "\033[0m] " } 45 | 46 | libraryDependencies ++= Seq ( 47 | "org.slf4j" % "slf4j-api" % "1.7.10", 48 | "ch.qos.logback" % "logback-classic" % "1.1.2", 49 | "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0", 50 | "mesosphere" %% "mesos-utils" % "0.28.0" withJavadoc(), 51 | "com.typesafe.play" %% "play-ws" % "2.3.9", 52 | "com.typesafe.play" %% "play-iteratees" % "2.3.9", 53 | "com.google.protobuf" % "protobuf-java" % "2.6.1" 54 | ) 55 | 56 | assemblyMergeStrategy in assembly := { 57 | case "META-INF/MANIFEST.MF" => MergeStrategy.discard 58 | case _ => MergeStrategy.first 59 | } 60 | -------------------------------------------------------------------------------- /mesos_http/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.5.0") 2 | 3 | addSbtPlugin("com.eed3si9n" % 
"sbt-assembly" % "0.14.1") 4 | 5 | -------------------------------------------------------------------------------- /mesos_http/src/main/resources/META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter: -------------------------------------------------------------------------------- 1 | com.galacticfog.gestalt.lambda.impl.GFILambdaAdapter 2 | -------------------------------------------------------------------------------- /mesos_http/src/main/scala/com/galacticfog/gestalt/mesos/http/GestaltSchedulerDriver.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl.http 2 | 3 | import com.galacticfog.gestalt.lambda.impl.http.InternalSchedulerDriver 4 | 5 | import org.apache.mesos.Protos.Credential 6 | import org.apache.mesos.Protos.FrameworkInfo 7 | import org.apache.mesos.{Protos, Scheduler, SchedulerDriver} 8 | 9 | class GestaltSchedulerDriver( scheduler : Scheduler, frameworkInfo : Protos.FrameworkInfo, master : String, implicitAcknowledgement : Boolean = true, credential : Credential = null ) extends 10 | InternalSchedulerDriver( scheduler, frameworkInfo, master, implicitAcknowledgement, credential ) with SchedulerDriver //with Closeable 11 | { 12 | 13 | } 14 | -------------------------------------------------------------------------------- /mesos_http/src/main/scala/com/galacticfog/gestalt/mesos/http/SubscribedResposne.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl.http 2 | 3 | case class Subscribed( framework_id : FrameworkID ) 4 | case class SubscribedResponseEnvelope( subscribed : Subscribed, `type` : String ) 5 | -------------------------------------------------------------------------------- /mesos_http/src/main/scala/com/galacticfog/gestalt/mesos/http/package.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl 2 | 3 | import org.apache.mesos.Protos 4 | import play.api.libs.json.Json 5 | 6 | package object http { 7 | 8 | implicit val offerIdParser = Json.format[OfferID] 9 | implicit val frameworkIdParser = Json.format[FrameworkID] 10 | implicit val agentIDParser = Json.format[AgentID] 11 | 12 | implicit val scalarParser = Json.format[Scalar] 13 | implicit val rangeParser = Json.format[Range] 14 | implicit val rangeEnvelopeParser = Json.format[RangeEnvelope] 15 | implicit val resourceParser = Json.format[Resource] 16 | 17 | implicit val addressParser = Json.format[Address] 18 | implicit val urlParser = Json.format[URL] 19 | 20 | implicit val offerParser = Json.format[Offer] 21 | implicit val caseParser = Json.format[OfferEnvelope] 22 | implicit val offerEventParser = Json.format[OfferEvent] 23 | 24 | implicit val subscribedParser = Json.format[Subscribed] 25 | implicit val subscribedEnvelopeParser = Json.format[SubscribedResponseEnvelope] 26 | 27 | implicit val envVarParser = Json.format[EnvironmentVariable] 28 | implicit val environmentParser = Json.format[Environment] 29 | 30 | implicit val commandUriParser = Json.format[CommandInfoUri] 31 | implicit val commandInfoParser = Json.format[CommandInfo] 32 | 33 | implicit val parameterParser = Json.format[Parameter] 34 | implicit val portMappingParser = Json.format[PortMapping] 35 | 36 | implicit val mesosAppcParser = Json.format[MesosAppC] 37 | implicit val dockerInfoParser = Json.format[DockerInfo] 38 | implicit val dockerParser = Json.format[Docker] 39 | implicit val 
imageDockerParser = Json.format[Image] 40 | implicit val mesosInfoParser = Json.format[MesosInfo] 41 | 42 | implicit val volumeParser = Json.format[Volume] 43 | implicit val ipAddressParser = Json.format[IPAddress] 44 | 45 | implicit val networkInfoParser = Json.format[NetworkInfo] 46 | implicit val containerInfoParser = Json.format[ContainerInfo] 47 | 48 | implicit val executorIdParser = Json.format[ExecutorID] 49 | implicit val executorInfoParser = Json.format[ExecutorInfo] 50 | 51 | implicit val taskIDParser = Json.format[TaskID] 52 | implicit val taskInfoParser = Json.format[TaskInfo] 53 | 54 | implicit val launchParser = Json.format[Launch] 55 | implicit val reserveParser = Json.format[Reserve] 56 | implicit val unreserveParser = Json.format[Unreserve] 57 | implicit val createParser = Json.format[Create] 58 | implicit val destroyParser = Json.format[Destroy] 59 | 60 | implicit val operationParser = Json.format[Operation] 61 | 62 | implicit val filterParser = Json.format[Filter] 63 | implicit val acceptParser = Json.format[Accept] 64 | implicit val acceptRequestParser = Json.format[AcceptRequest] 65 | 66 | } 67 | -------------------------------------------------------------------------------- /plugins/lambda-aws-plugin/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /plugins/lambda-aws-plugin/META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter: -------------------------------------------------------------------------------- 1 | com.galacticfog.gestalt.lambda.impl.AWSLambdaAdapter 2 | -------------------------------------------------------------------------------- /plugins/lambda-aws-plugin/README.md: -------------------------------------------------------------------------------- 1 | #com.galacticfog.gestalt.lambda.plugin.AWSAdapter 2 | 3 | This holds the plugin implementation for AWS Lambda Service 4 | -------------------------------------------------------------------------------- /plugins/lambda-aws-plugin/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-lambda-aws-plugin""" 2 | 3 | organization := "com.galacticfog" 4 | 5 | version := "0.0.1-SNAPSHOT" 6 | 7 | scalaVersion := "2.11.5" 8 | 9 | credentials += Credentials(Path.userHome / ".ivy2" / ".credentials") 10 | 11 | resolvers ++= Seq( 12 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 13 | "snapshots" at "http://scala-tools.org/repo-snapshots", 14 | "releases" at "http://scala-tools.org/repo-releases") 15 | 16 | credentials ++= { 17 | (for { 18 | realm <- sys.env.get("GESTALT_RESOLVER_REALM") 19 | username <- sys.env.get("GESTALT_RESOLVER_USERNAME") 20 | resolverUrlStr <- sys.env.get("GESTALT_RESOLVER_URL") 21 | resolverUrl <- scala.util.Try{url(resolverUrlStr)}.toOption 22 | password <- sys.env.get("GESTALT_RESOLVER_PASSWORD") 23 | } yield { 24 | Seq(Credentials(realm, resolverUrl.getHost, username, password)) 25 | }) getOrElse(Seq()) 26 | } 27 | 28 | resolvers ++= { 29 | sys.env.get("GESTALT_RESOLVER_URL") map { 30 | url => 
Seq("gestalt-resolver" at url) 31 | } getOrElse(Seq()) 32 | } 33 | 34 | // 35 | // Adds project name to prompt like in a Play project 36 | // 37 | shellPrompt in ThisBuild := { state => "\033[0;36m" + Project.extract(state).currentRef.project + "\033[0m] " } 38 | 39 | libraryDependencies ++= Seq ( 40 | "com.amazonaws" % "aws-java-sdk" % "1.10.44", 41 | "com.galacticfog" %% "gestalt-lambda-io" % "0.0.1-SNAPSHOT" withSources(), 42 | "com.galacticfog" %% "gestalt-lambda-plugin" % "0.0.1-SNAPSHOT" withSources(), 43 | "com.galacticfog" %% "gestalt-io" % "1.0.5-SNAPSHOT" withSources(), 44 | "org.slf4j" % "slf4j-api" % "1.7.10", 45 | "ch.qos.logback" % "logback-classic" % "1.1.2", 46 | "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0" 47 | ) 48 | 49 | assemblyMergeStrategy in assembly := { 50 | case "META-INF/MANIFEST.MF" => MergeStrategy.discard 51 | case _ => MergeStrategy.first 52 | } 53 | 54 | 55 | // 56 | // This bit here adds a mapping for the "package" task that will add our necessary ServiceLoader sauce to the JAR 57 | // 58 | 59 | mappings in (Compile, packageBin) <+= baseDirectory map { base => 60 | (base / "META-INF" / "services" / "com.galacticfog.gestalt.lambda.plugin.LambdaAdapter") -> "META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter" 61 | } 62 | -------------------------------------------------------------------------------- /plugins/lambda-aws-plugin/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.5.0") 2 | 3 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") 4 | 5 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/README.md: -------------------------------------------------------------------------------- 1 | #com.galacticfog.gestalt.lambda.plugin.GFIAdapter 2 | 3 | This holds the plugin implementation for Gestalt LASER : Lambda Application SErveR 4 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-lambda-gfi-plugin""" 2 | 3 | organization := "com.galacticfog" 4 | 5 | version := "0.2.8-SNAPSHOT" 6 | 7 | scalaVersion := "2.11.5" 8 | 9 | credentials += Credentials(Path.userHome / ".ivy2" / ".credentials") 10 | 11 | enablePlugins(NewRelic) 12 | 13 | resolvers ++= Seq( 14 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 15 | "snapshots" at "http://scala-tools.org/repo-snapshots", 16 | "releases" at "http://scala-tools.org/repo-releases", 17 | "Akka Snapshot Repository" at "http://repo.akka.io/snapshots/", 18 | "Mesosphere Repo" at "http://downloads.mesosphere.io/maven", 19 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository" 20 | ) 21 | 22 | credentials ++= { 23 | (for { 24 | realm <- 
sys.env.get("GESTALT_RESOLVER_REALM") 25 | username <- sys.env.get("GESTALT_RESOLVER_USERNAME") 26 | resolverUrlStr <- sys.env.get("GESTALT_RESOLVER_URL") 27 | resolverUrl <- scala.util.Try{url(resolverUrlStr)}.toOption 28 | password <- sys.env.get("GESTALT_RESOLVER_PASSWORD") 29 | } yield { 30 | Seq(Credentials(realm, resolverUrl.getHost, username, password)) 31 | }) getOrElse(Seq()) 32 | } 33 | 34 | resolvers ++= { 35 | sys.env.get("GESTALT_RESOLVER_URL") map { 36 | url => Seq("gestalt-resolver" at url) 37 | } getOrElse(Seq()) 38 | } 39 | 40 | // 41 | // Adds project name to prompt like in a Play project 42 | // 43 | shellPrompt in ThisBuild := { state => "\033[0;36m" + Project.extract(state).currentRef.project + "\033[0m] " } 44 | 45 | libraryDependencies ++= Seq ( 46 | "com.typesafe.akka" % "akka-actor_2.11" % "2.3.4", 47 | "com.galacticfog" %% "gestalt-lambda-io" % "0.3.0-SNAPSHOT" withSources(), 48 | "com.galacticfog" %% "gestalt-lambda-plugin" % "0.2.1-SNAPSHOT" withSources(), 49 | "com.galacticfog" %% "gestalt-io" % "1.0.5-SNAPSHOT" withSources(), 50 | "org.slf4j" % "slf4j-api" % "1.7.10", 51 | "ch.qos.logback" % "logback-classic" % "1.1.2", 52 | "com.newrelic.agent.java" % "newrelic-api" % "3.29.0", 53 | "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0", 54 | "mesosphere" %% "mesos-utils" % "0.28.0" withJavadoc() 55 | ) 56 | 57 | assemblyMergeStrategy in assembly := { 58 | case "META-INF/MANIFEST.MF" => MergeStrategy.discard 59 | case _ => MergeStrategy.first 60 | } 61 | 62 | 63 | // 64 | // This bit here adds a mapping for the "package" task that will add our necessary ServiceLoader sauce to the JAR 65 | // 66 | 67 | unmanagedResourceDirectories in Compile += { baseDirectory.value / "META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter" } 68 | 69 | mappings in (Compile, packageBin) <+= baseDirectory map { base => 70 | (base / "META-INF" / "services" / "com.galacticfog.gestalt.lambda.plugin.GFIAdapter") -> "META-INF/services/com.galacticfog.gestalt.lambda.plugin.GFIAdapter" 71 | } 72 | 73 | newrelicVersion := "3.29.0" 74 | 75 | newrelicAppName := "Lambda-Test" 76 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/newrelic/CHANGELOG: -------------------------------------------------------------------------------- 1 | CHANGELOG 2 | ========= 3 | 4 | Please refer to: https://docs.newrelic.com/docs/release-notes/agent-release-notes/java-release-notes 5 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/newrelic/README.txt: -------------------------------------------------------------------------------- 1 | New Relic Java Agent 2 | -------------------- 3 | New Relic is an application performance monitoring (APM) service that lets 4 | you see and understand application performance metrics in real time so you 5 | can fix problems fast. The Java agent collects system and application 6 | metrics by injecting bytecode into a select set of methods. The metrics are 7 | reported to the New Relic service once a minute. The agent also reports 8 | uncaught exceptions and slow transactions. 9 | 10 | The Java Agent installs into your app server and monitors the performance 11 | of your apps. The agent monitors applications written in Java, Scala, and 12 | other languages that run on the JVM. New Relic provides additional detail 13 | for many common frameworks and libraries. 
See this page for details: 14 | 15 | http://newrelic.com/java 16 | 17 | 18 | Getting Started 19 | --------------- 20 | If you don't already have a New Relic account, sign up for a free account: 21 | 22 | https://newrelic.com/signup 23 | 24 | When you sign up, you will be provided with a customized zip file that is 25 | configured with your license key. 26 | 27 | Using Java SE 5? You will need a different version of the agent. See below. 28 | 29 | 30 | Installation 31 | ------------ 32 | Complete installation instructions and troubleshooting tips are available 33 | at: 34 | 35 | https://newrelic.com/docs/java/new-relic-for-java 36 | 37 | For most users, the following self-installer instructions apply: 38 | 39 | https://newrelic.com/docs/java/java-agent-self-installer 40 | 41 | Configuration options are available at: 42 | 43 | https://newrelic.com/docs/java/java-agent-configuration 44 | 45 | 46 | Using New Relic 47 | --------------- 48 | Once you have installed the agent and restarted your app server, you can 49 | login to New Relic at 50 | 51 | https://rpm.newrelic.com 52 | 53 | and see your application's performance information. It takes about two 54 | minutes for the application data to show up. By default, your data will 55 | appear under an application named "My Application". You can change this by 56 | updating the app_name setting in newrelic.yml (see below). 57 | 58 | 59 | Agent Files 60 | ----------- 61 | Typically, you will unzip the newrelic files in your app server root. The 62 | layout is: 63 | 64 | newrelic/ 65 | newrelic.jar 66 | newrelic-api.jar 67 | newrelic.yml 68 | logs/ 69 | ... 70 | 71 | The installation process adds a JVM argument 72 | -javaagent:newrelic/newrelic.jar 73 | to your app server startup script. 74 | 75 | The newrelic.yml file provides configuration options. Most of the options 76 | take effect on restart of the app server. NOTE: yml requires exact 77 | whitespace indentation! If the indentation is incorrect, the option may be 78 | ignored. 79 | 80 | The logs directory contains important diagnostic information about the 81 | agent. In particular, view newrelic/logs/newrelic_agent.log if you are 82 | troubleshooting. 83 | 84 | If you use the New Relic API in your code, you will need to include the 85 | newrelic-api.jar file at compile time and add it as a dependency to your 86 | app. Make sure that you use the same version for your API and newrelic.jar. 87 | New Relic publishes to the Maven Central Repository, so you can add a 88 | dependency to newrelic-api in your favorite build tool. 89 | 90 | 91 | Note to Java SE 5 Users 92 | ----------------------- 93 | This version of the agent works with Java SE 6, 7 and 8. At signup or in your 94 | Account Settings page, you have the option to download a version of the 95 | agent that works with Java SE 5. 96 | 97 | 98 | Support 99 | ------- 100 | Email , or visit our support site at 101 | 102 | https://support.newrelic.com 103 | 104 | 105 | Licenses 106 | -------- 107 | See the LICENSE file for New Relic's license terms and the list of 108 | third-party components that are included in the agent. 
109 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/newrelic/nrcerts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/plugins/lambda-gfi-plugin/newrelic/nrcerts -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.3.8") 2 | 3 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.0.2") 4 | 5 | addSbtPlugin("com.typesafe.sbteclipse" % "sbteclipse-plugin" % "2.5.0") 6 | 7 | addSbtPlugin("com.gilt.sbt" % "sbt-newrelic" % "0.1.4") 8 | 9 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") 10 | 11 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/src/main/resources/META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter: -------------------------------------------------------------------------------- 1 | com.galacticfog.gestalt.lambda.impl.GFILambdaAdapter 2 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/src/main/scala/com/galacticfog/gestalt/lambda/impl/EnvironmentCache.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl 2 | 3 | import java.util.concurrent.TimeoutException 4 | 5 | import org.apache.mesos.Protos 6 | import org.joda.time.DateTime 7 | import scala.collection.mutable 8 | import scala.concurrent.{Await, Future} 9 | import scala.concurrent.duration._ 10 | 11 | case class EnvironmentCacheEntry( lambdaId : String, env : Protos.Environment, queuedTime : DateTime = DateTime.now ) 12 | class EnvironmentCache { 13 | 14 | val cache : mutable.Map[String, EnvironmentCacheEntry] = mutable.Map[String, EnvironmentCacheEntry]() 15 | val EXPIRATION_SECONDS = sys.env.getOrElse( "ENV_CACHE_EXPIRATION_SECONDS", "900" ).toInt 16 | 17 | def getEnvironment( lambdaId : String, env : Future[Map[String,String]] ) : Protos.Environment = { 18 | 19 | val cacheEntry = cache.get( lambdaId ) 20 | 21 | if( !cacheEntry.isDefined || cacheEntry.get.queuedTime.plusSeconds( EXPIRATION_SECONDS ).isBeforeNow ) { 22 | //wait for the future 23 | try { 24 | val result = Await.result( env, 5 seconds ) 25 | val builder = Protos.Environment.newBuilder 26 | result.foreach{ entry => 27 | builder.addVariables( Protos.Environment.Variable.newBuilder 28 | .setName( entry._1 ) 29 | .setValue( entry._2 ) 30 | ) 31 | } 32 | 33 | val newEnv = builder.build 34 | cache( lambdaId ) = new EnvironmentCacheEntry( lambdaId, newEnv ) 35 | newEnv 36 | } 37 | catch { 38 | case ex : TimeoutException => { 39 | println( "TIMEOUT" ) 40 | Protos.Environment.newBuilder.build 41 | } 42 | } 43 | } 44 | else { 45 | cache( lambdaId ).env 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/src/main/scala/com/galacticfog/gestalt/lambda/impl/ExecutorCache.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl 2 | 3 | import java.util.UUID 4 | 5 | import org.apache.mesos.{SchedulerDriver, Protos} 6 | import org.joda.time.DateTime 7 | import org.slf4j.LoggerFactory 
8 | 9 | case class CacheStatus( 10 | executorId : String, 11 | lastRan : DateTime = DateTime.now, 12 | taskId : Option[Protos.TaskID] = None, 13 | bDone : Boolean = false, 14 | bRemove : Boolean = false, 15 | driver : Option[SchedulerDriver] = None, 16 | executor : Option[Protos.ExecutorID] = None, 17 | slaveId : Option[Protos.SlaveID] = None, 18 | environment : Option[Protos.Environment] = None 19 | ) 20 | 21 | class ExecutorCache { 22 | 23 | val log = LoggerFactory.getLogger( ExecutorCache.this.getClass ) 24 | 25 | val cache = scala.collection.mutable.Map[String, scala.collection.mutable.Map[String, CacheStatus]]() 26 | 27 | def debugCache() : Unit = { 28 | 29 | val buf = new StringBuilder 30 | cache.foreach { top => 31 | buf ++= ( "\ncache [" + top._1 + "]: \n" ) 32 | top._2.foreach { entry => 33 | buf ++= ( "\tentry [" + entry._1 + "]: bDone [" + entry._2.bDone + "], lastRan [" + entry._2.lastRan.toLocalDateTime.toString + "]\n" ) 34 | } 35 | } 36 | 37 | log.debug( buf.toString ) 38 | } 39 | 40 | def remove( lambdaId : String, lambdaExecId : String ) : Unit = { 41 | val entryMap = cache.get( lambdaId ) getOrElse ??? 42 | entryMap -= lambdaExecId 43 | 44 | if( entryMap.size == 0 ) 45 | { 46 | cache -= lambdaId 47 | } 48 | } 49 | 50 | //TODO : validate that this actually does the thing 51 | def invalidateCache( lambdaId : String ) : Unit = { 52 | val entry = cache.get( lambdaId ) 53 | if( entry.isDefined ) 54 | { 55 | val keySet = entry.get.keys 56 | keySet.foreach{ key => 57 | entry.get(key) = entry.get(key).copy( bRemove = true ) 58 | } 59 | } 60 | } 61 | 62 | def filterExpired( seconds : Int ) : scala.collection.mutable.Map[ String, scala.collection.mutable.Map[String, CacheStatus]] = { 63 | 64 | cache.map{ entry => { 65 | val oldies = entry._2.filter{ item => 66 | (item._2.bRemove || 67 | (item._2.bDone && item._2.lastRan.plusSeconds( seconds ).isBeforeNow)) 68 | } 69 | ( entry._1, oldies ) 70 | } 71 | } 72 | } 73 | 74 | def getAvailableExecutor( lambdaId : String ) : Option[(String, CacheStatus)] = { 75 | cache.get( lambdaId ).flatMap{ entry => 76 | entry.find( item => item._2.bDone && !item._2.bRemove ) 77 | } 78 | } 79 | 80 | def getOrCreateCacheStatus( lambdaId : String, uuid : String ) : CacheStatus = { 81 | 82 | val available = getAvailableExecutor( lambdaId ) 83 | if( !available.isDefined ) 84 | { 85 | if( cache.contains( lambdaId ) ) 86 | cache( lambdaId ) ++= scala.collection.mutable.Map( uuid -> new CacheStatus( uuid )) 87 | else 88 | cache( lambdaId ) = scala.collection.mutable.Map( uuid -> new CacheStatus( uuid )) 89 | get( lambdaId, uuid ).get 90 | } 91 | else { 92 | log.debug( "found available cache entry : " + available.get._1 ) 93 | available.get._2 94 | } 95 | } 96 | 97 | def get( lambdaId : String, uuid : String ) : Option[CacheStatus] = { 98 | cache.get( lambdaId ).flatMap( _.get( uuid ) ) 99 | } 100 | 101 | /* 102 | The scala compiler is nice and allows us to create this update method that will allow syntax 103 | like this : 104 | 105 | val executionCache : ExecutionCache = ... 106 | val status : CacheStatus = ... 107 | 108 | executionCache( lambdaId, uuid ) = status 109 | 110 | which makes this looke like a regular map update statement, which is nice. 111 | 112 | */ 113 | 114 | def update( lambdaId : String, uuid : String, status : CacheStatus ) = { 115 | if( !cache.contains( lambdaId ) ) ??? 
116 | cache.get( lambdaId ).get( uuid ) = status 117 | } 118 | 119 | } 120 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/src/main/scala/com/galacticfog/gestalt/lambda/impl/actor/GFIMessages.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl.actor 2 | 3 | import akka.actor.ActorRef 4 | import com.galacticfog.gestalt.lambda.impl.GFILambdaInfo 5 | import com.galacticfog.gestalt.lambda.io.domain.{LambdaDao, LambdaEvent} 6 | import org.apache.mesos.Protos 7 | 8 | import scala.concurrent.Future 9 | 10 | object GFIMessages { 11 | 12 | sealed trait GFIMessage 13 | 14 | case class InvokeLambda( 15 | lambda : GFILambdaInfo, 16 | event : LambdaEvent, 17 | uuid : String, 18 | env : Future[Map[String,String]], 19 | senderActor : Option[ActorRef] = None, 20 | creds : Option[String] = None ) extends GFIMessage 21 | 22 | case class TimeoutLambda( uuid : String ) extends GFIMessage 23 | case class InvalidateCache( lambda : LambdaDao ) extends GFIMessage 24 | case object CheckCache extends GFIMessage 25 | 26 | 27 | case class QueueLambda( lambda : GFILambdaInfo, event : LambdaEvent, uuid : String, env : Future[Map[String,String]], senderActor : ActorRef, creds : Option[String] ) extends GFIMessage 28 | case class MatchOffer( offer : OfferMatch ) extends GFIMessage 29 | case class LaunchTasks( offer : OfferMatch ) extends GFIMessage 30 | case object RequestOffer extends GFIMessage 31 | case class RemoveOffer( offer : Protos.Offer ) extends GFIMessage 32 | case class TimeoutOffer( offer : Protos.Offer ) extends GFIMessage 33 | 34 | 35 | case class IncomingOffer( offer : Protos.Offer ) extends GFIMessage 36 | case class IncomingOffers( offers : Seq[Protos.Offer] ) extends GFIMessage 37 | case class RejectOffer( offer : Protos.Offer ) extends GFIMessage 38 | case object CheckOffers extends GFIMessage 39 | 40 | 41 | case object TestMessage extends GFIMessage 42 | case object ShutdownScheduler extends GFIMessage 43 | 44 | } 45 | -------------------------------------------------------------------------------- /plugins/lambda-gfi-plugin/src/main/scala/com/galacticfog/gestalt/lambda/impl/actor/OfferActor.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl.actor 2 | 3 | import akka.actor.{Props, ActorRef, ActorLogging, Actor} 4 | import akka.event.LoggingReceive 5 | import com.galacticfog.gestalt.lambda.impl.actor.GFIMessages._ 6 | import org.apache.mesos.{MesosSchedulerDriver, Protos} 7 | import org.apache.mesos.Protos.Offer 8 | import org.joda.time.DateTime 9 | import org.slf4j.LoggerFactory 10 | import scala.concurrent.duration._ 11 | import scala.concurrent.ExecutionContext.Implicits.global 12 | 13 | import scala.collection.mutable 14 | 15 | class OfferActor( driver : MesosSchedulerDriver, taskActor : ActorRef ) extends Actor with ActorLogging { 16 | 17 | val logger = LoggerFactory.getLogger( getClass ) 18 | 19 | case class QueuedOffer( 20 | offer : Protos.Offer, 21 | queueTime : DateTime = DateTime.now, 22 | bOffered : Boolean = false 23 | ) 24 | 25 | val TTL = sys.env.getOrElse( "OFFER_TTL", "3" ).toInt 26 | 27 | val offerMap : mutable.HashMap[Protos.OfferID, QueuedOffer] = new mutable.HashMap[Protos.OfferID, QueuedOffer]() 28 | 29 | def receive = LoggingReceive { handleRequests } 30 | 31 | override def preStart = { 32 | } 33 | 34 | def handleRequests : Receive = { 35 | 36 | case 
IncomingOffers( offers : Seq[Protos.Offer] ) => { 37 | logger.trace( s"IncomingOffers(${offers.size})" ) 38 | offers.foreach{ offer => 39 | self ! IncomingOffer( offer ) 40 | } 41 | } 42 | case IncomingOffer( offer ) => { 43 | logger.trace( s"IncomingOffer[${offer.getId.getValue}]") 44 | 45 | //schedule a timeout for TTL seconds later 46 | context.system.scheduler.scheduleOnce( TTL seconds, self, TimeoutOffer( offer ) ) 47 | 48 | //TODO : do something more interesting 49 | taskActor ! IncomingOffer( offer ) 50 | } 51 | 52 | case RemoveOffer( offer ) => { 53 | logger.trace( s"RemoveOffer[${offer.getId.getValue}]") 54 | offerMap -= offer.getId 55 | } 56 | 57 | case RejectOffer( offer ) => { 58 | logger.trace( s"RejectOffer[${offer.getId.getValue}]") 59 | 60 | val q = offerMap.get( offer.getId ) 61 | 62 | if( q.isDefined ) { 63 | 64 | if ( !q.get.bOffered && q.get.queueTime.plusSeconds( TTL ).isBeforeNow ) { 65 | self ! RemoveOffer( q.get.offer ) 66 | logger.trace( s"[${q.get.offer.getId.getValue}] - TTL expired, declining...") 67 | driver.declineOffer( offer.getId ) 68 | } 69 | offerMap.update( offer.getId, q.get.copy( bOffered = false ) ) 70 | 71 | } 72 | else 73 | { 74 | logger.trace( s"[${offer.getId.getValue}] - queue offer for " + TTL + " seconds...") 75 | offerMap += (offer.getId -> new QueuedOffer( offer )) 76 | } 77 | } 78 | 79 | case RequestOffer => { 80 | logger.trace( "RequestOffer()" ) 81 | 82 | offerMap.foreach { o => 83 | if( !o._2.bOffered ) { 84 | logger.trace( s"cache hit [${o._2.offer.getId.getValue}]" ) 85 | offerMap.update( o._1, o._2.copy( bOffered = true ) ) 86 | taskActor ! IncomingOffer( o._2.offer ) 87 | } 88 | } 89 | 90 | } 91 | 92 | case TimeoutOffer( offer ) => { 93 | logger.trace( s"TimeOutOffer(${offer.getId.getValue})" ) 94 | 95 | if( offerMap.get( offer.getId ).isDefined && !offerMap.get( offer.getId ).get.bOffered ) 96 | { 97 | logger.trace( s"removing timed out offer [${offer.getId.getValue}]") 98 | driver.declineOffer( offer.getId ) 99 | offerMap -= offer.getId 100 | } 101 | } 102 | } 103 | } 104 | 105 | object OfferActor { 106 | def props( driver: MesosSchedulerDriver, taskActor: ActorRef ) : Props = { 107 | Props( new OfferActor( driver, taskActor ) ) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/Dockerfile: -------------------------------------------------------------------------------- 1 | # Extend vert.x image 2 | FROM ollin/vertx:latest 3 | 4 | # Set the name of the verticle to deploy 5 | ENV VERTICLE_NAME Server.scala 6 | 7 | # Set the location of the verticles 8 | ENV VERTICLE_HOME /usr/verticles 9 | 10 | EXPOSE 1234 11 | 12 | # Copy your verticle to the container 13 | COPY $VERTICLE_NAME $VERTICLE_HOME/ 14 | 15 | # Launch the verticle 16 | WORKDIR $VERTICLE_HOME 17 | ENTRYPOINT ["sh", "-c"] 18 | CMD ["vertx run $VERTICLE_NAME -cp $VERTICLE_HOME/*"] 19 | 
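A minimal sketch of the kind of Server.scala verticle the Dockerfile above expects to find in VERTICLE_HOME. This is not a file in the plugin, just an illustration that mirrors the vert.x Scala API used by the example verticles below; the "module-input" address is hypothetical.

package com.galacticfog.gestalt.vertx

import org.vertx.scala.core.eventbus.Message
import org.vertx.scala.platform.Verticle
import org.slf4j.LoggerFactory

class Server extends Verticle {

  private val log = LoggerFactory.getLogger( classOf[Server] )

  // echo anything published on the (hypothetical) "module-input" address back out on "module-return"
  private val echoHandler = { message: Message[String] =>
    log.debug( "received : " + message.body )
    vertx.eventBus.publish( "module-return", message.body )
  }

  override def start() = {
    vertx.eventBus.registerHandler( "module-input", echoHandler )
    log.debug( "Server verticle started" )
  }
}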
-------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/README: -------------------------------------------------------------------------------- 1 | This is your new Play application 2 | ================================= 3 | 4 | This file will be packaged with your application, when using `activator dist`. 5 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-vertx""" 2 | 3 | version := "1.0.0-SNAPSHOT" 4 | 5 | scalaVersion := "2.11.5" 6 | 7 | libraryDependencies ++= Seq( 8 | "com.fasterxml.jackson.core" % "jackson-core" % "2.2.2", 9 | "com.fasterxml.jackson.core" % "jackson-annotations" % "2.2.2", 10 | "com.fasterxml.jackson.core" % "jackson-databind" % "2.2.2", 11 | "io.vertx" % "vertx-core" % "3.1.0", 12 | "com.galacticfog" %% "gestalt-lambda-plugin" % "0.0.1-SNAPSHOT" withSources() 13 | ) 14 | 15 | resolvers ++= Seq( 16 | "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases", 17 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 18 | "Atlassian Releases" at "https://maven.atlassian.com/public/" 19 | ) 20 | 21 | credentials ++= { 22 | (for { 23 | realm <- sys.env.get("GESTALT_RESOLVER_REALM") 24 | username <- sys.env.get("GESTALT_RESOLVER_USERNAME") 25 | resolverUrlStr <- sys.env.get("GESTALT_RESOLVER_URL") 26 | resolverUrl <- scala.util.Try{url(resolverUrlStr)}.toOption 27 | password <- sys.env.get("GESTALT_RESOLVER_PASSWORD") 28 | } yield { 29 | Seq(Credentials(realm, resolverUrl.getHost, username, password)) 30 | }) getOrElse(Seq()) 31 | } 32 | 33 | resolvers ++= { 34 | sys.env.get("GESTALT_RESOLVER_URL") map { 35 | url => Seq("gestalt-resolver" at url) 36 | } getOrElse(Seq()) 37 | } 38 | 39 | assemblyMergeStrategy in assembly := { 40 | case "META-INF/MANIFEST.MF" => MergeStrategy.discard 41 | case _ => MergeStrategy.first 42 | } 43 | 44 | // 45 | // This bit here adds a mapping for the "package" task that will add our necessary ServiceLoader sauce to the JAR 46 | // 47 | 48 | unmanagedResourceDirectories in Compile += { baseDirectory.value / "META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter" } 49 | 50 | mappings in (Compile, packageBin) <+= baseDirectory map { base => 51 | (base / "META-INF" / "services" / "com.galacticfog.gestalt.lambda.plugin.LambdaAdapter") -> "META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter" 52 | } 53 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/conf/langs.properties: -------------------------------------------------------------------------------- 1 | scala=io.vertx~lang-scala_2.11~1.0.1-SNAPSHOT:org.vertx.scala.platform.impl.ScalaVerticleFactory 2 | .scala=scala 3 | rhino=io.vertx~lang-rhino~2.1.1:org.vertx.java.platform.impl.RhinoVerticleFactory 4 | .js=rhino 5 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-mod/.gitignore: -------------------------------------------------------------------------------- 1 | logs 2 | localrc 3 | remoterc 4 | project/project 5 | project/target 6 | target 7 | tmp 8 | change.json 9 | approver.json 10 | .history 11 | *.event 12 | *.json 13 | dist 14 | /.idea 15 | /*.iml 16 | /out 17 | /.idea_modules 18 | /.classpath 19 | /.project 20 | /RUNNING_PID 21 | /.settings 22 | *.jar 
23 | /bin/ 24 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-mod/build.sbt: -------------------------------------------------------------------------------- 1 | name := """test-mod""" 2 | 3 | version := "1.0.2-SNAPSHOT" 4 | 5 | organization := "com.galacticfog" 6 | 7 | scalaVersion := "2.11.5" 8 | 9 | libraryDependencies ++= Seq( 10 | "com.fasterxml.jackson.core" % "jackson-core" % "2.2.2", 11 | "com.fasterxml.jackson.core" % "jackson-annotations" % "2.2.2", 12 | "com.fasterxml.jackson.core" % "jackson-databind" % "2.2.2", 13 | "io.vertx" % "vertx-core" % "2.1.6", 14 | "io.vertx" % "vertx-platform" % "2.1.6", 15 | "io.vertx" % "lang-scala_2.11" % "1.1.0-SNAPSHOT" 16 | ) 17 | 18 | resolvers ++= Seq( 19 | "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases", 20 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 21 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository", 22 | DefaultMavenRepository 23 | ) 24 | 25 | vertxSettings 26 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-mod/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.galacticfog" % "sbt-vertx" % "1.0.0") 2 | 3 | resolvers ++= Seq( 4 | "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases", 5 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 6 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository" 7 | ) 8 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-mod/src/main/resources/hello-verticle.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | var vatomic = require('./vatomic.js') 4 | vatomic.core.console.log( "TESTING WTF" ) 5 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-mod/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-mod/src/main/resources/vatomic.js: -------------------------------------------------------------------------------- 1 | var vatomic = vatomic || {} 2 | 3 | vatomic.core = { 4 | 5 | vertx : require('vertx'), 6 | container : require('vertx/container'), 7 | console : require('vertx/console') 8 | //config : container.config, 9 | //event_bus : vertx.eventBus, 10 | 11 | //end : function( payload ) { 12 | //event_bus.publish("module-return", payload) 13 | //} 14 | } 15 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-mod/src/main/scala/com/galacticfog/gestalt/vertx/FactoryVerticle.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.vertx 2 | 3 | import java.io.File 4 | 5 | import org.vertx.java.core.{AsyncResult, AsyncResultHandler} 6 | import org.vertx.java.core.json.JsonObject 7 | import org.vertx.java.platform.{PlatformLocator, PlatformManager} 8 | import org.vertx.scala.core.json.JsonObject 9 | import 
org.vertx.scala.core.json.Json 10 | import org.vertx.scala.core.json.JsonObject 11 | import org.vertx.scala.platform.Verticle 12 | import org.vertx.scala.platform.impl.ScalaVerticle 13 | import org.vertx.scala.core.eventbus.Message 14 | import org.vertx.scala.core.http._ 15 | import org.vertx.scala.core._ 16 | import org.vertx.java.core.buffer.Buffer 17 | import org.slf4j.Logger 18 | import org.slf4j.LoggerFactory 19 | import scala.collection 20 | import scala.collection.Map 21 | 22 | import scala.collection.parallel.mutable 23 | 24 | 25 | class FactoryVerticle extends Verticle { 26 | 27 | //mutable data - ew 28 | private var policyMap : Map[String,(String,String)] = new collection.mutable.HashMap[String,(String,String)]() 29 | private var pm : PlatformManager = null; 30 | private val log : Logger = LoggerFactory.getLogger(classOf[FactoryVerticle]) 31 | def uuid : String = java.util.UUID.randomUUID.toString 32 | 33 | override def start() = { 34 | 35 | log.debug( "FactoryVerticle::start()" ) 36 | 37 | pm = PlatformLocator.factory.createPlatformManager() 38 | val eb = vertx.eventBus 39 | 40 | eb.registerHandler("kafka-address", deployHandler ) 41 | 42 | log.debug( "FactoryVerticle started successfully" ) 43 | } 44 | 45 | private val deployHandler = { message: Message[String] => 46 | log.debug("message : " + message.body) 47 | 48 | val jsConfig = new JsonObject( message.body ) 49 | 50 | //why doesn't this work 51 | /* 52 | val messageOK : Boolean = ( 53 | jsConfig.containsField( "event_label " ) && 54 | jsConfig.containsField( "policy_name" ) && 55 | jsConfig.containsField( "policy_config" ) 56 | ) 57 | 58 | if( !messageOK ) 59 | { 60 | throw new Exception( "Malformed Policy start event" ) 61 | } 62 | */ 63 | 64 | val label = jsConfig.getString( "event_label" ) 65 | val name = jsConfig.getString( "policy_name" ) 66 | val config = jsConfig.getObject( "policy_config" ) 67 | 68 | val containerId = uuid 69 | policyMap += (name -> (containerId, label)) 70 | log.debug( s"$name -> ($containerId, $label)" ) 71 | 72 | //now we want to spawn a new policy in a worker thread and tell it what to listen for 73 | 74 | val policyConfig = new JsonObject() 75 | policyConfig.putElement( "config", config ) 76 | policyConfig.putString( "id", containerId ) 77 | 78 | deployVerticle( name, Some(policyConfig) ) 79 | } 80 | 81 | 82 | def deployVerticle( verticleName : String, config : Option[JsonObject] = None ) = { 83 | log.debug( s"deployVerticle( $verticleName )") 84 | if( config.isDefined ) 85 | { 86 | log.debug( "config : " + config.get.toString ) 87 | } 88 | 89 | val handler = new AsyncResultHandler[String]() { 90 | override def handle(ar: AsyncResult[String]) = { 91 | if (ar.succeeded) 92 | log.debug(">>> Vertx deployment id is " + ar.result()) 93 | else { 94 | log.error(">>> Failed to deploy vertx module") 95 | ar.cause().printStackTrace() 96 | } 97 | } 98 | } 99 | 100 | val verticleHome = sys.env.get( "VERTICLE_HOME" ) getOrElse { "verticles" } 101 | val vertUrl = new File(verticleHome).toURI().toURL 102 | //@HACK 103 | //val resolvedConfig = config getOrElse { null } 104 | val resolvedConfig = null 105 | 106 | pm.deployVerticle( verticleName, resolvedConfig, Array(vertUrl), 1, null, handler ) 107 | } 108 | 109 | override def stop(): Unit = { 110 | log.debug( "FactoryVerticle::stop()" ) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-post/.gitignore: 
-------------------------------------------------------------------------------- 1 | logs 2 | localrc 3 | remoterc 4 | project/project 5 | project/target 6 | target 7 | tmp 8 | change.json 9 | approver.json 10 | .history 11 | *.event 12 | *.json 13 | dist 14 | /.idea 15 | /*.iml 16 | /out 17 | /.idea_modules 18 | /.classpath 19 | /.project 20 | /RUNNING_PID 21 | /.settings 22 | *.jar 23 | /bin/ 24 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-post/build.sbt: -------------------------------------------------------------------------------- 1 | name := """test-post""" 2 | 3 | version := "1.0.0-SNAPSHOT" 4 | 5 | organization := "com.galacticfog" 6 | 7 | scalaVersion := "2.11.5" 8 | 9 | libraryDependencies ++= Seq( 10 | "com.fasterxml.jackson.core" % "jackson-core" % "2.2.2", 11 | "com.fasterxml.jackson.core" % "jackson-annotations" % "2.2.2", 12 | "com.fasterxml.jackson.core" % "jackson-databind" % "2.2.2", 13 | "io.vertx" % "vertx-core" % "2.1.6", 14 | "io.vertx" % "vertx-platform" % "2.1.6", 15 | "io.vertx" % "lang-scala_2.11" % "1.1.0-SNAPSHOT" 16 | ) 17 | 18 | resolvers ++= Seq( 19 | "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases", 20 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 21 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository", 22 | DefaultMavenRepository 23 | ) 24 | 25 | vertxSettings 26 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-post/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.galacticfog" % "sbt-vertx" % "1.0.0") 2 | 3 | 4 | resolvers ++= Seq( 5 | "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases", 6 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 7 | "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/", 8 | "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository" 9 | ) 10 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-post/src/main/resources/hello-verticle.js: -------------------------------------------------------------------------------- 1 | //------------------------------------------ 2 | // Lifecycle Post Policy 3 | // 4 | // This is an example of a lifecycle policy that is called 5 | // after the completion of a resource lifecycle. This could 6 | // be after a creation type lifecycle or even a deletion 7 | // lifecycle. 
8 | // 9 | //------------------------------------------ 10 | 11 | 12 | var vertx = require('vertx'); //this is the container for all of built in functionality 13 | var container = require( 'vertx/container' ) //this is our context 14 | var console = require('vertx/console'); 15 | var eb = vertx.eventBus; //the vertx event bus that we'll use to communicate with the framework 16 | 17 | //the deployment has passed us in some config that will be in the container context 18 | var config = container.config 19 | console.log( "config is " + JSON.stringify( config ) ) 20 | 21 | //this is configured when we register the policy with the vertx framework 22 | var policyConfig = config.config; 23 | console.log( "policy config is " + JSON.stringify( policyConfig ) ) 24 | 25 | //gross date handling because we need a specific format for the message data structure : yyyy-MM-dd 26 | var d = new Date(), 27 | month = '' + (d.getMonth() + 1), 28 | day = '' + d.getDate(), 29 | year = d.getFullYear(); 30 | 31 | if (month.length < 2) month = '0' + month; 32 | if (day.length < 2) day = '0' + day; 33 | 34 | var thisDay = [year, month, day].join('-'); 35 | 36 | var task = config.task 37 | var detail = task.detail 38 | 39 | //build the message structure that we're going to use for the calling 40 | var message = {}; 41 | message.messageId = "GARBAGE"; 42 | message.source = "sms"; 43 | message.to = policyConfig.toNumber; 44 | message.from = policyConfig.fromNumber; 45 | message.date = thisDay; 46 | message.subject = "Provisioning Complete"; 47 | message.body = "The requested resource has finished provisioning : " + detail.resource 48 | 49 | 50 | //the vertx conversation topic "kafka" is mapped on the framework side to forward directly to the 51 | //gestalt event bus. Which means the payload needs to be in the GestaltEvent json format 52 | var messageEvent = {}; 53 | messageEvent.event_name = "notifier.message.fromBus.event"; 54 | messageEvent.data = message; 55 | eb.publish( "kafka", JSON.stringify( messageEvent )); 56 | 57 | //the "module-return" vertx conversation address is configured by the framework to listen for the return value 58 | //of a vertx module 59 | var returnVal = {}; 60 | returnVal.id = config.id; 61 | returnVal.status = "success"; 62 | returnVal.payload = "event sent : " + JSON.stringify( messageEvent ); 63 | eb.publish( "module-return", JSON.stringify( returnVal )); 64 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-post/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/examples/test-post/src/main/scala/com/galacticfog/gestalt/vertx/FactoryVerticle.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.vertx 2 | 3 | import java.io.File 4 | 5 | import org.vertx.java.core.{AsyncResult, AsyncResultHandler} 6 | import org.vertx.java.core.json.JsonObject 7 | import org.vertx.java.platform.{PlatformLocator, PlatformManager} 8 | import org.vertx.scala.core.json.JsonObject 9 | import org.vertx.scala.core.json.Json 10 | import org.vertx.scala.core.json.JsonObject 11 | import org.vertx.scala.platform.Verticle 12 | import org.vertx.scala.platform.impl.ScalaVerticle 13 | import 
org.vertx.scala.core.eventbus.Message 14 | import org.vertx.scala.core.http._ 15 | import org.vertx.scala.core._ 16 | import org.vertx.java.core.buffer.Buffer 17 | import org.slf4j.Logger 18 | import org.slf4j.LoggerFactory 19 | import scala.collection 20 | import scala.collection.Map 21 | 22 | import scala.collection.parallel.mutable 23 | 24 | 25 | class FactoryVerticle extends Verticle { 26 | 27 | //mutable data - ew 28 | private var policyMap : Map[String,(String,String)] = new collection.mutable.HashMap[String,(String,String)]() 29 | private var pm : PlatformManager = null; 30 | private val log : Logger = LoggerFactory.getLogger(classOf[FactoryVerticle]) 31 | def uuid : String = java.util.UUID.randomUUID.toString 32 | 33 | override def start() = { 34 | 35 | log.debug( "FactoryVerticle::start()" ) 36 | 37 | pm = PlatformLocator.factory.createPlatformManager() 38 | val eb = vertx.eventBus 39 | 40 | eb.registerHandler("kafka-address", deployHandler ) 41 | 42 | log.debug( "FactoryVerticle started successfully" ) 43 | } 44 | 45 | private val deployHandler = { message: Message[String] => 46 | log.debug("message : " + message.body) 47 | 48 | val jsConfig = new JsonObject( message.body ) 49 | 50 | //why doesn't this work 51 | /* 52 | val messageOK : Boolean = ( 53 | jsConfig.containsField( "event_label " ) && 54 | jsConfig.containsField( "policy_name" ) && 55 | jsConfig.containsField( "policy_config" ) 56 | ) 57 | 58 | if( !messageOK ) 59 | { 60 | throw new Exception( "Malformed Policy start event" ) 61 | } 62 | */ 63 | 64 | val label = jsConfig.getString( "event_label" ) 65 | val name = jsConfig.getString( "policy_name" ) 66 | val config = jsConfig.getObject( "policy_config" ) 67 | 68 | val containerId = uuid 69 | policyMap += (name -> (containerId, label)) 70 | log.debug( s"$name -> ($containerId, $label)" ) 71 | 72 | //now we want to spawn a new policy in a worker thread and tell it what to listen for 73 | 74 | val policyConfig = new JsonObject() 75 | policyConfig.putElement ( "config", config ) 76 | policyConfig.putString( "id", containerId ) 77 | 78 | deployVerticle( name, Some(policyConfig) ) 79 | } 80 | 81 | 82 | def deployVerticle( verticleName : String, config : Option[JsonObject] = None ) = { 83 | log.debug( s"deployVerticle( $verticleName )") 84 | if( config.isDefined ) 85 | { 86 | log.debug( "config : " + config.get.toString ) 87 | } 88 | 89 | val handler = new AsyncResultHandler[String]() { 90 | override def handle(ar: AsyncResult[String]) = { 91 | if (ar.succeeded) 92 | log.debug(">>> Vertx deployment id is " + ar.result()) 93 | else { 94 | log.error(">>> Failed to deploy vertx module") 95 | ar.cause().printStackTrace() 96 | } 97 | } 98 | } 99 | 100 | val verticleHome = sys.env.get( "VERTICLE_HOME" ) getOrElse { "verticles" } 101 | val vertUrl = new File(verticleHome).toURI().toURL 102 | //@HACK 103 | //val resolvedConfig = config getOrElse { null } 104 | val resolvedConfig = null 105 | 106 | pm.deployVerticle( verticleName, resolvedConfig, Array(vertUrl), 1, null, handler ) 107 | } 108 | 109 | override def stop(): Unit = { 110 | log.debug( "FactoryVerticle::stop()" ) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/gestalt-security.conf: -------------------------------------------------------------------------------- 1 | { 2 | "protocol": "http", 3 | "hostname": "localhost", 4 | "port": 9455, 5 | "apiKey": "root", 6 | "appId": "95bf63d1-f285-4ece-a9ef-34f4999afe65", 7 | "apiSecret": "letmein" 8 | 
} 9 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/langs.properties: -------------------------------------------------------------------------------- 1 | scala=io.vertx~lang-scala_2.11~1.0.1-SNAPSHOT:org.vertx.scala.platform.impl.ScalaVerticleFactory.scala 2 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/notifier.patch: -------------------------------------------------------------------------------- 1 | [ 2 | { "op" : "replace", "path" : "/sources", "value" : "[{\"sourceType\":\"gestalt-event-source\",\"config\":{\"host\":\"events.galacticfog.com\",\"port\":2181,\"channel\":\"test/notifier\",\"read_from_beginning\":false},\"handler\":{\"handlerType\":\"sink-handler\",\"config\":{\"sinkType\":\"twilio-sink\",\"config\":{\"number\":\"+15122130291\",\"account_sid\":\"AC88f9684ffa9e089c8051a917a754f320\",\"auth_token\":\"a8caa959b5b462263a9e63b1d8549d64\"}}}},{\"sourceType\":\"twilio-source\",\"config\":{\"delay\":5,\"number\":\"+15122130291\",\"account_sid\":\"AC88f9684ffa9e089c8051a917a754f320\",\"auth_token\":\"a8caa959b5b462263a9e63b1d8549d64\"},\"handler\":{\"handlerType\":\"sink-handler\",\"config\":{\"sinkType\":\"gestalt-event-sink\",\"config\":{\"channel\":\"test/default\",\"hosts\":[{\"host\":\"events.galacticfog.com\",\"port\":9092}]}}}}]" }, 3 | { "op" : "replace", "path" : "/sinks", "value" : "[]" } 4 | ] 5 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/project/build.properties: -------------------------------------------------------------------------------- 1 | #Activator-generated Properties 2 | #Wed Aug 26 14:43:52 CDT 2015 3 | template.uuid=a91771f5-1745-4f51-b877-badeea610f64 4 | sbt.version=0.13.8 5 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") 2 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/public/images/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/plugins/lambda-vertx-plugin/public/images/favicon.png -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/public/javascripts/hello.js: -------------------------------------------------------------------------------- 1 | if (window.console) { 2 | console.log("Welcome to your Play application's JavaScript!"); 3 | } -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/public/stylesheets/main.css: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/plugins/lambda-vertx-plugin/public/stylesheets/main.css -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/record.post: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "testing13.example.com", 3 | "recordType" : "A", 4 | "ttl" : 300, 5 | "dnsDomainId" : 1, 6 | "ipAddresses" : [ 7 | { "address" : "192.168.0.114" 
} 8 | ], 9 | "change_management" : { 10 | "reason" : "cause I said so", 11 | "schedule" : "2015-08-13", 12 | "deadline" : "2015-10-30", 13 | "provider_id" : "1" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/src/main/resources/META-INF/services/com.galacticfog.gestalt.lambda.plugin.LambdaAdapter: -------------------------------------------------------------------------------- 1 | com.galacticfog.gestalt.lambda.impl.VertxLambdaAdapter 2 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/src/main/scala/com/galacticfog/TestVerticle.class: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GalacticFog/gestalt-lambda/17429af43ba8b3f1bdfc2e4a2fd79e027f98e216/plugins/lambda-vertx-plugin/src/main/scala/com/galacticfog/TestVerticle.class -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/src/main/scala/com/galacticfog/gestalt/lambda/impl/DynLoader.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl 2 | 3 | import java.io.File 4 | 5 | import java.io.IOException 6 | import java.lang.reflect.Method 7 | import java.net.URL 8 | import java.net.URLClassLoader 9 | import java.util.Iterator 10 | import java.util.ServiceLoader 11 | 12 | case class ClassPathExistsException( msg : String ) extends Exception 13 | 14 | object DynLoader { 15 | 16 | def extendClasspath( dir : File, inLoader : ClassLoader ) = { 17 | try { 18 | 19 | val sysLoader : URLClassLoader = inLoader match { 20 | case u : URLClassLoader => u 21 | case _ => throw new Exception( "Cast Exception" ) 22 | } 23 | 24 | val urls : Seq[URL] = sysLoader.getURLs() 25 | val udir = dir.toURI().toURL() 26 | 27 | val udirs = udir.toString() 28 | urls.find( u => u.toString().equalsIgnoreCase(udirs) ) match { 29 | case Some(s) => throw new ClassPathExistsException( "class path exists" ) 30 | case None => {} 31 | } 32 | 33 | val sysClass = classOf[URLClassLoader] 34 | 35 | val method : Method = sysClass.getDeclaredMethod("addURL", classOf[URL] ) 36 | method.setAccessible(true) 37 | val udirObj = udir match { 38 | case o : Object => o 39 | case _ => throw new Exception( "impossible" ) 40 | } 41 | 42 | method.invoke(sysLoader, udirObj ) 43 | println( "Loaded " + udirs + " dynamically...") 44 | } 45 | catch { 46 | case cpe : ClassPathExistsException => { 47 | println( "class path exists, ignoring" ) 48 | } 49 | case t : Throwable => { 50 | t.printStackTrace(); 51 | } 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/src/main/scala/com/galacticfog/gestalt/lambda/impl/PolicyReturnEvent.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl 2 | 3 | import play.api.libs.json.{JsValue, Json, JsObject} 4 | 5 | case class PolicyReturnEvent( id : String, status : String, payload : String ) 6 | 7 | object PolicyReturnEvent { 8 | implicit val policyReturnEventFormat = Json.format[PolicyReturnEvent] 9 | 10 | //TODO : fix this to be enum 11 | def isSuccess( status : String ) : Boolean = { 12 | if( status.compareTo( "success" ) == 0 ) { true } else { false } 13 | } 14 | 15 | def isFailure( status : String ) : Boolean = { !isSuccess( status) } 16 | } 17 | 
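A quick usage sketch for the PolicyReturnEvent type above, assuming only the play-json Format defined in its companion object; the id and payload values are made up.

package com.galacticfog.gestalt.lambda.impl

import play.api.libs.json.Json

object PolicyReturnEventExample extends App {
  // build a return event (hypothetical values) and round-trip it through JSON
  val event = PolicyReturnEvent( id = "42", status = "success", payload = """{"msg":"done"}""" )
  val json = Json.toJson( event )

  println( Json.stringify( json ) )                         // serialized via the implicit Format
  println( PolicyReturnEvent.isSuccess( event.status ) )    // true
  println( Json.fromJson[PolicyReturnEvent]( json ).asOpt == Some( event ) )  // true
}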
-------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/src/main/scala/com/galacticfog/gestalt/lambda/impl/Utility.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.impl 2 | 3 | import io.vertx.core.json.JsonObject 4 | //import play.api.Logger 5 | import play.api.libs.json.{Json, JsValue} 6 | 7 | object Utility { 8 | 9 | def convert( json : JsValue ) : JsonObject = { 10 | new JsonObject( Json.stringify( json ) ) 11 | } 12 | 13 | def stripTrailingSlash( path : String ) : String = { 14 | println( "before slash strip: " + path ) 15 | val after = """/$""".r.replaceAllIn( path, "" ) 16 | println( "after slash strip : " + after ) 17 | after 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/test/ApplicationSpec.scala: -------------------------------------------------------------------------------- 1 | import org.specs2.mutable._ 2 | import org.specs2.runner._ 3 | import org.junit.runner._ 4 | 5 | import play.api.test._ 6 | import play.api.test.Helpers._ 7 | 8 | /** 9 | * Add your spec here. 10 | * You can mock out a whole application including requests, plugins etc. 11 | * For more information, consult the wiki. 12 | */ 13 | @RunWith(classOf[JUnitRunner]) 14 | class ApplicationSpec extends Specification { 15 | 16 | "Application" should { 17 | 18 | "send 404 on a bad request" in new WithApplication{ 19 | route(FakeRequest(GET, "/boum")) must beSome.which (status(_) == NOT_FOUND) 20 | } 21 | 22 | "render the index page" in new WithApplication{ 23 | val home = route(FakeRequest(GET, "/")).get 24 | 25 | status(home) must equalTo(OK) 26 | contentType(home) must beSome.which(_ == "text/html") 27 | contentAsString(home) must contain ("Your new application is ready.") 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /plugins/lambda-vertx-plugin/test/IntegrationSpec.scala: -------------------------------------------------------------------------------- 1 | import org.specs2.mutable._ 2 | import org.specs2.runner._ 3 | import org.junit.runner._ 4 | 5 | import play.api.test._ 6 | import play.api.test.Helpers._ 7 | 8 | /** 9 | * add your integration spec here. 
10 | * An integration test will fire up a whole play application in a real (or headless) browser 11 | */ 12 | @RunWith(classOf[JUnitRunner]) 13 | class IntegrationSpec extends Specification { 14 | 15 | "Application" should { 16 | 17 | "work from within a browser" in new WithBrowser { 18 | 19 | browser.goTo("http://localhost:" + port) 20 | 21 | browser.pageSource must contain("Your new application is ready.") 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /test-harness/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /test-harness/README.md: -------------------------------------------------------------------------------- 1 | #com.galacticfog.gestalt.lambda.test-harness 2 | 3 | This holds the test harness 4 | -------------------------------------------------------------------------------- /test-harness/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-lambda-test-harness""" 2 | 3 | organization := "com.galacticfog" 4 | 5 | version := "0.0.1-SNAPSHOT" 6 | 7 | scalaVersion := "2.11.5" 8 | 9 | credentials += Credentials(Path.userHome / ".ivy2" / ".credentials") 10 | 11 | publishTo := Some("Galacticfog Snapshot" at "https://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local") 12 | 13 | resolvers ++= Seq( 14 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 15 | "snapshots" at "http://scala-tools.org/repo-snapshots", 16 | "releases" at "http://scala-tools.org/repo-releases") 17 | 18 | credentials ++= { 19 | (for { 20 | realm <- sys.env.get("GESTALT_RESOLVER_REALM") 21 | username <- sys.env.get("GESTALT_RESOLVER_USERNAME") 22 | resolverUrlStr <- sys.env.get("GESTALT_RESOLVER_URL") 23 | resolverUrl <- scala.util.Try{url(resolverUrlStr)}.toOption 24 | password <- sys.env.get("GESTALT_RESOLVER_PASSWORD") 25 | } yield { 26 | Seq(Credentials(realm, resolverUrl.getHost, username, password)) 27 | }) getOrElse(Seq()) 28 | } 29 | 30 | resolvers ++= { 31 | sys.env.get("GESTALT_RESOLVER_URL") map { 32 | url => Seq("gestalt-resolver" at url) 33 | } getOrElse(Seq()) 34 | } 35 | 36 | // 37 | // Adds project name to prompt like in a Play project 38 | // 39 | shellPrompt in ThisBuild := { state => "\033[0;36m" + Project.extract(state).currentRef.project + "\033[0m] " } 40 | 41 | libraryDependencies ++= Seq ( 42 | "org.slf4j" % "slf4j-api" % "1.7.10", 43 | "joda-time" % "joda-time" % "2.9.4", 44 | "ch.qos.logback" % "logback-classic" % "1.1.2", 45 | "com.typesafe.play" %% "play-ws" % "2.3.9", 46 | "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0" 47 | ) 48 | 49 | -------------------------------------------------------------------------------- /test-harness/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /test-harness/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 
1 | 2 | 3 | 4 | 5 | 6 | 7 | true 8 | 10 | 11 | UTF-8 12 | 13 | %-4r %highlight(%-5level) - %msg%n 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /test-harness/src/main/scala/com/galacticfog/gestalt/lambda/test/TestHarnessApp.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.test 2 | 3 | import akka.util.Timeout 4 | import org.joda.time.DateTime 5 | import org.slf4j.LoggerFactory 6 | import play.api.libs.ws.WSAuthScheme 7 | 8 | import scala.concurrent.ExecutionContext.Implicits.global 9 | import scala.concurrent.{Await, Future} 10 | import scala.concurrent.duration._ 11 | import scala.io.Source 12 | 13 | object TestHarnessApp { 14 | 15 | val log = LoggerFactory.getLogger( this.getClass ) 16 | 17 | val RUN_SECONDS = sys.env.getOrElse( "TEST_RUN_SECONDS", "60" ).toInt 18 | val TESTS_PER_SECOND = sys.env.getOrElse( "TESTS_PER_SECOND", "1" ).toDouble 19 | val HOST = sys.env.getOrElse( "TEST_HOST", "localhost" ) 20 | val PORT = sys.env.get( "TEST_PORT" ) 21 | val PATH = sys.env.getOrElse( "TEST_PATH", "/lambdas" ) 22 | val PAYLOAD = sys.env.get( "TEST_PAYLOAD" ) 23 | val TEST_TOKEN = sys.env.getOrElse( "TEST_TOKEN", "letmein" ) 24 | 25 | 26 | def main( args : Array[String] ) : Unit = { 27 | 28 | //convert to nanoseconds 29 | val testsPerNano = TESTS_PER_SECOND.toDouble / 1e9 30 | val testPeriod = 1.0 / testsPerNano 31 | 32 | log.debug( "Starting test harness for " + RUN_SECONDS + " seconds..." ) 33 | log.debug( "Test period is " + testPeriod + " nanosecods" ) 34 | 35 | val start = System.nanoTime() 36 | 37 | var lastTestTime = System.nanoTime 38 | do { 39 | 40 | if( (System.nanoTime - lastTestTime) < testPeriod ) 41 | { 42 | val waitNanos = testPeriod - (System.nanoTime - lastTestTime) 43 | val waitMillis = waitNanos / 1e6 44 | //log.debug( s"Waiting $waitMillis for next test ..." ) 45 | 46 | Thread.sleep( waitMillis.toInt ) 47 | } 48 | 49 | //log.debug( "Launching test..." 
) 50 | launch() 51 | lastTestTime = System.nanoTime 52 | 53 | } while( (start + (RUN_SECONDS * 1e9) > System.nanoTime ) ) 54 | 55 | log.debug( "Stopped test harness" ) 56 | } 57 | 58 | def launch(): Unit = { 59 | 60 | val builder = new ( com.ning.http.client.AsyncHttpClientConfig.Builder )( ) 61 | val client = new play.api.libs.ws.ning.NingWSClient( builder.build( ) ) 62 | 63 | val portString = if( PORT.isDefined ) s":${PORT.get}" else "" 64 | val uri = s"http://${HOST}$portString$PATH" 65 | //log.debug( "calling : " + uri ) 66 | 67 | val start = System.nanoTime() 68 | 69 | Future { 70 | 71 | val res = if( PAYLOAD.isDefined ) { 72 | val load = Source.fromFile("./" + PAYLOAD.get).getLines.toList.mkString 73 | client.url( uri ).withTimeout( 60000 ).withHeaders( ( "Authorization" -> s"Bearer ${TEST_TOKEN}" ) ).withHeaders(("Content-type" -> "application/json")).post(load) 74 | } 75 | else 76 | { 77 | client.url( uri ).withTimeout( 60000 ).withHeaders( ( "Authorization" -> s"Bearer ${TEST_TOKEN}" ) ).get 78 | } 79 | val result = Await.result( res, 120 seconds ) 80 | result 81 | }.onComplete{ resp => 82 | 83 | val sb = new StringBuilder 84 | sb ++= ( "test result : \n") 85 | sb ++= (resp.get.body + "\n") 86 | resp.get.allHeaders.foreach{ head => 87 | sb ++= (head._1 + "\n\t") 88 | head._2.foreach{ entry => 89 | sb ++= (entry + ", ") 90 | } 91 | sb ++= "\n" 92 | } 93 | log.debug( sb.toString ) 94 | 95 | client.close 96 | 97 | val end = System.nanoTime 98 | val secondsToComplete = (end - start) * 1e-9 99 | log.info("elapsed time : " + secondsToComplete ) 100 | } 101 | 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /worker/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | logs 3 | localrc 4 | remoterc 5 | project/project 6 | project/target 7 | mods 8 | target 9 | tmp 10 | change.json 11 | approver.json 12 | .history 13 | *.event 14 | *.json 15 | dist 16 | /.idea 17 | /*.iml 18 | /out 19 | /.idea_modules 20 | /.classpath 21 | /.project 22 | /RUNNING_PID 23 | /.settings 24 | *.jar 25 | /bin/ 26 | -------------------------------------------------------------------------------- /worker/README.md: -------------------------------------------------------------------------------- 1 | #com.galacticfog.gestalt.lambda.worker 2 | 3 | This holds the worker node for the lambda service. 
4 | -------------------------------------------------------------------------------- /worker/build.sbt: -------------------------------------------------------------------------------- 1 | name := """gestalt-lambda-worker""" 2 | 3 | organization := "com.galacticfog" 4 | 5 | version := "0.0.1-SNAPSHOT" 6 | 7 | scalaVersion := "2.11.5" 8 | 9 | credentials += Credentials(Path.userHome / ".ivy2" / ".credentials") 10 | 11 | publishTo := Some("Galacticfog Snapshot" at "https://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local") 12 | 13 | resolvers ++= Seq( 14 | "gestalt" at "http://galacticfog.artifactoryonline.com/galacticfog/libs-snapshots-local", 15 | "snapshots" at "http://scala-tools.org/repo-snapshots", 16 | "releases" at "http://scala-tools.org/repo-releases") 17 | 18 | credentials ++= { 19 | (for { 20 | realm <- sys.env.get("GESTALT_RESOLVER_REALM") 21 | username <- sys.env.get("GESTALT_RESOLVER_USERNAME") 22 | resolverUrlStr <- sys.env.get("GESTALT_RESOLVER_URL") 23 | resolverUrl <- scala.util.Try{url(resolverUrlStr)}.toOption 24 | password <- sys.env.get("GESTALT_RESOLVER_PASSWORD") 25 | } yield { 26 | Seq(Credentials(realm, resolverUrl.getHost, username, password)) 27 | }) getOrElse(Seq()) 28 | } 29 | 30 | resolvers ++= { 31 | sys.env.get("GESTALT_RESOLVER_URL") map { 32 | url => Seq("gestalt-resolver" at url) 33 | } getOrElse(Seq()) 34 | } 35 | 36 | // 37 | // Adds project name to prompt like in a Play project 38 | // 39 | shellPrompt in ThisBuild := { state => "\033[0;36m" + Project.extract(state).currentRef.project + "\033[0m] " } 40 | 41 | libraryDependencies ++= Seq ( 42 | "com.galacticfog" %% "gestalt-utils" % "0.0.1-SNAPSHOT" withSources(), 43 | "com.galacticfog" %% "gestalt-lambda-io" % "0.0.1-SNAPSHOT" withSources(), 44 | "org.slf4j" % "slf4j-api" % "1.7.10", 45 | "ch.qos.logback" % "logback-classic" % "1.1.2", 46 | "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0" 47 | ) 48 | 49 | -------------------------------------------------------------------------------- /worker/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /worker/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Connection Pool settings 4 | dev.db.default.poolInitialSize=20 5 | dev.db.default.poolMaxSize=40 6 | dev.db.default.poolConnectionTimeoutMillis=7000 -------------------------------------------------------------------------------- /worker/src/main/resources/console.js: -------------------------------------------------------------------------------- 1 | var stdout = java.lang.System.out; 2 | var stderr = java.lang.System.err; 3 | 4 | /** 5 | * A simple console object that can be used to print log messages 6 | * errors, and warnings. 7 | * @example 8 | * 9 | * console.log('Hello standard out'); 10 | * console.warn('Warning standard error'); 11 | * console.error('Alert! Alert!'); 12 | * 13 | */ 14 | var console = { 15 | 16 | /** 17 | * Log the msg to STDOUT. 18 | * 19 | * @param {string} msg The message to log to standard out. 20 | */ 21 | log: function(msg) { 22 | stdout.println(msg); 23 | }, 24 | 25 | /** 26 | * Log the msg to STDERR 27 | * 28 | * @param {string} msg The message to log with a warning to standard error. 
29 | */ 30 | warn: function(msg) { 31 | stderr.println(msg); 32 | }, 33 | 34 | /** 35 | * Log the msg to STDERR 36 | * 37 | * @param {string} msg The message to log as an error to standard error. 38 | */ 39 | error: function(msg) { 40 | stderr.println(msg); 41 | } 42 | }; 43 | 44 | module.exports = console; 45 | -------------------------------------------------------------------------------- /worker/src/main/resources/hello_world.js: -------------------------------------------------------------------------------- 1 | exports.hello = function(event, context) { 2 | var parsedEvent = JSON.parse( event ) 3 | // Call the console.log function. 4 | console.log("Hello World"); 5 | console.log( "Parsed Event : " + JSON.stringify( parsedEvent ) ); 6 | 7 | return JSON.stringify( parsedEvent ); 8 | } 9 | -------------------------------------------------------------------------------- /worker/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | true 8 | 10 | 11 | UTF-8 12 | 13 | %-4r %highlight(%-5level) - %msg%n 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /worker/src/main/scala/com/galacticfog/gestalt/lambda/worker/LambdaWorkerApp.scala: -------------------------------------------------------------------------------- 1 | package com.galacticfog.gestalt.lambda.worker 2 | 3 | import com.galacticfog.gestalt.lambda.io.domain.{LambdaEvent, LambdaDao} 4 | import com.galacticfog.gestalt.utils.servicefactory.GestaltPlugin 5 | import scala.concurrent.ExecutionContext.Implicits.global 6 | 7 | import scala.concurrent.Future 8 | 9 | object LambdaWorkerApp { 10 | 11 | def main( args : Array[String] ) : Unit = { 12 | 13 | if ( args.length < 3 ) { 14 | println( "Illegal number of arguments : " + args.length ) 15 | printUsage() 16 | sys.exit(1) 17 | } 18 | 19 | //args.foreach { a => println( "arg : " + a ) } 20 | 21 | val verticleName = args(0) 22 | val verticleFunction = args(1) 23 | val eventData = args(2) 24 | 25 | val worker : JSWorker = new JSWorker( verticleName, verticleFunction ) 26 | 27 | worker.init() 28 | worker.start( eventData ) 29 | } 30 | 31 | def printUsage(): Unit = { 32 | println( "Usage : ") 33 | println( "\t LambdaWorkerApp <verticleName> <verticleFunction> <eventData>" ) 34 | } 35 | 36 | } 37 | --------------------------------------------------------------------------------
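Finally, a minimal sketch of driving the worker by hand with the bundled hello_world.js shown above. The wrapper object and the event payload are illustrative, and the script is assumed to be resolvable by JSWorker from the working directory or classpath.

package com.galacticfog.gestalt.lambda.worker

object WorkerSmokeTest {
  def main( args : Array[String] ) : Unit = {
    // LambdaWorkerApp expects: <verticleName> <verticleFunction> <eventData>
    val event = """{ "greeting" : "hello from the smoke test" }"""
    LambdaWorkerApp.main( Array( "hello_world.js", "hello", event ) )
  }
}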