├── .gitignore ├── LICENSE ├── README.md ├── apps ├── akka-kafka-streams-model-server │ ├── Jenkinsfile │ ├── README.md │ ├── build.sh │ ├── data │ │ ├── optimized_WineQuality.pb │ │ ├── winequalityDecisionTreeClassification.pmml │ │ ├── winequalityDesisionTreeRegression.pmml │ │ ├── winequalityGeneralizedLinearRegressionGamma.pmml │ │ ├── winequalityGeneralizedLinearRegressionGaussian.pmml │ │ ├── winequalityLinearRegression.pmml │ │ ├── winequalityMultilayerPerceptron.pmml │ │ ├── winequalityRandonForrestClassification.pmml │ │ └── winequality_red.csv │ ├── helm │ │ ├── Chart.yaml.template │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── modelserviceinstall.yaml │ │ │ └── publisherinstall.yaml │ │ ├── values-metadata.yaml │ │ └── values.yaml.template │ ├── images │ │ ├── Akkajoin.png │ │ ├── Kafkastreamsclusters.png │ │ ├── kafkastreamsJoin.png │ │ ├── overallModelServing.png │ │ └── queryablestate.png │ ├── manager-support │ │ ├── fdp-akka-kafka-streams-model-server.descriptor.json │ │ ├── fdp-akka-kafka-streams-model-server.json │ │ └── fdp-akka-kafka-streams-model-server.options.yaml │ ├── modelserverchart.tgz │ └── source │ │ └── core │ │ ├── akkastreamssvc │ │ └── src │ │ │ └── main │ │ │ ├── resources │ │ │ ├── application.conf │ │ │ ├── cluster.conf │ │ │ ├── grafana-dashboard.json │ │ │ ├── grafana-source.json.template │ │ │ ├── grafana.json │ │ │ └── localWithCluster.conf │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── modelServer │ │ │ ├── DataRecord.scala │ │ │ ├── ModelToServe.scala │ │ │ ├── kafka │ │ │ ├── EmbeddedSingleNodeKafkaCluster.scala │ │ │ └── KafkaEmbedded.scala │ │ │ ├── model │ │ │ ├── DataRecord.scala │ │ │ ├── Model.scala │ │ │ ├── ModelFactory.scala │ │ │ ├── PMML │ │ │ │ └── PMMLModel.scala │ │ │ └── tensorflow │ │ │ │ └── TensorFlowModel.scala │ │ │ ├── modelServer │ │ │ ├── AkkaModelServer.scala │ │ │ ├── ModelStage.scala │ │ │ └── ReadableModelStateStore.scala │ │ │ └── queriablestate │ │ │ ├── QueriesAkkaHttpResource.scala │ │ │ └── QueriesAkkaHttpService.scala │ │ ├── build.sbt │ │ ├── configuration │ │ └── src │ │ │ └── main │ │ │ ├── java │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── configuration │ │ │ │ ├── AppConfig.java │ │ │ │ ├── AppParameters.java │ │ │ │ ├── GrafanaClient.java │ │ │ │ ├── GrafanaConfig.java │ │ │ │ ├── InfluxDBClient.java │ │ │ │ └── InfluxDBConfig.java │ │ │ └── resources │ │ │ └── reference.conf │ │ ├── kafkastreamssvc │ │ └── src │ │ │ └── main │ │ │ ├── java │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ ├── modelserver │ │ │ │ ├── store │ │ │ │ │ ├── ModelStateSerde.java │ │ │ │ │ ├── ModelStateStore.java │ │ │ │ │ ├── ModelStateStoreBuilder.java │ │ │ │ │ ├── ModelStateStoreChangeLogger.java │ │ │ │ │ ├── ReadableModelStateStore.java │ │ │ │ │ └── StoreState.java │ │ │ │ └── withstore │ │ │ │ │ ├── DataProcessorWithStore.java │ │ │ │ │ ├── ModelProcessorWithStore.java │ │ │ │ │ └── ModelServerWithStore.java │ │ │ │ └── queriablestate │ │ │ │ ├── HostStoreInfo.java │ │ │ │ ├── MetadataService.java │ │ │ │ ├── ModelServingInfo.java │ │ │ │ └── QueriesRestService.java │ │ │ └── resources │ │ │ ├── cluster.conf │ │ │ ├── grafana-dashboard.json │ │ │ ├── grafana-source.json.template │ │ │ ├── grafana.json │ │ │ └── localWithCluster.conf │ │ ├── model │ │ └── src │ │ │ └── main │ │ │ └── java │ │ │ └── com │ │ │ └── lightbend │ │ │ └── model │ │ │ ├── CurrentModelDescriptor.java │ │ │ ├── DataConverter.java │ │ │ ├── Model.java │ │ │ ├── ModelFactory.java │ │ │ ├── PMML │ │ │ ├── PMMLModel.java 
│ │ │ └── PMMLModelFactory.java │ │ │ └── tensorflow │ │ │ ├── TensorflowModel.java │ │ │ └── TensorflowModelFactory.java │ │ ├── project │ │ ├── Dependencies.scala │ │ ├── Versions.scala │ │ ├── assembly.sbt │ │ ├── build.properties │ │ ├── plugins.sbt │ │ └── scalapb.sbt │ │ ├── protobufs │ │ └── src │ │ │ └── main │ │ │ └── protobuf │ │ │ ├── modeldescriptor.proto │ │ │ └── winerecord.proto │ │ └── publisher │ │ └── src │ │ ├── main │ │ ├── resources │ │ │ ├── application.conf │ │ │ ├── cluster.conf │ │ │ └── localWithCluster.conf │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── kafka │ │ │ ├── DataProvider.scala │ │ │ └── KafkaMessageSender.scala │ │ └── test │ │ └── scala │ │ └── com │ │ └── lightbend │ │ └── kafka │ │ └── WineRecordOpsTest.scala ├── anomaly-detection │ ├── README.md │ ├── build.sh │ ├── helm │ │ ├── .helmignore │ │ ├── Chart.yaml.template │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── modelserviceinstall.yaml │ │ │ ├── publisherinstall.yaml │ │ │ └── trainingpod.yaml │ │ ├── values-metadata.yaml │ │ └── values.yaml.template │ └── source │ │ └── core │ │ ├── admodelserver │ │ └── src │ │ │ └── main │ │ │ ├── resources │ │ │ ├── admodelserver.json │ │ │ ├── application.conf │ │ │ ├── cluster.conf │ │ │ ├── grafana-dashboard.json │ │ │ └── grafana-source.json │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── intel │ │ │ └── modelserver │ │ │ ├── AkkaModelServer.scala │ │ │ ├── actors │ │ │ ├── ModelServingActor.scala │ │ │ ├── ModelServingManager.scala │ │ │ └── persistence │ │ │ │ └── FilePersistence.scala │ │ │ └── queryablestate │ │ │ └── RestService.scala │ │ ├── adpublisher │ │ └── src │ │ │ └── main │ │ │ ├── resources │ │ │ ├── adpublisher.json │ │ │ ├── application.conf │ │ │ ├── cluster.conf │ │ │ ├── grafana-dashboard.json │ │ │ ├── grafana-source.json │ │ │ └── grafana.json │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── intel │ │ │ ├── generator │ │ │ └── Generator.scala │ │ │ └── publish │ │ │ └── DataProvider.scala │ │ ├── adspeculativemodelserver │ │ └── src │ │ │ └── main │ │ │ ├── resources │ │ │ ├── adspeculativemodelserver.json │ │ │ ├── application.conf │ │ │ ├── cluster.conf │ │ │ ├── grafana-dashboard.json │ │ │ └── grafana-source.json │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── intel │ │ │ └── speculativemodelserver │ │ │ ├── actors │ │ │ ├── DataManager.scala │ │ │ ├── ModelManager.scala │ │ │ ├── ModelServingActor.scala │ │ │ ├── ModelServingManager.scala │ │ │ ├── SpeculativeModelServingCollectorActor.scala │ │ │ └── SpeculativeModelServingStarterActor.scala │ │ │ ├── modelserver │ │ │ └── AkkaModelServer.scala │ │ │ ├── persistence │ │ │ └── FilePersistence.scala │ │ │ ├── processor │ │ │ ├── Decider.scala │ │ │ └── VotingDesider.scala │ │ │ └── queryablestate │ │ │ └── QueriesAkkaHttpResource.scala │ │ ├── build.sbt │ │ ├── configuration │ │ └── src │ │ │ └── main │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── ad │ │ │ ├── configuration │ │ │ └── IntelSettings.scala │ │ │ └── package.scala │ │ ├── data │ │ ├── CPU_examples.csv │ │ ├── CP_examples.csv │ │ ├── data_preparation_complete.txt │ │ ├── last_timestamp.txt │ │ └── model.pb │ │ ├── influxsupport │ │ └── src │ │ │ └── main │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── ad │ │ │ └── influx │ │ │ ├── GrafanaSetup.scala │ │ │ ├── InfluxDBSink.scala │ │ │ ├── InfluxUtils.scala │ │ │ └── ServingData.scala │ │ ├── kafkasupport │ │ └── src │ │ │ └── main │ │ │ └── scala │ │ │ └── com │ │ │ └── 
lightbend │ │ │ └── ad │ │ │ └── kafka │ │ │ └── KafkaMessageSender.scala │ │ ├── model │ │ └── src │ │ │ └── main │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── ad │ │ │ └── model │ │ │ ├── DataRecord.scala │ │ │ ├── Model.scala │ │ │ ├── ModelFactory.scala │ │ │ ├── ModelToServe.scala │ │ │ ├── ModelToServeStats.scala │ │ │ ├── ModelWithDescriptor.scala │ │ │ ├── speculative │ │ │ ├── RequestResponse.scala │ │ │ └── SpeculativeExecutionStats.scala │ │ │ └── tensorflow │ │ │ └── TensorFlowModel.scala │ │ ├── persistence │ │ └── cpu │ │ ├── project │ │ ├── Common.scala │ │ ├── Dependencies.scala │ │ ├── Versions.scala │ │ ├── build.properties │ │ ├── plugins.sbt │ │ └── scalapb.sbt │ │ ├── protobufs │ │ └── src │ │ │ └── main │ │ │ └── protobuf │ │ │ ├── cpudata.proto │ │ │ ├── modeldescriptor.proto │ │ │ ├── servingrequest.proto │ │ │ ├── servingresponse.proto │ │ │ └── speculativedescriptor.proto │ │ ├── training_data_ingestion │ │ └── src │ │ │ └── main │ │ │ ├── resources │ │ │ └── application.conf │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── ad │ │ │ └── training │ │ │ ├── ingestion │ │ │ ├── IOUtils.scala │ │ │ └── TrainingIngestion.scala │ │ │ └── package.scala │ │ └── training_model_publish │ │ └── src │ │ └── main │ │ ├── resources │ │ └── application.conf │ │ └── scala │ │ └── com │ │ └── lightbend │ │ └── ad │ │ └── training │ │ └── publish │ │ ├── ModelPublisher.scala │ │ └── ModelUtils.scala ├── flink │ ├── README.md │ ├── build.sh │ ├── helm │ │ ├── Chart.yaml.template │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── configmap-flink.yaml │ │ │ ├── taxirideappinstall.yaml │ │ │ ├── taxirideingestioninstall.yaml │ │ │ └── taxirideprint.yaml │ │ ├── values.yaml.template │ │ └── values_metadata.yaml │ ├── manager-support │ │ ├── fdp-taxiride.descriptor.json │ │ ├── fdp-taxiride.json │ │ └── fdp-taxiride.options.yaml │ ├── source │ │ ├── Flink-operator │ │ │ └── project │ │ │ │ ├── Dependencies.scala │ │ │ │ ├── Versions.scala │ │ │ │ └── plugins.sbt │ │ ├── core │ │ │ ├── app │ │ │ │ └── src │ │ │ │ │ └── main │ │ │ │ │ ├── resources │ │ │ │ │ └── logback.xml │ │ │ │ │ └── scala │ │ │ │ │ └── com │ │ │ │ │ └── lightbend │ │ │ │ │ └── fdp │ │ │ │ │ └── sample │ │ │ │ │ └── flink │ │ │ │ │ └── app │ │ │ │ │ ├── PredictedTimeSchema.scala │ │ │ │ │ ├── PredictionModel.scala │ │ │ │ │ ├── TaxiRideSchema.scala │ │ │ │ │ ├── TravelTimePrediction.scala │ │ │ │ │ ├── model │ │ │ │ │ └── TravelTimePredictionModel.scala │ │ │ │ │ └── utils │ │ │ │ │ └── GeoUtils.scala │ │ │ ├── build.sbt │ │ │ ├── ingestion │ │ │ │ └── src │ │ │ │ │ └── main │ │ │ │ │ ├── resources │ │ │ │ │ ├── application.conf │ │ │ │ │ └── logback.xml │ │ │ │ │ └── scala │ │ │ │ │ └── com │ │ │ │ │ └── lightbend │ │ │ │ │ └── fdp │ │ │ │ │ └── sample │ │ │ │ │ └── flink │ │ │ │ │ └── ingestion │ │ │ │ │ └── DataIngestion.scala │ │ │ ├── project │ │ │ │ ├── Common.scala │ │ │ │ ├── Dependencies.scala │ │ │ │ ├── PackagingTypePlugin.scala │ │ │ │ ├── Versions.scala │ │ │ │ ├── build.properties │ │ │ │ └── plugin.sbt │ │ │ ├── resultprinter │ │ │ │ └── src │ │ │ │ │ └── main │ │ │ │ │ ├── resources │ │ │ │ │ ├── application.conf │ │ │ │ │ └── logback.xml │ │ │ │ │ └── scala │ │ │ │ │ └── com │ │ │ │ │ └── lightbend │ │ │ │ │ └── fdp │ │ │ │ │ └── sample │ │ │ │ │ └── flink │ │ │ │ │ └── reader │ │ │ │ │ └── ResultReader.scala │ │ │ └── support │ │ │ │ └── src │ │ │ │ └── main │ │ │ │ └── scala │ │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── fdp │ │ │ │ └── sample │ │ │ │ 
└── flink │ │ │ │ ├── config │ │ │ │ └── TaxiRideConfig.scala │ │ │ │ └── models │ │ │ │ ├── PredictedTime.scala │ │ │ │ └── TaxiRide.scala │ │ └── fdp-flink-taxiride │ │ │ ├── project │ │ │ ├── Common.scala │ │ │ ├── Dependencies.scala │ │ │ ├── PackagingTypePlugin.scala │ │ │ ├── Versions.scala │ │ │ └── plugin.sbt │ │ │ └── src │ │ │ └── main │ │ │ ├── resources │ │ │ └── logback.xml │ │ │ └── scala │ │ │ └── com │ │ │ └── lightbend │ │ │ └── fdp │ │ │ └── sample │ │ │ └── flink │ │ │ └── app │ │ │ ├── PredictionModel.scala │ │ │ ├── TravelTimePrediction.scala │ │ │ ├── model │ │ │ └── TravelTimePredictionModel.scala │ │ │ └── utils │ │ │ └── GeoUtils.scala │ └── test │ │ ├── bin │ │ └── app-install-test.bats │ │ └── support │ │ ├── fake.app-install.properties │ │ ├── fake1.app-install.properties │ │ └── normal.app-install.properties ├── killrweather │ ├── Jenkinsfile │ ├── README-DEVELOPERS.md │ ├── README.md │ ├── build.sh │ ├── data │ │ ├── create-timeseries.cql │ │ ├── load-timeseries.cql │ │ ├── load │ │ │ ├── ny-2008.csv.gz │ │ │ ├── ny-sf-2008.csv.gz │ │ │ ├── sf-2008.csv.gz │ │ │ └── sfo-nyc-mia-lax-chi-2008-2014.csv.zip │ │ ├── use-with-cassandra-2.0.11-or-2.1.1.cql │ │ └── weather_stations.csv │ ├── diagrams │ │ ├── Architecture.png │ │ ├── Asynchronous_Data_Ingestion_External.png │ │ ├── Asynchronous_Data_Ingestion_Internal.png │ │ ├── Asynchronous_Fault_Tolerant_Data_Pipeline.png │ │ ├── KillrWeather.png │ │ ├── Primary_Component_Initialization.png │ │ └── Streaming_Data_Pipeline.png │ ├── helm-hdfs │ │ ├── Chart.yaml.template │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ └── killrweatherinstall.yaml │ │ ├── values-metadata.yaml │ │ └── values.yaml.template │ ├── helm-pvc │ │ ├── .helmignore │ │ ├── Chart.yaml.template │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── applicationdeployment.yaml │ │ │ ├── dataloaderdeployment.yaml │ │ │ └── role.yaml │ │ ├── values-metadata.yaml │ │ └── values.yaml.template │ ├── helm-spark-checkpointing-pvc-creation │ │ ├── Chart.yaml.template │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ └── deployment.yaml │ │ ├── values-metadata.yaml │ │ └── values.yaml.template │ ├── process-templates.sh │ ├── project │ │ └── build.properties │ ├── source │ │ └── core │ │ │ ├── build.sbt │ │ │ ├── killrweather-app-local │ │ │ └── src │ │ │ │ └── main │ │ │ │ └── resources │ │ │ │ ├── application.conf │ │ │ │ ├── cluster.conf │ │ │ │ ├── create-timeseries.cql │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── grafana-source.json │ │ │ │ ├── grafana.json │ │ │ │ ├── killrweatherApp.json │ │ │ │ ├── local.conf │ │ │ │ └── localWithCluster.conf │ │ │ ├── killrweather-app │ │ │ └── src │ │ │ │ └── main │ │ │ │ ├── resources │ │ │ │ ├── application.conf │ │ │ │ ├── cluster.conf │ │ │ │ ├── create-timeseries.cql │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── grafana-source.json │ │ │ │ ├── grafana.json │ │ │ │ ├── local.conf │ │ │ │ └── localWithCluster.conf │ │ │ │ └── scala │ │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── killrweather │ │ │ │ └── app │ │ │ │ ├── KillrWeather.scala │ │ │ │ ├── cassandra │ │ │ │ └── CassandraSetup.scala │ │ │ │ └── influxdb │ │ │ │ └── InfluxDBSink.scala │ │ │ ├── killrweather-app_structured │ │ │ └── src │ │ │ │ └── main │ │ │ │ ├── resources │ │ │ │ ├── application.conf │ │ │ │ ├── cluster.conf │ │ │ │ ├── create-timeseries.cql │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── grafana-source.json │ │ │ │ ├── grafana.json │ │ │ │ └── local.conf │ │ │ │ └── scala │ │ │ │ 
└── com │ │ │ │ └── lightbend │ │ │ │ └── killrweather │ │ │ │ └── app │ │ │ │ └── structured │ │ │ │ ├── KillrWeatherStructured.scala │ │ │ │ ├── cassandra │ │ │ │ ├── CassandraSetup.scala │ │ │ │ ├── CassandraSinkForEachKillrweather.scala │ │ │ │ └── streaming │ │ │ │ │ ├── CassandraSink.scala │ │ │ │ │ ├── CassandraSinkProvider.scala │ │ │ │ │ └── DataEntities.scala │ │ │ │ └── influxDB │ │ │ │ ├── InfluxDBSinkForEachKillrweather.scala │ │ │ │ └── streaming │ │ │ │ ├── InfluxDBSink.scala │ │ │ │ └── InfluxDBSinkProvider.scala │ │ │ ├── killrweather-beam │ │ │ └── src │ │ │ │ └── main │ │ │ │ ├── resources │ │ │ │ ├── create-timeseries.cql │ │ │ │ ├── grafana-dashboard.json │ │ │ │ └── grafana-source.json │ │ │ │ └── scala │ │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── killrweater │ │ │ │ └── beam │ │ │ │ ├── KillrWeatherBeam.scala │ │ │ │ ├── cassandra │ │ │ │ ├── CassandraSetup.scala │ │ │ │ └── Entities.scala │ │ │ │ ├── coders │ │ │ │ ├── ScalaDoubleCoder.scala │ │ │ │ ├── ScalaIntCoder.scala │ │ │ │ └── ScalaStringCoder.scala │ │ │ │ ├── data │ │ │ │ ├── DataObjects.scala │ │ │ │ └── StatCounter.scala │ │ │ │ ├── grafana │ │ │ │ └── GrafanaSetup.scala │ │ │ │ ├── influxdb │ │ │ │ └── DataTransformers.scala │ │ │ │ ├── kafka │ │ │ │ ├── JobConfiguration.scala │ │ │ │ └── KafkaOptions.scala │ │ │ │ └── processors │ │ │ │ ├── CassandraTransformFn.scala │ │ │ │ ├── ConvertDataRecordFn.scala │ │ │ │ ├── GroupIntoBatchesFn.scala │ │ │ │ ├── SimplePrintFn.scala │ │ │ │ ├── WriteDailyToCassandraFn.scala │ │ │ │ ├── WriteMonthlyToCassandraFn.scala │ │ │ │ ├── WriteRawToCassandraFn.scala │ │ │ │ └── WriteToInfluxDBFn.scala │ │ │ ├── killrweather-core │ │ │ └── src │ │ │ │ ├── main │ │ │ │ ├── resources │ │ │ │ │ ├── logback.xml │ │ │ │ │ └── reference.conf │ │ │ │ └── scala │ │ │ │ │ ├── com │ │ │ │ │ └── lightbend │ │ │ │ │ │ └── killrweather │ │ │ │ │ │ ├── grafana │ │ │ │ │ │ └── GrafanaSetup.scala │ │ │ │ │ │ ├── influxdb │ │ │ │ │ │ └── InfluxSetup.scala │ │ │ │ │ │ ├── kafka │ │ │ │ │ │ ├── KafkaLocalServer.scala │ │ │ │ │ │ ├── MessageListener.scala │ │ │ │ │ │ ├── MessageSender.scala │ │ │ │ │ │ └── RecordProcessorTrait.scala │ │ │ │ │ │ ├── settings │ │ │ │ │ │ └── WeatherSettings.scala │ │ │ │ │ │ └── utils │ │ │ │ │ │ ├── DailyPrecipitation.scala │ │ │ │ │ │ ├── DailyPressure.scala │ │ │ │ │ │ ├── DailyTemperature.scala │ │ │ │ │ │ ├── DailyWeatherData.scala │ │ │ │ │ │ ├── DailyWindSpeed.scala │ │ │ │ │ │ ├── RawWeatherData.scala │ │ │ │ │ │ └── WeatherStation.scala │ │ │ │ │ └── org │ │ │ │ │ └── apache │ │ │ │ │ └── spark │ │ │ │ │ └── sql │ │ │ │ │ └── catalyst.scala │ │ │ │ └── test │ │ │ │ ├── resources │ │ │ │ └── application.conf │ │ │ │ └── scala │ │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── killrweather │ │ │ │ └── settings │ │ │ │ └── WeatherSettingsTest.scala │ │ │ ├── killrweather-grpclient │ │ │ └── src │ │ │ │ └── main │ │ │ │ ├── resources │ │ │ │ ├── cluster.conf │ │ │ │ └── localWithCluster.conf │ │ │ │ └── scala │ │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── killrweather │ │ │ │ └── client │ │ │ │ └── grpc │ │ │ │ ├── WeatherGRPCAsynchClientTest.scala │ │ │ │ ├── WeatherGRPCClient.scala │ │ │ │ └── WeatherGRPCClientTest.scala │ │ │ ├── killrweather-httpclient │ │ │ └── src │ │ │ │ └── main │ │ │ │ ├── resources │ │ │ │ ├── cluster.conf │ │ │ │ └── localWithCluster.conf │ │ │ │ └── scala │ │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── killrweather │ │ │ │ └── client │ │ │ │ └── http │ │ │ │ ├── RestAPIs.scala │ │ │ │ ├── resources │ │ │ │ └── 
WeatherReportResource.scala │ │ │ │ ├── routing │ │ │ │ └── JSONResource.scala │ │ │ │ ├── serializers │ │ │ │ ├── JSONSupport.scala │ │ │ │ └── Json4sSupport.scala │ │ │ │ └── services │ │ │ │ └── RequestService.scala │ │ │ ├── killrweather-loader │ │ │ └── src │ │ │ │ ├── main │ │ │ │ ├── resources │ │ │ │ │ ├── cluster.conf │ │ │ │ │ ├── local.conf │ │ │ │ │ └── localWithCluster.conf │ │ │ │ └── scala │ │ │ │ │ └── com │ │ │ │ │ └── lightbend │ │ │ │ │ └── killrweather │ │ │ │ │ └── loader │ │ │ │ │ ├── grpc │ │ │ │ │ └── KafkaDataIngesterGRPC.scala │ │ │ │ │ ├── http │ │ │ │ │ └── KafkaDataIngesterRest.scala │ │ │ │ │ ├── kafka │ │ │ │ │ └── KafkaDataIngester.scala │ │ │ │ │ └── utils │ │ │ │ │ ├── BufferedReaderIterator.scala │ │ │ │ │ ├── DataConvertor.scala │ │ │ │ │ ├── FileContentIterator.scala │ │ │ │ │ ├── FilesIterator.scala │ │ │ │ │ ├── GzFileIterator.scala │ │ │ │ │ └── ZipFileIterator.scala │ │ │ │ └── test │ │ │ │ ├── resources │ │ │ │ └── samples │ │ │ │ │ ├── sample-1-2-csv.zip │ │ │ │ │ ├── sample1.csv │ │ │ │ │ ├── sample2.csv │ │ │ │ │ └── sample3.csv │ │ │ │ └── scala │ │ │ │ └── com │ │ │ │ └── lightbend │ │ │ │ └── killrweather │ │ │ │ └── loader │ │ │ │ └── utils │ │ │ │ ├── FilesIteratorTests.scala │ │ │ │ ├── ResourceLocator.scala │ │ │ │ ├── TextFileIteratorTests.scala │ │ │ │ └── ZipFileIteratorTests.scala │ │ │ ├── killrweather-manager-support │ │ │ ├── fdp-killrweather.descriptor.json │ │ │ ├── fdp-killrweather.json │ │ │ └── fdp-killrweather.options.yaml │ │ │ ├── killrweather-structured-app-local │ │ │ └── src │ │ │ │ └── main │ │ │ │ └── resources │ │ │ │ ├── application.conf │ │ │ │ ├── cluster.conf │ │ │ │ ├── create-timeseries.cql │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── grafana-source.json │ │ │ │ ├── grafana.json │ │ │ │ ├── killrweatherApp.json │ │ │ │ ├── local.conf │ │ │ │ └── localWithCluster.conf │ │ │ ├── project │ │ │ ├── Dependencies.scala │ │ │ ├── Versions.scala │ │ │ ├── build.properties │ │ │ ├── plugins.sbt │ │ │ └── scalapb.sbt │ │ │ └── protobufs │ │ │ └── src │ │ │ └── main │ │ │ └── protobuf │ │ │ └── WeatherClient.proto │ └── spark-checkpointing-chart.tgz └── sbt-common-settings │ ├── build.sbt │ ├── project │ └── build.properties │ └── settings │ ├── CommonSettings.scala │ ├── DockerAssemblySettings.scala │ └── DockerPackagerSettings.scala ├── build.sh ├── common.sh ├── process-templates.sh ├── supportingcharts ├── README.md ├── cassandrachart │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── cassandra-role.yaml │ │ ├── pdb.yaml │ │ ├── service.yaml │ │ └── statefulset.yaml │ └── values.yaml ├── cluster.yaml ├── extendedcassandrachart │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── backups.yaml │ │ ├── cassandra.rules.yaml │ │ ├── cassandraExporterConfigmap.yaml │ │ ├── cassandraExporterServicemonitor.yaml │ │ ├── cassandraReaperDeploy.yaml │ │ ├── cassandraReaperSecret.yaml │ │ ├── cassandraReaperService.yaml │ │ ├── configmap.yaml │ │ ├── pdb.yaml │ │ ├── secret.yaml │ │ ├── service.yaml │ │ └── statefulset.yaml │ └── values.yaml ├── grafanachart │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── clusterrole.yaml │ │ ├── clusterrolebinding.yaml │ │ ├── configmap-dashboard-provider.yaml │ │ ├── configmap.yaml │ │ ├── dashboards-json-configmap.yaml │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ ├── podsecuritypolicy.yaml │ │ ├── pvc.yaml │ │ ├── role.yaml │ │ ├── 
rolebinding.yaml │ │ ├── secret.yaml │ │ ├── service.yaml │ │ └── serviceaccount.yaml │ └── values.yaml ├── influxdbchart │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── config.yaml │ │ ├── deployment.yaml │ │ └── service.yaml │ └── values.yaml ├── nfschart │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── nfs-pv.png │ ├── templates │ │ ├── _helpers.tpl │ │ ├── nfs-pv.yaml │ │ ├── nfs-pvc.yaml │ │ ├── nfs-role.yaml │ │ ├── nfs-server-local-pv.yaml │ │ ├── nfs-server-rc.yaml │ │ └── nfs-server-service.yaml │ └── values.yaml ├── scc.yaml └── zeppelinchart │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ ├── NOTES.txt │ ├── _helpers.yaml │ ├── deployment.yaml │ └── svc.yaml │ └── values.yaml └── version.sh /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | *.log 3 | .cache 4 | .history 5 | .DS_Store 6 | .lib/ 7 | app/* 8 | dist/* 9 | target/ 10 | lib_managed/ 11 | src_managed/ 12 | project/boot/ 13 | project/target/ 14 | project/project/ 15 | project/plugins/project/ 16 | #idea 17 | .idea 18 | *.iml 19 | .idea_modules 20 | 21 | 22 | *.swp 23 | .scala_dependencies 24 | .worksheet 25 | ingest-intrusion-data/ingest-intrusiondata-tmp/* 26 | bigdl/source/lib 27 | release/staging/ 28 | deploy.conf 29 | apps/kstream/source/core/example-dsl/tmp/ 30 | apps/kstream/source/core/example-proc/tmp/ 31 | 32 | apps/nwintrusion/helm/values.yaml 33 | apps/nwintrusion/helm/Chart.yaml 34 | apps/bigdl/helm/values.yaml 35 | apps/bigdl/helm/Chart.yaml 36 | apps/flink/helm/values.yaml 37 | apps/flink/helm/Chart.yaml 38 | apps/kstream/helm/values.yaml 39 | apps/kstream/helm/Chart.yaml 40 | 41 | apps/killrweather/helm/values.yaml 42 | apps/killrweather/helm/Chart.yaml 43 | apps/killrweather/helm-hdfs/values.yaml 44 | apps/killrweather/helm-hdfs/Chart.yaml 45 | apps/killrweather/helm-pvc/values.yaml 46 | apps/killrweather/helm-pvc/Chart.yaml 47 | apps/killrweather/helm-spark-checkpointing-pvc-creation/values.yaml 48 | apps/killrweather/helm-spark-checkpointing-pvc-creation/Chart.yaml 49 | apps/akka-kafka-streams-model-server/helm/values.yaml 50 | apps/akka-kafka-streams-model-server/helm/Chart.yaml 51 | apps/anomaly-detection/helm/values.yaml 52 | apps/anomaly-detection/helm/Chart.yaml 53 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/Jenkinsfile: -------------------------------------------------------------------------------- 1 | 2 | pipeline { 3 | agent any 4 | 5 | stages { 6 | stage('Build') { 7 | steps { 8 | echo "Compiling..." 9 | echo "tool: ${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}" 10 | sh "${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}/bin/sbt -Dsbt.log.noformat=true clean compile" 11 | } 12 | } 13 | stage('Unit Test') { 14 | steps { 15 | echo "Testing..." 
16 | sh "${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}/bin/sbt -Dsbt.log.noformat=true test" 17 | junit "target/test-reports/*.xml" 18 | } 19 | } 20 | stage('Docker Publish') { 21 | steps { 22 | echo "TODO: Docker Publish stage" 23 | // Run the Docker tool to build the image 24 | //script { 25 | // sh "${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}/bin/sbt -Dsbt.log.noformat=true docker:publish" 26 | //} 27 | } 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | : ${NOOP:=} 5 | 6 | HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" 7 | 8 | source $HERE/../../common.sh 9 | 10 | docker_task="docker" 11 | push_msg= 12 | while [[ $# -gt 0 ]] 13 | do 14 | case $1 in 15 | -h|--h*) 16 | help 17 | exit 0 18 | ;; 19 | --push|--push-docker-images) 20 | docker_task="dockerBuildAndPush" 21 | push_msg="Pushed the docker images." 22 | ;; 23 | -v|--version*) 24 | shift 25 | VERSION=$(get_version $@) 26 | ;; 27 | *) 28 | error "Unrecognized argument $1" 29 | ;; 30 | esac 31 | shift 32 | done 33 | 34 | [[ -n $VERSION ]] || error "Version string can't be empty!" 35 | info2 "Using version $VERSION" 36 | 37 | cd ${HERE}/source/core 38 | $NOOP sbt -no-colors "set version in ThisBuild := \"$VERSION\"" "show version" clean package $docker_task 39 | 40 | echo "$PWD: built package and Docker images. $push_msg" 41 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/data/optimized_WineQuality.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/akka-kafka-streams-model-server/data/optimized_WineQuality.pb -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/helm/Chart.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: FDP_VERSION 3 | description: Akka Kafka streams model server sample application 4 | maintainers: 5 | - name: Boris Lublinsky 6 | name: Akka Kafka streams model server sample application 7 | version: FDP_VERSION 8 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/helm/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Model server for {{if eq .Values.server.type "akka" }} akka {{ else }} kafka {{ end }} is installed 2 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "modelserverchart.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
12 | */}} 13 | {{- define "modelserverchart.fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/helm/values.yaml.template: -------------------------------------------------------------------------------- 1 | # Default values for modelserverchart. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | # data.zip location 6 | data: 7 | datadirectory: http://s3-eu-west-1.amazonaws.com/fdp-killrweather-data/data/data.zip 8 | # docker images 9 | image: 10 | akka: lightbend/fdp-akka-kafka-streams-model-server-akka-streams-server 11 | kafka: lightbend/fdp-akka-kafka-streams-model-server-kafka-streams-server 12 | publisher: lightbend/fdp-akka-kafka-streams-model-server-model-publisher 13 | pullPolicy: Always 14 | version: FDP_VERSION 15 | # Configuration parameters for an application 16 | configuration: 17 | kafka: 18 | brokerlist : "broker.kafka.l4lb.thisdcos.directory:9092" 19 | grafana : 20 | host : grafana.marathon.l4lb.thisdcos.directory 21 | port : 3000 22 | influx : 23 | host : influxdb.marathon.l4lb.thisdcos.directory 24 | port : 8086 25 | statelistener : 26 | port : 5500 27 | publisher : 28 | data_publish_interval : 1 second 29 | model_publish_interval : 5 minutes 30 | data_mount : /usr/share 31 | data_file : winequality_red.csv 32 | # Server type. If akka - install akka implementation, otherwise - kafka implementation 33 | server: 34 | type: akka 35 | # INgress class used 36 | ingress: 37 | class: traefik -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/images/Akkajoin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/akka-kafka-streams-model-server/images/Akkajoin.png -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/images/Kafkastreamsclusters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/akka-kafka-streams-model-server/images/Kafkastreamsclusters.png -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/images/kafkastreamsJoin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/akka-kafka-streams-model-server/images/kafkastreamsJoin.png -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/images/overallModelServing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/akka-kafka-streams-model-server/images/overallModelServing.png -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/images/queryablestate.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/akka-kafka-streams-model-server/images/queryablestate.png -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/manager-support/fdp-akka-kafka-streams-model-server.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "id", 3 | "name" : "Akka Kafka streams model server sample application", 4 | "description" : "Akka Kafka streams model server sample application", 5 | "group" : "group", 6 | "version" : "0.1.0", 7 | "packageType" : "helmchart", 8 | "defaultAdminLink" : false, 9 | "dependencies" : [ ], 10 | "optionalDeps" : [ ], 11 | "installable" : true, 12 | "resourcePath" : "file:////Users/boris/Projects/fdp-akka-kafka-streams-model-server/modelserverchart/", 13 | "order" : 0 14 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/manager-support/fdp-akka-kafka-streams-model-server.options.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | datadirectory: '{{ data|datadirectory }}' 3 | image: 4 | akka: '{{ image|akka }}' 5 | kafka: '{{ image|kafka }}' 6 | publisher: '{{ image|publisher }}' 7 | pullPolicy: '{{ image|pullPolicy }}' 8 | version: '{{ image|version }}' 9 | configuration: 10 | kafka: 11 | brokerlist: '{{ configuration|kafka|brokerlist }}' 12 | grafana: 13 | host: '{{ configuration|grafana|host }}' 14 | port: !!int '{{ configuration|grafana|port }}' 15 | influx: 16 | host: '{{ configuration|influx|host }}' 17 | port: !!int '{{ configuration|influx|port }}' 18 | statelistener: 19 | port: !!int '{{ configuration|statelistener|port }}' 20 | publisher: 21 | data_publish_interval: '{{ configuration|publisher|data_publish_interval }}' 22 | model_publish_interval: '{{ configuration|publisher|model_publish_interval }}' 23 | data_mount: '{{ configuration|publisher|data_mount }}' 24 | data_file: '{{ configuration|publisher|data_file }}' 25 | server: 26 | type: '{{ server|type }}' 27 | 28 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/modelserverchart.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/akka-kafka-streams-model-server/modelserverchart.tgz -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = "localhost:29092" 2 | kafka.brokers = ${?KAFKA_BROKERS_LIST} 3 | 4 | zookeeper.hosts= "localhost:32181" 5 | zookeeper.hosts= ${?ZOOKEEPER_URL} 6 | 7 | grafana { 8 | host = ${?GRAFANA_HOST} 9 | port = "3000" 10 | port = ${?GRAFANA_PORT} 11 | } 12 | 13 | influxdb { 14 | host = ${?INFLUXDB_HOST} 15 | port = "8086" 16 | port = ${?INFLUXDB_PORT} 17 | } 18 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers 
= ${?KAFKA_BROKERS_LIST} 2 | model_server.port = 5500 3 | model_server.port = ${?MODEL_SERVER_PORT} 4 | 5 | grafana { 6 | host = ${?GRAFANA_HOST} 7 | port = "3000" 8 | port = ${?GRAFANA_PORT} 9 | } 10 | 11 | influxdb { 12 | host = ${?INFLUXDB_HOST} 13 | port = "8086" 14 | port = ${?INFLUXDB_PORT} 15 | } 16 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/resources/grafana-source.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "name": "serving", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://INFLUXDB_HOST:8086", 7 | "password": "root", 8 | "user": "root", 9 | "database": "serving", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": {} 13 | } 14 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = "10.0.9.234:1025" 2 | model_server.port = 5500 3 | 4 | grafana { 5 | host = "10.0.4.61" 6 | port = 20749 7 | } 8 | 9 | influxdb { 10 | host = "10.0.4.61" 11 | port = 18559 12 | } 13 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/scala/com/lightbend/modelServer/DataRecord.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelServer 2 | 3 | import com.lightbend.model.winerecord.WineRecord 4 | 5 | import scala.util.Try 6 | 7 | /** 8 | * Created by boris on 5/8/17. 9 | */ 10 | object DataRecord { 11 | 12 | def fromByteArray(message: Array[Byte]): Try[WineRecord] = Try { 13 | WineRecord.parseFrom(message) 14 | } 15 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/scala/com/lightbend/modelServer/ModelToServe.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelServer 2 | 3 | import com.lightbend.model.modeldescriptor.ModelDescriptor 4 | 5 | import scala.util.Try 6 | 7 | /** 8 | * Created by boris on 5/8/17. 
9 | */ 10 | object ModelToServe { 11 | def fromByteArray(message: Array[Byte]): Try[ModelToServe] = Try { 12 | val m = ModelDescriptor.parseFrom(message) 13 | m.messageContent.isData match { 14 | case true => new ModelToServe(m.name, m.description, m.modeltype, m.getData.toByteArray, m.dataType) 15 | case _ => throw new Exception("Location based is not yet supported") 16 | } 17 | } 18 | } 19 | 20 | case class ModelToServe(name: String, description: String, 21 | modelType: ModelDescriptor.ModelType, 22 | model: Array[Byte], dataType: String) {} 23 | 24 | case class ModelToServeStats(name: String, description: String, modelType: String, 25 | since: Long, var usage: Long = 0, var duration: Double = .0, 26 | var min: Long = Long.MaxValue, var max: Long = Long.MinValue) { 27 | def this(m: ModelToServe) = this(m.name, m.description, m.modelType.name, System.currentTimeMillis()) 28 | def incrementUsage(execution: Long): ModelToServeStats = { 29 | usage = usage + 1 30 | duration = duration + execution 31 | if (execution < min) min = execution 32 | if (execution > max) max = execution 33 | this 34 | } 35 | } 36 | 37 | object ModelToServeStats { 38 | val empty = ModelToServeStats("None", "None", "None", 0, 0, .0, 0, 0) 39 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/scala/com/lightbend/modelServer/model/DataRecord.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelServer.model 2 | 3 | import com.lightbend.model.winerecord.WineRecord 4 | 5 | import scala.util.Try 6 | 7 | /** 8 | * Created by boris on 5/8/17. 9 | */ 10 | object DataRecord { 11 | 12 | def fromByteArray(message: Array[Byte]): Try[WineRecord] = Try { 13 | WineRecord.parseFrom(message) 14 | } 15 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/scala/com/lightbend/modelServer/model/Model.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelServer.model 2 | 3 | /** 4 | * Created by boris on 5/9/17. 5 | * Basic trait for model 6 | */ 7 | trait Model { 8 | def score(input: AnyVal): AnyVal 9 | def cleanup(): Unit 10 | def toBytes(): Array[Byte] 11 | def getType: Long 12 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/scala/com/lightbend/modelServer/model/ModelFactory.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelServer.model 2 | 3 | import com.lightbend.modelServer.ModelToServe 4 | 5 | /** 6 | * Created by boris on 5/9/17. 7 | * Basic trait for model factory 8 | */ 9 | trait ModelFactory { 10 | def create(input: ModelToServe): Option[Model] 11 | def restore(bytes: Array[Byte]): Model 12 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/scala/com/lightbend/modelServer/modelServer/ReadableModelStateStore.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelServer.modelServer 2 | 3 | import com.lightbend.modelServer.ModelToServeStats 4 | 5 | /** 6 | * Created by boris on 7/21/17. 
7 | */ 8 | trait ReadableModelStateStore { 9 | def getCurrentServingInfo: ModelToServeStats 10 | } 11 | 12 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/akkastreamssvc/src/main/scala/com/lightbend/modelServer/queriablestate/QueriesAkkaHttpResource.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelServer.queriablestate 2 | 3 | import akka.http.scaladsl.server.Route 4 | import akka.http.scaladsl.server.Directives._ 5 | import com.lightbend.modelServer.ModelToServeStats 6 | import com.lightbend.modelServer.modelServer.ReadableModelStateStore 7 | import de.heikoseeberger.akkahttpjackson.JacksonSupport 8 | 9 | object QueriesAkkaHttpResource extends JacksonSupport { 10 | 11 | def storeRoutes(predictions: ReadableModelStateStore): Route = 12 | get { 13 | path("stats") { 14 | val info: ModelToServeStats = predictions.getCurrentServingInfo 15 | complete(info) 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/configuration/src/main/java/com/lightbend/configuration/AppConfig.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.configuration; 2 | 3 | import com.typesafe.config.Config; 4 | import com.typesafe.config.ConfigFactory; 5 | 6 | public class AppConfig { 7 | 8 | public static final Config config = ConfigFactory.load(); 9 | 10 | public static final String KAFKA_BROKER = config.getString("kafka.brokers"); 11 | public static final InfluxDBConfig INFLUX_DB_CONFIG = new InfluxDBConfig(config.getConfig("influxdb")); 12 | public static final GrafanaConfig GRAFANA_CONFIG = new GrafanaConfig(config.getConfig("grafana")); 13 | public static final int MODEL_SERVER_PORT = config.getInt("model_server.port"); 14 | public static final int QUERIABLE_STATE_PORT = config.getInt("queriable_state.port"); 15 | 16 | private AppConfig() {} 17 | 18 | public static String stringify() { 19 | return "kafka brokers:"+ KAFKA_BROKER + 20 | " InfluxDB:"+ INFLUX_DB_CONFIG + " Grafana:" + GRAFANA_CONFIG; 21 | } 22 | 23 | } 24 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/configuration/src/main/java/com/lightbend/configuration/AppParameters.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.configuration; 2 | 3 | public class AppParameters { 4 | 5 | private AppParameters(){} 6 | 7 | public static final String DATA_TOPIC = "models_data"; 8 | public static final String MODELS_TOPIC = "models_models"; 9 | 10 | public static final String DATA_GROUP = "wineRecordsGroup"; 11 | public static final String MODELS_GROUP = "modelRecordsGroup"; 12 | 13 | } 14 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/configuration/src/main/java/com/lightbend/configuration/GrafanaConfig.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.configuration; 2 | 3 | import com.typesafe.config.Config; 4 | 5 | public class GrafanaConfig { 6 | public final String host; 7 | public final String port; 8 | public final String user = "admin"; 9 | public final String pass = "admin"; 10 | 11 | public GrafanaConfig(Config config) { 12 | host = 
config.getString("host"); 13 | port = config.getString("port"); 14 | } 15 | 16 | public String url() { 17 | return "http://" + host+":"+port; 18 | } 19 | 20 | public String toString() { 21 | return "host ["+host+"], port ["+port+"]"; 22 | } 23 | 24 | } 25 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/configuration/src/main/java/com/lightbend/configuration/InfluxDBConfig.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.configuration; 2 | 3 | import com.typesafe.config.Config; 4 | 5 | public class InfluxDBConfig { 6 | public final String host; 7 | public final String port; 8 | public final String user = "root"; 9 | public final String pass = "root"; 10 | public final String database = "serving"; 11 | public final String retentionPolicy = "default"; 12 | 13 | public InfluxDBConfig(Config config) { 14 | host = config.getString("host"); 15 | port = config.getString("port"); 16 | } 17 | 18 | public String url() { 19 | return "http://"+host+":"+port; 20 | } 21 | 22 | public String toString() { 23 | return "host ["+host+"], port ["+port+"]"; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/configuration/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = "localhost:29092" 2 | kafka.brokers = ${?KAFKA_BROKERS_LIST} 3 | 4 | zookeeper.hosts= "localhost:32181" 5 | zookeeper.hosts= ${?ZOOKEEPER_URL} 6 | 7 | grafana { 8 | host = "" 9 | host = ${?GRAFANA_HOST} 10 | port = "" 11 | port = ${?GRAFANA_PORT} 12 | } 13 | 14 | influxdb { 15 | host = "" 16 | host = ${?INFLUXDB_HOST} 17 | port = "" 18 | port = ${?INFLUXDB_PORT} 19 | } 20 | 21 | model_server { 22 | port = 5000 23 | } 24 | 25 | queriable_state { 26 | port = 8888 27 | } 28 | 29 | task_debug { 30 | port = "N/A" 31 | port = ${?PORT0} 32 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/kafkastreamssvc/src/main/java/com/lightbend/modelserver/store/ReadableModelStateStore.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.modelserver.store; 2 | 3 | import com.lightbend.queriablestate.ModelServingInfo; 4 | 5 | /** 6 | * Created by boris on 7/13/17. 
7 | */ 8 | public interface ReadableModelStateStore { 9 | ModelServingInfo getCurrentServingInfo(); 10 | } 11 | 12 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/kafkastreamssvc/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = ${?KAFKA_BROKERS_LIST} 2 | queriable_state.port = 8880 3 | queriable_state.port = ${?MODEL_SERVER_PORT} 4 | 5 | grafana { 6 | host = ${?GRAFANA_HOST} 7 | port = "3000" 8 | port = ${?GRAFANA_PORT} 9 | } 10 | 11 | influxdb { 12 | host = ${?INFLUXDB_HOST} 13 | port = "8086" 14 | port = ${?INFLUXDB_PORT} 15 | } 16 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/kafkastreamssvc/src/main/resources/grafana-source.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "name": "serving", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://INFLUXDB_HOST:8086", 7 | "password": "root", 8 | "user": "root", 9 | "database": "serving", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": {} 13 | } 14 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/kafkastreamssvc/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = "10.0.9.234:1025" 2 | queriable_state.port = 8880 3 | 4 | grafana { 5 | host = "10.0.4.61" 6 | port = 20749 7 | } 8 | 9 | influxdb { 10 | host = "10.0.4.61" 11 | port = 18559 12 | } 13 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/model/src/main/java/com/lightbend/model/Model.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.model; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * Created by boris on 5/9/17. 7 | * Basic trait for model 8 | */ 9 | public interface Model extends Serializable { 10 | Object score(Object input); 11 | void cleanup(); 12 | byte[] getBytes(); 13 | long getType(); 14 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/model/src/main/java/com/lightbend/model/ModelFactory.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.model; 2 | 3 | import java.util.Optional; 4 | 5 | /** 6 | * Created by boris on 7/14/17. 7 | */ 8 | public interface ModelFactory { 9 | Optional create(CurrentModelDescriptor descriptor); 10 | Model restore(byte[] bytes); 11 | } 12 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/model/src/main/java/com/lightbend/model/PMML/PMMLModelFactory.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.model.PMML; 2 | 3 | import com.lightbend.model.Model; 4 | import com.lightbend.model.CurrentModelDescriptor; 5 | import com.lightbend.model.ModelFactory; 6 | 7 | import java.util.Optional; 8 | 9 | /** 10 | * Created by boris on 7/15/17. 
11 | */ 12 | public class PMMLModelFactory implements ModelFactory { 13 | 14 | private static ModelFactory instance = null; 15 | 16 | private PMMLModelFactory(){} 17 | 18 | @Override 19 | public Optional create(CurrentModelDescriptor descriptor) { 20 | try{ 21 | return Optional.of(new PMMLModel(descriptor.getModelData())); 22 | } 23 | catch (Throwable t){ 24 | System.out.println("Exception creating PMMLModel from " + descriptor); 25 | t.printStackTrace(); 26 | return Optional.empty(); 27 | } 28 | } 29 | 30 | @Override 31 | public Model restore(byte[] bytes) { 32 | try{ 33 | return new PMMLModel(bytes); 34 | } 35 | catch (Throwable t){ 36 | System.out.println("Exception restoring PMMLModel from "); 37 | t.printStackTrace(); 38 | return null; 39 | } 40 | } 41 | 42 | public static ModelFactory getInstance(){ 43 | if(instance == null) 44 | instance = new PMMLModelFactory(); 45 | return instance; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/model/src/main/java/com/lightbend/model/tensorflow/TensorflowModelFactory.java: -------------------------------------------------------------------------------- 1 | package com.lightbend.model.tensorflow; 2 | 3 | import com.lightbend.model.Model; 4 | import com.lightbend.model.CurrentModelDescriptor; 5 | import com.lightbend.model.ModelFactory; 6 | 7 | import java.util.Optional; 8 | 9 | /** 10 | * Created by boris on 7/15/17. 11 | */ 12 | public class TensorflowModelFactory implements ModelFactory { 13 | 14 | private static TensorflowModelFactory instance = null; 15 | 16 | @Override 17 | public Optional create(CurrentModelDescriptor descriptor) { 18 | 19 | try{ 20 | return Optional.of(new TensorflowModel(descriptor.getModelData())); 21 | } 22 | catch (Throwable t){ 23 | System.out.println("Exception creating TensorflowModel from " + descriptor); 24 | t.printStackTrace(); 25 | return Optional.empty(); 26 | } 27 | } 28 | 29 | @Override 30 | public Model restore(byte[] bytes) { 31 | try{ 32 | return new TensorflowModel(bytes); 33 | } 34 | catch (Throwable t){ 35 | System.out.println("Exception restoring PMMLModel from "); 36 | t.printStackTrace(); 37 | return null; 38 | } 39 | } 40 | 41 | public static ModelFactory getInstance(){ 42 | if(instance == null) 43 | instance = new TensorflowModelFactory(); 44 | return instance; 45 | } 46 | 47 | } 48 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/project/Versions.scala: -------------------------------------------------------------------------------- 1 | object Versions { 2 | val Scala = "2.11.11" 3 | val JDK = "1.8" 4 | 5 | val reactiveKafkaVersion = "0.19" 6 | val akkaVersion = "2.5.11" 7 | val akkaHttpVersion = "10.1.0" 8 | val akkaHttpJsonVersion = "1.20.0" 9 | 10 | val Curator = "4.0.0" 11 | 12 | val kafkaVersion = "1.0.0" 13 | 14 | val tensorflowVersion = "1.4.0" 15 | val PMMLVersion = "1.3.10" 16 | 17 | val jettyVersion = "9.4.7.v20170914" 18 | val jacksonVersion = "2.8.8" 19 | val jerseyVersion = "2.25" 20 | val gsonVersion = "2.8.2" 21 | val wsrsVersion = "2.0.1" 22 | 23 | val slf4jVersion = "1.7.25" 24 | 25 | val influxDBClientVersion = "2.9" 26 | 27 | val codecVersion = "1.10" 28 | val TypesafeConfigVersion = "1.3.2" 29 | val ScalatestVersion = "3.0.4" 30 | } 31 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/project/assembly.sbt: 
-------------------------------------------------------------------------------- 1 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.1") -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.1.6 -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | logLevel := Level.Warn 2 | 3 | resolvers += Resolver.sonatypeRepo("releases") 4 | 5 | resolvers += "Bintray Repository" at "https://dl.bintray.com/shmishleniy/" 6 | 7 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.6") 8 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 9 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") 10 | 11 | // Common settings plugin as a project 12 | lazy val root = project.in( file(".") ).dependsOn(RootProject(file("../../../sbt-common-settings").toURI)) 13 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/project/scalapb.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.thesamet" % "sbt-protoc" % "0.99.15") 2 | 3 | libraryDependencies += "com.trueaccord.scalapb" %% "compilerplugin" % "0.6.6" 4 | -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/protobufs/src/main/protobuf/modeldescriptor.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "com.lightbend.model"; 4 | 5 | 6 | // Description of the trained model. 7 | message ModelDescriptor { 8 | // Model name 9 | string name = 1; 10 | // Human readable description. 11 | string description = 2; 12 | // Data type for which this model is applied. 13 | string dataType = 3; 14 | // Model type 15 | enum ModelType { 16 | TENSORFLOW = 0; 17 | PMML = 1; 18 | }; 19 | ModelType modeltype = 4; 20 | oneof MessageContent { 21 | // Byte array containing the model 22 | bytes data = 5; 23 | string location = 6; 24 | } 25 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/protobufs/src/main/protobuf/winerecord.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "com.lightbend.model"; 4 | 5 | // Description of the wine. 
6 | message WineRecord { 7 | double fixed_acidity = 1; 8 | double volatile_acidity = 2; 9 | double citric_acid = 3; 10 | double residual_sugar = 4; 11 | double chlorides = 5; 12 | double free_sulfur_dioxide = 6; 13 | double total_sulfur_dioxide = 7; 14 | double density = 8; 15 | double pH = 9; 16 | double sulphates = 10; 17 | double alcohol = 11; 18 | // Data type for this record 19 | string dataType = 12; 20 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/publisher/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = "localhost:29092" 2 | kafka.brokers = ${?KAFKA_BROKERS_LIST} 3 | zookeeper.hosts= "localhost:32181" 4 | zookeeper.hosts= ${?ZOOKEEPER_URL} 5 | 6 | publisher { 7 | data_publish_interval = 1 second 8 | data_publish_interval = ${?DATA_PUBLISH_INTERVAL} 9 | model_publish_interval = 5 minutes 10 | model_publish_interval = ${?MODEL_PUBLISH_INTERVAL} 11 | data_dir = "data" 12 | data_dir = ${?DATA_DIRECTORY} 13 | data_file = "winequality_red.csv" 14 | data_file = ${?DATA_FILENAME} 15 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/publisher/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = "broker.kafka.l4lb.thisdcos.directory:9092" 2 | kafka.brokers = ${?KAFKA_BROKERS_LIST} 3 | 4 | publisher { 5 | data_publish_interval = 1 second 6 | data_publish_interval = ${?DATA_PUBLISH_INTERVAL} 7 | model_publish_interval = 5 minutes 8 | model_publish_interval = ${?MODEL_PUBLISH_INTERVAL} 9 | data_dir = "/usr/share/data" 10 | data_dir = ${?DATA_DIRECTORY} 11 | data_file = "winequality_red.csv" 12 | data_file = ${?DATA_FILENAME} 13 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/publisher/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | kafka.brokers = "10.0.9.234:1025" 2 | kafka.brokers = ${?KAFKA_BROKERS_LIST} 3 | 4 | publisher { 5 | data_publish_interval = 1 second 6 | data_publish_interval = ${?DATA_PUBLISH_INTERVAL} 7 | model_publish_interval = 5 minutes 8 | model_publish_interval = ${?MODEL_PUBLISH_INTERVAL} 9 | data_dir = "data" 10 | data_dir = ${?DATA_DIRECTORY} 11 | data_file = "winequality_red.csv" 12 | data_file = ${?DATA_FILENAME} 13 | } -------------------------------------------------------------------------------- /apps/akka-kafka-streams-model-server/source/core/publisher/src/test/scala/com/lightbend/kafka/WineRecordOpsTest.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.kafka 2 | 3 | import org.scalatest.{ Matchers, WordSpec } 4 | 5 | class WineRecordOpsTest extends WordSpec with Matchers { 6 | 7 | "toWineRecord" should { 8 | "create a WineRecord from CSV values in a String" in { 9 | 10 | // sample line from data file "7.4;0.7;0;1.9;0.076;11;34;0.9978;3.51;0.56;9.4;5" 11 | 12 | val sampleData = (7.4, 0.7, 0.0, 1.9, 0.076, 11.0, 34.0, 0.9978, 3.51, 0.56, 9.4, 5) 13 | val (fixedAcidity, volatileAcidity, citricAcid, residualSugar, chlorides, freeSulfurDioxide, 14 | totalSulfurDioxide, density, pH, sulphates, alcohol, _) = sampleData 15 | 16 | val sample = sampleData.productIterator.mkString(";") 17 | val wineRecord = 
WineRecordOps.toWineRecord(sample) 18 | wineRecord.dataType should be("wine") 19 | wineRecord.fixedAcidity should be(fixedAcidity) 20 | wineRecord.volatileAcidity should be(volatileAcidity) 21 | wineRecord.citricAcid should be(citricAcid) 22 | wineRecord.residualSugar should be(residualSugar) 23 | wineRecord.chlorides should be(chlorides) 24 | wineRecord.freeSulfurDioxide should be(freeSulfurDioxide) 25 | wineRecord.totalSulfurDioxide should be(totalSulfurDioxide) 26 | wineRecord.density should be(density) 27 | wineRecord.pH should be(pH) 28 | wineRecord.sulphates should be(sulphates) 29 | wineRecord.alcohol should be(alcohol) 30 | } 31 | 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /apps/anomaly-detection/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | : ${NOOP:=} 5 | 6 | HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" 7 | 8 | source $HERE/../../common.sh 9 | 10 | docker_task="docker" 11 | push_msg= 12 | while [[ $# -gt 0 ]] 13 | do 14 | case $1 in 15 | --push|--push-docker-images) 16 | docker_task="dockerBuildAndPush" 17 | push_msg="Pushed the docker images." 18 | ;; 19 | -v|--version*) 20 | shift 21 | VERSION=$(get_version $@) 22 | ;; 23 | *) 24 | error "Unrecognized argument $1" 25 | ;; 26 | esac 27 | shift 28 | done 29 | 30 | [[ -n $VERSION ]] || error "Version string can't be empty!" 31 | info2 "Using version $VERSION" 32 | 33 | cd ${HERE}/source/core 34 | $NOOP sbt -no-colors "set version in ThisBuild := \"$VERSION\"" clean $docker_task 35 | 36 | echo "$PWD: built package and Docker images. $push_msg" 37 | -------------------------------------------------------------------------------- /apps/anomaly-detection/helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /apps/anomaly-detection/helm/Chart.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: FDP_VERSION 3 | description: Anomaly detection sample application 4 | maintainers: 5 | - name: Boris Lublinsky 6 | name: Anomaly detection sample application 7 | version: FDP_VERSION 8 | -------------------------------------------------------------------------------- /apps/anomaly-detection/helm/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Anomaly Detection for is installed 2 | -------------------------------------------------------------------------------- /apps/anomaly-detection/helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "modelserverchart.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "modelserverchart.fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /apps/anomaly-detection/helm/templates/publisherinstall.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: fdp-ad-modelserverpublisher 5 | labels: 6 | app: fdp-ad-modelserverpublisher 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: fdp-ad-modelserverpublisher 12 | strategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: fdp-ad-modelserverpublisher 18 | spec: 19 | containers: 20 | - name: fdp-ad-modelserverpublisher 21 | image: {{ .Values.image.publisher}}:{{.Values.image.version }} 22 | imagePullPolicy: {{ .Values.image.pullPolicy }} 23 | env: 24 | - name: KAFKA_BROKERS 25 | value: {{ .Values.configuration.kafka.brokerlist}} 26 | - name: PUBLISHER_INTERVAL 27 | value: {{ .Values.configuration.publisher.data_publish_interval}} 28 | - name: GRAFANA_HOST 29 | value: {{ .Values.configuration.grafana.host}} 30 | - name: GRAFANA_PORT 31 | value: "{{ .Values.configuration.grafana.port}}" 32 | - name: INFLUXDB_HOST 33 | value: {{ .Values.configuration.influx.host}} 34 | - name: INFLUXDB_PORT 35 | value: "{{ .Values.configuration.influx.port}}" 36 | 37 | -------------------------------------------------------------------------------- /apps/anomaly-detection/helm/values.yaml.template: -------------------------------------------------------------------------------- 1 | # Default values for modelserverchart. 2 | # This is a YAML-formatted file. 
3 | 4 | # docker images 5 | image: 6 | akka: lightbend/fdp-ad-speculativemodelserver 7 | publisher: lightbend/fdp-ad-publisher 8 | trainingpublish: lightbend/trainingmodelpublish 9 | trainingingest: lightbend/trainingdataingestion 10 | trainingprocess: lightbend/amomaly-detection-intel-bigdl 11 | pullPolicy: Always 12 | version: FDP_VERSION 13 | # Configuration parameters for an application 14 | configuration: 15 | storage: 16 | localStorage: 1Gi 17 | storageshare: "/usr/data" 18 | kafka: 19 | brokerlist : "sample-cluster-kafka-brokers.fdp.svc:9092" 20 | grafana : 21 | host: "grafana.fdp.svc" 22 | port: "80" 23 | influx : 24 | host : "influxdb.fdp.svc" 25 | port : 8086 26 | statelistener : 27 | port : 5500 28 | publisher : 29 | data_publish_interval : "2 second" 30 | trainingdata : 31 | ingestion_interval : "10 minutes" 32 | ingestion_threshold_count : "1024" 33 | data_file_name : "/usr/data/CPU_examples.csv" 34 | generation_complete_file_name : "/usr/data/data_preparation_complete.txt" 35 | last_timestamp_file_name : "/usr/data/last_timestamp.txt" 36 | training_complete_file_name : "/usr/data/training_complete.txt" 37 | hyperparameters_file_name : "/usr/data/hyperparams.properties" 38 | publishingmodel : 39 | pbFileName: "/usr/data/model.pb" 40 | attributesFileName: "/usr/data/model-attributes.properties" 41 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/admodelserver/src/main/resources/admodelserver.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "/admodelserver", 3 | "container": { 4 | "docker": { 5 | "image": "lightbend/admodelserver:1.3.0", 6 | "forcePull": true 7 | } 8 | }, 9 | "instances": 1, 10 | "cpus": 0.2, 11 | "mem": 2048, 12 | "disk": 512, 13 | "gpus": 0 14 | } 15 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/admodelserver/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | akka.kafka.consumer { 2 | 3 | # If for any reason `KafkaConsumer.poll` blocks for longer than the configured 4 | # poll-timeout then it is forcefully woken up with `KafkaConsumer.wakeup`. 5 | # See https://kafka.apache.org/10/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html#wakeup-- 6 | # The KafkaConsumerActor will throw 7 | # `org.apache.kafka.common.errors.WakeupException` which will be ignored 8 | # until `max-wakeups` limit gets exceeded. 9 | wakeup-timeout = 10s 10 | 11 | # After exceeding maxinum wakeups the consumer will stop and the stage and fail. 12 | # Setting it to 0 will let it ignore the wakeups and try to get the polling done forever. 
13 | max-wakeups = 10 14 | 15 | # If enabled, log stack traces before waking up the KafkaConsumer to give 16 | # some indication why the KafkaConsumer is not honouring the `poll-timeout` 17 | wakeup-debug = false 18 | } 19 | 20 | kafka { 21 | brokers = "kafka-0-broker.kafka.autoip.dcos.thisdcos.directory:1025" 22 | brokers = ${?KAFKA_BROKERS} 23 | sourcetopic = "intelData" 24 | sourcegroup = "IntelDataGroup" 25 | modeltopic = "intelModel" 26 | modelgroup = "IntelModelGroup" 27 | servingtopic = "intelServing" 28 | servinggroup = "IntelServingGroup" 29 | } 30 | 31 | grafana { 32 | host = "10.0.14.207" 33 | host = ${?GRAFANA_HOST} 34 | port = "12504" 35 | port = ${?GRAFANA_PORT} 36 | } 37 | 38 | influxdb { 39 | host = "10.0.6.63" 40 | host = ${?INFLUXDB_HOST} 41 | port = "13299" 42 | port = ${?INFLUXDB_PORT} 43 | } 44 | 45 | modelServer{ 46 | port = 5500 47 | port = ${?MODELSERVER_PORT} 48 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/admodelserver/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "intelDemo", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://10.0.6.63:13299", 7 | "password": "root", 8 | "user": "root", 9 | "database": "intelDemo", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": { 13 | "keepCookies": [] 14 | }, 15 | "readOnly": false 16 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/admodelserver/src/main/scala/com/lightbend/intel/modelserver/actors/persistence/FilePersistence.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.modelserver.actors.persistence 2 | 3 | import java.io._ 4 | 5 | import com.lightbend.ad.model.{Model, ModelToServeStats, ModelWithDescriptor} 6 | 7 | object FilePersistence { 8 | 9 | private final val basDir = "persistence" 10 | 11 | def restoreState(dataType: String) : (Option[Model], Option[ModelToServeStats]) = { 12 | getDataInputStream(dataType) match { 13 | case Some(input) => (ModelWithDescriptor.readModel(input), ModelToServeStats.readServingInfo(input)) 14 | case _ => (None, None) 15 | } 16 | } 17 | 18 | private def getDataInputStream(fileName: String) : Option[DataInputStream] = { 19 | val file = new File(basDir + "/" + fileName) 20 | file.exists() match { 21 | case true => Some(new DataInputStream(new FileInputStream(file))) 22 | case _ => None 23 | } 24 | } 25 | 26 | def saveState(dataType: String, model: Model, servingInfo: ModelToServeStats) : Unit = { 27 | val output = getDataOutputStream(dataType) 28 | ModelWithDescriptor.writeModel(output, model) 29 | ModelToServeStats.writeServingInfo(output, servingInfo) 30 | output.flush() 31 | output.close() 32 | } 33 | 34 | private def getDataOutputStream(fileName: String) : DataOutputStream = { 35 | 36 | val dir = new File(basDir) 37 | if(!dir.exists()) { 38 | dir.mkdir() 39 | } 40 | val file = new File(dir, fileName) 41 | if(!file.exists()) 42 | file.createNewFile() 43 | new DataOutputStream(new FileOutputStream(file)) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adpublisher/src/main/resources/adpublisher.json: -------------------------------------------------------------------------------- 1 | 
{ 2 | "id": "/adpublisher", 3 | "container": { 4 | "docker": { 5 | "image": "lightbend/adpublisher:1.3.0", 6 | "forcePull": true 7 | } 8 | }, 9 | "instances": 1, 10 | "cpus": 0.2, 11 | "mem": 1024, 12 | "disk": 512, 13 | "gpus": 0 14 | } 15 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adpublisher/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | kafka { 2 | brokers = "kafka-0-broker.kafka.autoip.dcos.thisdcos.directory:1025,kafka-1-broker.kafka.autoip.dcos.thisdcos.directory:1025,kafka-2-broker.kafka.autoip.dcos.thisdcos.directory:1025,kafka-3-broker.kafka.autoip.dcos.thisdcos.directory:1025,kafka-4-broker.kafka.autoip.dcos.thisdcos.directory:1025" 3 | brokers = ${?KAFKA_BROKERS} 4 | sourcetopic = "intelData" 5 | sourcegroup = "IntelDataGroup" 6 | } 7 | 8 | grafana { 9 | host = "10.0.10.70" 10 | host = ${?GRAFANA_HOST} 11 | port = "4188" 12 | port = ${?GRAFANA_PORT} 13 | } 14 | 15 | influxdb { 16 | host = "10.0.12.89" 17 | host = ${?INFLUXDB_HOST} 18 | port = "29029" 19 | port = ${?INFLUXDB_PORT} 20 | } 21 | 22 | loader{ 23 | publishinterval = "2 second" 24 | publishinterval = ${?PUBLISHER_INTERVAL} 25 | } 26 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adpublisher/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | kafka { 2 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" 3 | brokers = ${?KAFKA_BROKERS} 4 | sourcetopic = "intelData" 5 | sourcegroup = "IntelDataGroup" 6 | } 7 | 8 | grafana { 9 | host = "grafana.marathon.l4lb.thisdcos.directory" 10 | host = ${?GRAFANA_HOST} 11 | port = "3000" 12 | port = ${?GRAFANA_PORT} 13 | } 14 | 15 | influxdb { 16 | host = "influxdb.marathon.l4lb.thisdcos.directory" 17 | host = ${?INFLUXDB_HOST} 18 | port = "8086" 19 | port = ${?INFLUXDB_PORT} 20 | } 21 | 22 | loader{ 23 | publishinterval = "2 second" 24 | publishinterval = ${?PUBLISHER_INTERVAL} 25 | } 26 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adpublisher/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "intelDemo", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://10.0.6.63:13299", 7 | "password": "root", 8 | "user": "root", 9 | "database": "intelDemo", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": { 13 | "keepCookies": [] 14 | }, 15 | "readOnly": false 16 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adspeculativemodelserver/src/main/resources/adspeculativemodelserver.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "/adspeculativemodelserver", 3 | "container": { 4 | "docker": { 5 | "image": "lightbend/adspeculativemodelserver:1.3.0", 6 | "forcePull": true 7 | } 8 | }, 9 | "instances": 1, 10 | "cpus": 0.5, 11 | "mem": 4096, 12 | "disk": 1024, 13 | "gpus": 0 14 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adspeculativemodelserver/src/main/resources/application.conf: 
-------------------------------------------------------------------------------- 1 | akka.kafka.consumer { 2 | 3 | # If for any reason `KafkaConsumer.poll` blocks for longer than the configured 4 | # poll-timeout then it is forcefully woken up with `KafkaConsumer.wakeup`. 5 | # See https://kafka.apache.org/10/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html#wakeup-- 6 | # The KafkaConsumerActor will throw 7 | # `org.apache.kafka.common.errors.WakeupException` which will be ignored 8 | # until `max-wakeups` limit gets exceeded. 9 | wakeup-timeout = 10s 10 | 11 | # After exceeding maxinum wakeups the consumer will stop and the stage and fail. 12 | # Setting it to 0 will let it ignore the wakeups and try to get the polling done forever. 13 | max-wakeups = 10 14 | 15 | # If enabled, log stack traces before waking up the KafkaConsumer to give 16 | # some indication why the KafkaConsumer is not honouring the `poll-timeout` 17 | wakeup-debug = false 18 | } 19 | 20 | kafka { 21 | brokers = "10.0.6.134:1025,10.0.7.217:1027,10.0.13.237:1026" 22 | brokers = ${?KAFKA_BROKERS} 23 | sourcetopic = "intelData" 24 | sourcegroup = "IntelDataGroup" 25 | modeltopic = "intelModel" 26 | modelgroup = "IntelModelGroup" 27 | servingtopic = "intelServing" 28 | servinggroup = "IntelServingGroup" 29 | } 30 | 31 | grafana { 32 | host = "10.0.14.207" 33 | host = ${?GRAFANA_HOST} 34 | port = "12504" 35 | port = ${?GRAFANA_PORT} 36 | } 37 | 38 | influxdb { 39 | host = "10.0.6.63" 40 | host = ${?INFLUXDB_HOST} 41 | port = "27436" 42 | port = ${?INFLUXDB_PORT} 43 | } 44 | 45 | modelServer{ 46 | port = 5501 47 | port = ${?MODELSERVER_PORT} 48 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adspeculativemodelserver/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "intelDemo", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://10.0.6.63:13299", 7 | "password": "root", 8 | "user": "root", 9 | "database": "intelDemo", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": { 13 | "keepCookies": [] 14 | }, 15 | "readOnly": false 16 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adspeculativemodelserver/src/main/scala/com/lightbend/intel/speculativemodelserver/processor/Decider.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.intel.speculativemodelserver.processor 2 | 3 | import akka.actor.ActorRef 4 | import com.lightbend.ad.model.speculative.ServingResponse 5 | 6 | import scala.collection.mutable.ListBuffer 7 | 8 | trait Decider { 9 | 10 | def decideResult(results: CurrentProcessingResults): Any 11 | } 12 | 13 | case class CurrentProcessingResults(models : Int, start : Long, reply: ActorRef, results : ListBuffer[ServingResponse]) -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/adspeculativemodelserver/src/main/scala/com/lightbend/intel/speculativemodelserver/processor/VotingDesider.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.speculativemodelserver.processor 2 | 3 | import java.util.concurrent.TimeUnit 4 | 5 | import com.lightbend.ad.model.ServingResult 6 | import 
com.lightbend.intel.speculativemodelserver.processor.{CurrentProcessingResults, Decider} 7 | 8 | object VotingDesider extends Decider { 9 | 10 | // The simple voting decider for results 0 or 1. Returning 0 or 1 11 | override def decideResult(results: CurrentProcessingResults): Any = { 12 | 13 | var result = ServingResult.noModel 14 | var sum = .0 15 | var count = 0 16 | var source = 0 17 | results.results.foreach(res => res.result match { 18 | case r if(r.processed) => 19 | sum = sum + r.result.getOrElse(0) 20 | if(r.source != source) source = r.source 21 | count = count + 1 22 | case _ => 23 | }) 24 | if(count == 0) result else { 25 | val res = sum/count 26 | val intres = if(res < .5) 0 else 1 27 | ServingResult("voter model", source, Some(intres), TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - results.start)) 28 | } 29 | } 30 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/configuration/src/main/scala/com/lightbend/ad/package.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad 2 | 3 | import java.nio.charset.Charset 4 | 5 | package object configuration { 6 | final val CHARSET = Charset.forName("UTF-8") 7 | 8 | implicit def asFiniteDuration(d: java.time.Duration) = 9 | scala.concurrent.duration.Duration.fromNanos(d.toNanos) 10 | } 11 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/data/CPU_examples.csv: -------------------------------------------------------------------------------- 1 | "Time","CPU","Class" 2 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/data/CP_examples.csv: -------------------------------------------------------------------------------- 1 | "Time","CPU","Class" 2 | 1,0.4503367330541743,0 3 | 2,0.6688348300748301,0 4 | 3,0.5053504577433944,0 5 | 4,0.36305537648584196,0 6 | 5,0.25753239347072343,0 7 | 6,0.18322712228811658,0 8 | 7,0.4531029295344415,0 9 | 8,0.5016277466535272,0 10 | 9,0.3082826794643865,0 11 | 10,0.17606953143514895,0 12 | 11,0.4741293122970569,0 13 | 12,0.08766377905436781,0 14 | 13,0.2888081176901152,0 15 | 14,0.3386192949077981,0 16 | 15,0.473532260867383,0 17 | 16,0.3056351520742385,0 18 | 17,0.2970064141862538,0 19 | 18,0.2787435813323523,0 20 | 19,0.6436244739366674,0 21 | 20,0.21454079442634494,0 22 | 21,0.23629974424365274,0 23 | 22,0.31344238033354777,0 24 | 23,0.043260684303266794,0 25 | 24,0.33177803395109395,0 26 | 25,0.1290685178820158,0 27 | 26,0.20819397617963709,0 28 | 27,0.41282748547133574,0 29 | 28,0.249678092842258,0 30 | 29,0.2812354825594514,0 31 | 30,0.37002538828621834,0 32 | 31,0.12187453653021574,0 33 | 32,0.3860404369531608,0 34 | 33,0.24293611682854374,0 35 | 34,0.23079895998859068,0 36 | 35,0.04983692422963121,0 37 | 36,0.1909271064810082,0 38 | 37,0.3287731003123454,0 39 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/data/data_preparation_complete.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/anomaly-detection/source/core/data/data_preparation_complete.txt -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/data/last_timestamp.txt: 
-------------------------------------------------------------------------------- 1 | 2018-07-05T11:49:08.901Z -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/data/model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/anomaly-detection/source/core/data/model.pb -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/influxsupport/src/main/scala/com/lightbend/ad/influx/ServingData.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.influx 2 | 3 | case class ServingData(served : Long, source : Long, model : String, duration : Long) 4 | case class ServingModelData(served : Long, model : String, duration : Long) 5 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/model/src/main/scala/com/lightbend/ad/model/DataRecord.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.model 2 | 3 | import java.io.ByteArrayOutputStream 4 | 5 | import com.lightbend.model.cpudata.{CPUData, ServingResultMessage} 6 | 7 | import scala.util.Try 8 | 9 | /** 10 | * Created by boris on 5/8/17. 11 | */ 12 | object DataRecord { 13 | 14 | val bos = new ByteArrayOutputStream() 15 | 16 | def fromByteArray(message: Array[Byte]): Try[CPUData] = Try { 17 | CPUData.parseFrom(message) 18 | } 19 | 20 | def toByteArray(servingResult : ServingResultMessage) : Array[Byte] = { 21 | bos.reset() 22 | servingResult.writeTo(bos) 23 | bos.toByteArray 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/model/src/main/scala/com/lightbend/ad/model/Model.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.model 2 | 3 | import com.lightbend.model.cpudata.CPUData 4 | 5 | /** 6 | * Created by boris on 5/9/17. 7 | * Basic trait for models. For simplicity, we assume the data to be scored are WineRecords. 8 | */ 9 | trait Model { 10 | def score(record: CPUData): Option[Int] 11 | def cleanup(): Unit 12 | def toBytes(): Array[Byte] 13 | } 14 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/model/src/main/scala/com/lightbend/ad/model/ModelFactory.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.model 2 | 3 | /** 4 | * Created by boris on 5/9/17. 
5 | * Basic trait for model factory 6 | */ 7 | trait ModelFactory { 8 | def create(input: ModelToServe): Model 9 | def restore(bytes: Array[Byte]): Model 10 | } 11 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/model/src/main/scala/com/lightbend/ad/model/speculative/RequestResponse.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.model.speculative 2 | 3 | import scala.util.{Random, Try} 4 | import com.lightbend.ad.model.ServingResult 5 | import com.lightbend.speculative.speculativedescriptor.SpeculativeDescriptor 6 | 7 | // Because we are doing everything in memory, we implement local equivalent to protobufs 8 | 9 | case class ServingRequest(GUID : String, data : Any) 10 | 11 | case class ServingQualifier(key : String, value : String) 12 | 13 | case class ServingResponse(GUID : String, result : ServingResult, confidence : Option[Double] = None, qualifiers : List[ServingQualifier] = List.empty) 14 | 15 | object ServingResponse{ 16 | 17 | val gen = Random 18 | val qualifiers = List(ServingQualifier("key", "value")) 19 | 20 | def apply(GUID: String, result: ServingResult): ServingResponse = { 21 | new ServingResponse(GUID, result, Some(gen.nextDouble()), qualifiers) 22 | } 23 | } 24 | 25 | object SpeculativeConverter { 26 | def fromByteArray(message: Array[Byte]): Try[SpeculativeDescriptor] = Try { 27 | SpeculativeDescriptor.parseFrom(message) 28 | } 29 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/model/src/main/scala/com/lightbend/ad/model/speculative/SpeculativeExecutionStats.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.model.speculative 2 | 3 | /** 4 | * Created by boris on 5/8/17. 
5 | */ 6 | final case class SpeculativeExecutionStats( 7 | name: String, 8 | decider : String, 9 | tmout: Long, 10 | since: Long = System.currentTimeMillis(), 11 | usage: Long = 0, 12 | duration: Double = 0.0, 13 | min: Long = 0, 14 | max: Long = 0) { 15 | 16 | def incrementUsage(execution: Long): SpeculativeExecutionStats = { 17 | copy( 18 | usage = usage + 1, 19 | duration = duration + execution, 20 | min = if (execution < min) execution else min, 21 | max = if (execution > max) execution else max 22 | ) 23 | } 24 | 25 | def updateConfig(timeout : Long): SpeculativeExecutionStats = copy(tmout = timeout) 26 | } 27 | 28 | object SpeculativeExecutionStats{ 29 | 30 | val empty = SpeculativeExecutionStats("", "", 0l, 0l, 0l, .0, 0l, 0l) 31 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/persistence/cpu: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/anomaly-detection/source/core/persistence/cpu -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/project/Versions.scala: -------------------------------------------------------------------------------- 1 | object Versions { 2 | val Scala = "2.11.11" 3 | val JDK = "1.8" 4 | 5 | val reactiveKafkaVersion = "0.21.1" 6 | val akkaVersion = "2.5.13" 7 | val akkaHttpVersion = "10.1.3" 8 | val akkaHttpJsonVersion = "1.21.0" 9 | 10 | // val Curator = "4.0.0" 11 | 12 | val kafkaVersion = "1.1.0" 13 | 14 | val tensorflowVersion = "1.8.0" 15 | 16 | 17 | val influxDBClientVersion = "2.10" 18 | val ScalaHTTPVersion = "2.4.0" 19 | 20 | val TypesafeConfigVersion = "1.3.3" 21 | val FicusVersion = "1.4.3" 22 | 23 | val catsVersion = "1.1.0" 24 | val catsEffectVersion = "1.0.0-RC2" 25 | val monixVersion = "3.0.0-RC1" 26 | val guavaVersion = "25.1-jre" 27 | } 28 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version = 1.1.6 -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | logLevel := Level.Warn 2 | 3 | resolvers += Resolver.sonatypeRepo("releases") 4 | 5 | resolvers += "Bintray Repository" at "https://dl.bintray.com/shmishleniy/" 6 | 7 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.6") 8 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 9 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") 10 | 11 | // Common settings plugin as a project 12 | lazy val root = project.in( file(".") ).dependsOn(RootProject(file("../../../sbt-common-settings").toURI)) 13 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/project/scalapb.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.thesamet" % "sbt-protoc" % "0.99.15") 2 | 3 | libraryDependencies += "com.trueaccord.scalapb" %% "compilerplugin" % "0.6.6" 4 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/protobufs/src/main/protobuf/cpudata.proto: 
-------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "com.lightbend.model"; 4 | 5 | // Description of the cpu message. 6 | message CPUData { 7 | double utilization = 1; // CPU utilization 8 | int32 class = 2; // Class - 0 normal; 1 abnormal. For training only 9 | string dataType = 3; // Data type for this record 10 | } 11 | 12 | // Description of the model serving message. 13 | message ServingResultMessage { 14 | int32 served = 1; // Served - 0 normal; 1 abnormal. For training only 15 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/protobufs/src/main/protobuf/modeldescriptor.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "com.lightbend.model"; 4 | 5 | // Description of the extra parameters. 6 | message ModelPreprocessing { 7 | int32 width = 1; // model width 8 | double mean = 2; // mean used by model 9 | double std = 3; // STD used by model 10 | string input = 4; // Model input 11 | string output = 5; // Model output 12 | } 13 | 14 | 15 | // Description of the trained model. 16 | message ModelDescriptor { 17 | // Model name 18 | string name = 1; 19 | // Human readable description. 20 | string description = 2; 21 | // Data type for which this model is applied. 22 | string dataType = 3; 23 | oneof MessageContent { 24 | // Byte array containing the model 25 | bytes data = 4; 26 | string location = 5; 27 | } 28 | ModelPreprocessing preprocessing = 6; // Preprocessing parameters 29 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/protobufs/src/main/protobuf/servingrequest.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "com.lightbend.speculative"; 4 | 5 | // Description of the model serving request. 6 | message ServingRequest { 7 | string uuid = 1; 8 | bytes data = 2; 9 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/protobufs/src/main/protobuf/servingresponse.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "com.lightbend.speculative"; 4 | 5 | // Description of the model serving response. 6 | message ServingResponse { 7 | string uuid = 1; 8 | bytes data = 2; 9 | double confidence = 3; 10 | repeated ServingQualifier qualifiers = 4; 11 | } 12 | 13 | // Description of the model serving qualifier. 14 | message ServingQualifier{ 15 | string key = 1; 16 | string value = 2; 17 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/protobufs/src/main/protobuf/speculativedescriptor.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_package = "com.lightbend.speculative"; 4 | 5 | 6 | // Description of the speculative server. 7 | message SpeculativeDescriptor { 8 | // data type 9 | string datatype = 1; 10 | // timeout. 
11 | int64 tmout = 2; 12 | } -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/training_data_ingestion/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | influxdb { 2 | # host = "10.0.6.63" 3 | host = localhost 4 | host = ${?INFLUXDB_HOST} 5 | 6 | # port = "13299" 7 | port = 8086 8 | port = ${?INFLUXDB_PORT} 9 | } 10 | 11 | influxTable { 12 | database = "intelDemo" 13 | } 14 | 15 | ingester { 16 | ingestInterval = "10 minutes" 17 | ingestInterval = ${?INGEST_INTERVAL} 18 | 19 | dataFileName = "data/CPU_examples.csv" 20 | dataFileName = ${?DATA_FILE_NAME} 21 | 22 | generationCompleteFileName = "data/data_preparation_complete.txt" 23 | generationCompleteFileName = ${?GENERATION_COMPLETE_FILE_NAME} 24 | 25 | lastTimestampFileName = "data/last_timestamp.txt" 26 | lastTimestampFileName = ${?LAST_TIMESTAMP_FILE_NAME} 27 | 28 | # don't generate data file for training if ingestion record count is below this threshold 29 | ingestThresholdCount = 256 30 | ingestThresholdCount = ${?INGEST_THRESHOLD_COUNT} 31 | } 32 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/training_data_ingestion/src/main/scala/com/lightbend/ad/training/ingestion/IOUtils.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.training.ingestion 2 | 3 | import java.io._ 4 | import java.nio.file.{Files, Paths} 5 | import cats.effect.IO 6 | 7 | object IOUtils { 8 | /** 9 | * case 1: all ok, we get back a BufferedWriter 10 | * case 2: file creation fails somehow and we have an exception in IO 11 | * case 3: threshold count check fails 12 | */ 13 | def generateDataCompletionFile(name: String, ingestThresholdCount: Int, numberOfRecords: Int): IO[Boolean] = IO { 14 | if (numberOfRecords < ingestThresholdCount) false 15 | else { 16 | val _ = Files.newBufferedWriter(Paths.get(name), CHARSET) 17 | true 18 | } 19 | } 20 | 21 | def writeLastTimestamp(file: String, ingestThresholdCount: Int, numberOfRecords: Int, lastTime: String): IO[Unit] = { 22 | if (numberOfRecords < ingestThresholdCount) IO(()) 23 | else { 24 | IO(new BufferedWriter(new FileWriter(file))).bracket { out => 25 | IO (out.write(lastTime)) 26 | } { out => IO (out.close()) } 27 | } 28 | } 29 | 30 | def getLastTimestamp(file: String): IO[String] = IO(new BufferedReader(new FileReader(file))).bracket { in => 31 | IO { 32 | val line = in.readLine() 33 | if (line == null) "" 34 | else line 35 | } 36 | } { in => IO (in.close()) } 37 | } 38 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/training_data_ingestion/src/main/scala/com/lightbend/ad/training/package.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.training 2 | 3 | import java.nio.charset.Charset 4 | 5 | package object ingestion { 6 | final val CHARSET = Charset.forName("UTF-8") 7 | 8 | implicit def asFiniteDuration(d: java.time.Duration) = 9 | scala.concurrent.duration.Duration.fromNanos(d.toNanos) 10 | } 11 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/training_model_publish/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | kafka { 2 | brokers = 
"kafka-0-broker.kafka.autoip.dcos.thisdcos.directory:1025" 3 | brokers = ${?KAFKA_BROKERS} 4 | 5 | modeltopic = "intelModel" 6 | modelgroup = "IntelModelGroup" 7 | } 8 | 9 | model { 10 | pbFileName = "data/model.pb" 11 | pbFileName = ${?MODEL_PB_FILE_NAME} 12 | 13 | attributesFileName = "data/model-attributes.properties" 14 | attributesFileName = ${?MODEL_ATTRIB_FILE_NAME} 15 | 16 | hyperparamsFileName = "data/hyperparams.properties" 17 | hyperparamsFileName = ${?HYPERPARAMS_FILE_NAME} 18 | } 19 | -------------------------------------------------------------------------------- /apps/anomaly-detection/source/core/training_model_publish/src/main/scala/com/lightbend/ad/training/publish/ModelPublisher.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.ad.training.publish 2 | 3 | import cats.effect.IO 4 | import com.typesafe.config.ConfigFactory 5 | import org.apache.kafka.clients.producer.RecordMetadata 6 | 7 | import com.lightbend.ad.kafka._ 8 | 9 | import ModelUtils._ 10 | import com.lightbend.ad.configuration.IntelConfig 11 | import IntelConfig._ 12 | 13 | object ModelPublisher { 14 | 15 | def publishToKafka(configData: IntelSettings, sender: KafkaMessageSender): IO[RecordMetadata] = { 16 | 17 | import configData._ 18 | for { 19 | 20 | md <- readModel(modelConfig.pbFileName, modelConfig.attributesFileName, modelConfig.hyperparamsFileName) 21 | metadata <- publishModelToKafka(sender, md, kafkaDataConfig.modeltopic) 22 | 23 | } yield metadata 24 | } 25 | 26 | def main(args: Array[String]): Unit = { 27 | 28 | // get config 29 | val configData = fromConfig(ConfigFactory.load()).get 30 | println(s"Starting publishing service with config: $configData") 31 | 32 | // get kafka brokers from config 33 | val kafkaBrokers = configData.kafkaDataConfig.brokers 34 | println(s"Kafka brokers found: $kafkaBrokers") 35 | 36 | // make a Kafka sender 37 | val sender = new KafkaMessageSender(kafkaBrokers) 38 | 39 | // publish to Kafka 40 | val metadata = publishToKafka(configData, sender).unsafeRunSync 41 | println(s"Metadata from Kafka [topic: ${metadata.topic}, timestamp: ${metadata.timestamp}]") 42 | 43 | () 44 | } 45 | } 46 | 47 | 48 | -------------------------------------------------------------------------------- /apps/flink/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | : ${NOOP:=} 5 | 6 | HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" 7 | 8 | source $HERE/../../common.sh 9 | 10 | docker_task="docker" 11 | push_msg= 12 | while [[ $# -gt 0 ]] 13 | do 14 | case $1 in 15 | --push|--push-docker-images) 16 | docker_task="dockerBuildAndPush" 17 | push_msg="Pushed the docker images." 18 | ;; 19 | -v|--version*) 20 | shift 21 | VERSION=$(get_version $@) 22 | ;; 23 | *) 24 | error "Unrecognized argument $1" 25 | ;; 26 | esac 27 | shift 28 | done 29 | 30 | [[ -n $VERSION ]] || error "Version string can't be empty!" 31 | info2 "Using version $VERSION" 32 | 33 | cd ${HERE} 34 | $NOOP bats test/bin/*.bats 35 | 36 | cd ${HERE}/source/core 37 | for i in fdp-flink-ingestion fdp-flink-taxiride fdp-flink-resultprinter 38 | do 39 | $NOOP sbt -no-colors "set version in ThisBuild := \"$VERSION\"" "show version" $i/clean $i/$docker_task 40 | done 41 | 42 | echo "$PWD: built package and Docker images. 
$push_msg" 43 | -------------------------------------------------------------------------------- /apps/flink/helm/Chart.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: FDP_VERSION 3 | description: Flink Taxiride application 4 | name: Flink Taxiride application 5 | version: FDP_VERSION 6 | 7 | -------------------------------------------------------------------------------- /apps/flink/helm/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Flink taxiride application components ( {{ .Values.image.ingestion}}, {{ .Values.image.taxiride}} ) are installed 2 | 3 | 4 | -------------------------------------------------------------------------------- /apps/flink/helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "taxirideapp.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "taxirideapp.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "taxirideapp.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /apps/flink/helm/templates/taxirideingestioninstall.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: fdp-taxiride-ingestion 5 | labels: 6 | app: fdp-taxiride-ingestion 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: fdp-taxiride-ingestion 12 | strategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: fdp-taxiride-ingestion 18 | spec: 19 | containers: 20 | - name: taxirideingestion 21 | image: {{ .Values.image.ingestion}}:{{.Values.image.version }} 22 | imagePullPolicy: {{ .Values.image.pullPolicy }} 23 | volumeMounts: 24 | - name: datadir 25 | mountPath: {{ .Values.data.directorytomatch }} 26 | env: 27 | - name: "DIRECTORY_TO_WATCH" 28 | value: {{ .Values.data.directorytomatch }} 29 | - name: "KAFKA_BROKERS" 30 | value: {{ .Values.configuration.kafka.brokerlist}} 31 | # These container runs during pod initialization 32 | initContainers: 33 | - name: install 34 | image: busybox 35 | command: 36 | - sh 37 | - -c 38 | - wget {{ .Values.data.datadirectory }} -O {{ .Values.data.directorytomatch }}/nycTaxiRides.csv.tgz; tar xvfz {{ .Values.data.directorytomatch }}/nycTaxiRides.csv.tgz -C {{ .Values.data.directorytomatch }}; ls -l {{ .Values.data.directorytomatch }} 39 | volumeMounts: 40 | - name: datadir 41 | mountPath: {{ .Values.data.directorytomatch }} 42 | dnsPolicy: Default 43 | volumes: 44 | - name: datadir 45 | emptyDir: {} 46 | -------------------------------------------------------------------------------- /apps/flink/helm/templates/taxirideprint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: fdp-taxiride-print 5 | labels: 6 | app: fdp-taxiride-print 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: fdp-taxiride-print 12 | strategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: fdp-taxiride-print 18 | spec: 19 | containers: 20 | - name: taxirideingestion 21 | image: {{ .Values.image.print}}:{{.Values.image.version }} 22 | imagePullPolicy: {{ .Values.image.pullPolicy }} 23 | env: 24 | - name: "KAFKA_BROKERS" 25 | value: {{ .Values.configuration.kafka.brokerlist}} -------------------------------------------------------------------------------- /apps/flink/helm/values.yaml.template: -------------------------------------------------------------------------------- 1 | # Default values for anomaly detection chart. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
4 | 5 | # data.zip location 6 | data: 7 | datadirectory: https://s3.eu-central-1.amazonaws.com/fdp-sample-apps-artifacts/nycTaxiRides.csv.tgz 8 | 9 | # docker image 10 | image: 11 | taxiride: lightbend/fdp-flink-taxiride 12 | ingestion: lightbend/fdp-flink-ingestion 13 | pullPolicy: Always 14 | version: FDP_VERSION 15 | 16 | # assembly jars and main classes 17 | taxiride: 18 | jar: fdp-flink-taxiride-assembly-FDP_VERSION.jar 19 | main: com.lightbend.fdp.sample.flink.app.TravelTimePrediction 20 | 21 | # Configuration parameters for an application 22 | configuration: 23 | kafka: 24 | brokerlist : broker.kafka.l4lb.thisdcos.directory:9092 25 | intopic : taxiin 26 | outtopic : taxiout 27 | flink: 28 | jm_rpc_address : ip-10-0-15-137.ec2.internal 29 | jm_rpc_port : 7199 30 | zk: 31 | url : "master.mesos:2181/dcos-service-kafka" 32 | -------------------------------------------------------------------------------- /apps/flink/manager-support/fdp-taxiride.json: -------------------------------------------------------------------------------- 1 | { 2 | "uid" : "taxiride", 3 | "name" : "Flink Taxiride application", 4 | "description" : "Flink Taxiride application", 5 | "group" : "group", 6 | "version" : "1.2.1", 7 | "defaultAdminLink" : false, 8 | "dependencies" : [ ], 9 | "optionalDeps" : [ ], 10 | "installable" : true, 11 | "resourcePath" : "file:///Users/debasishghosh/lightbend/fdp-manager/conf/charts/taxiride-1.2.1.tgz", 12 | "order" : 0 13 | } 14 | 15 | -------------------------------------------------------------------------------- /apps/flink/manager-support/fdp-taxiride.options.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | datadirectory: '{{ data|datadirectory }}' 3 | image: 4 | taxiride: '{{ image|taxiride }}' 5 | ingestion: '{{ image|ingestion }}' 6 | pullPolicy: '{{ image|pullPolicy }}' 7 | version: '{{ image|version }}' 8 | taxiride: 9 | jar: '{{ taxiride|jar }}' 10 | main: '{{ taxiride|main }}' 11 | configuration: 12 | kafka: 13 | brokerlist: '{{ configuration|kafka|brokerlist }}' 14 | intopic: '{{ configuration|kafka|intopic }}' 15 | outtopic: '{{ configuration|kafka|outtopic }}' 16 | flink: 17 | jm_rpc_address: '{{ configuration|flink|jm_rpc_address }}' 18 | jm_rpc_port: !!int '{{ configuration|flink|jm_rpc_port }}' 19 | zk: 20 | url: '{{ configuration|zk|url }}' 21 | 22 | -------------------------------------------------------------------------------- /apps/flink/source/Flink-operator/project/Versions.scala: -------------------------------------------------------------------------------- 1 | object Versions { 2 | val Scala = "2.11.12" 3 | val JDK = "1.8" 4 | 5 | val reactiveKafkaVersion = "0.22" 6 | val akkaVersion = "2.5.18" 7 | val akkaHttpVersion = "10.1.5" 8 | val akkaHttpJsonVersion = "1.22.0" 9 | 10 | val kafkaVersion = "2.0.0" 11 | val Curator = "4.0.0" 12 | 13 | val flinkVersion = "1.6.2" 14 | 15 | val sparkVersion = "2.4.0" 16 | 17 | 18 | val TypesafeConfigVersion = "1.3.3" 19 | val FicusVersion = "1.4.3" 20 | } 21 | -------------------------------------------------------------------------------- /apps/flink/source/Flink-operator/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | logLevel := Level.Warn 2 | 3 | resolvers += Resolver.sonatypeRepo("releases") 4 | 5 | resolvers += "Bintray Repository" at "https://dl.bintray.com/shmishleniy/" 6 | 7 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.6") 8 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % 
"1.3.4") 9 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") 10 | -------------------------------------------------------------------------------- /apps/flink/source/core/app/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /apps/flink/source/core/app/src/main/scala/com/lightbend/fdp/sample/flink/app/PredictedTimeSchema.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.fdp.sample.flink.app 2 | 3 | import com.lightbend.fdp.sample.flink.models.PredictedTime 4 | import org.apache.flink.api.common.serialization.{DeserializationSchema, SerializationSchema} 5 | import org.apache.flink.api.common.typeinfo.TypeInformation 6 | import org.apache.flink.api.java.typeutils.TypeExtractor 7 | 8 | class PredictedTimeSchema extends DeserializationSchema[PredictedTime] with SerializationSchema[PredictedTime] { 9 | override def serialize(element: PredictedTime): Array[Byte] = element.toString.getBytes 10 | 11 | override def deserialize(message: Array[Byte]): PredictedTime = PredictedTime.fromString(new String(message, "UTF-8")).get 12 | 13 | override def isEndOfStream(nextElement: PredictedTime) = false 14 | 15 | override def getProducedType(): TypeInformation[PredictedTime] = TypeExtractor.getForClass(classOf[PredictedTime]) 16 | } 17 | -------------------------------------------------------------------------------- /apps/flink/source/core/app/src/main/scala/com/lightbend/fdp/sample/flink/app/TaxiRideSchema.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.fdp.sample.flink.app 2 | 3 | import com.lightbend.fdp.sample.flink.models.TaxiRide 4 | import org.apache.flink.api.common.serialization.{DeserializationSchema, SerializationSchema} 5 | import org.apache.flink.api.common.typeinfo.TypeInformation 6 | import org.apache.flink.api.java.typeutils.TypeExtractor 7 | 8 | class TaxiRideSchema extends DeserializationSchema[TaxiRide] with SerializationSchema[TaxiRide] { 9 | override def serialize(element: TaxiRide): Array[Byte] = element.toString.getBytes 10 | 11 | override def deserialize(message: Array[Byte]): TaxiRide = TaxiRide.fromString(new String(message)) 12 | 13 | override def isEndOfStream(nextElement: TaxiRide) = false 14 | 15 | override def getProducedType: TypeInformation[TaxiRide] = TypeExtractor.getForClass(classOf[TaxiRide]) 16 | } 17 | -------------------------------------------------------------------------------- /apps/flink/source/core/ingestion/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | dcos { 2 | 3 | kafka { 4 | brokers = "localhost:9092" 5 | brokers = ${?KAFKA_BROKERS} 6 | 7 | group = "group" 8 | group = ${?KAFKA_GROUP} 9 | 10 | intopic = "taxiin" 11 | intopic = ${?KAFKA_IN_TOPIC} 12 | 13 | outtopic = "taxiout" 14 | outtopic = ${?KAFKA_OUT_TOPIC} 15 | 16 | ## settings for data ingestion 17 | loader { 18 | sourcetopic = ${dcos.kafka.intopic} 19 | sourcetopic = ${?KAFKA_IN_TOPIC} 20 | 21 | directorytowatch = "/Users/boris/Downloads/taxi" 22 | directorytowatch = ${?DIRECTORY_TO_WATCH} 23 | 24 | pollinterval = 1 second 25 | } 26 | } 27 | } 28 | 29 | 
-------------------------------------------------------------------------------- /apps/flink/source/core/ingestion/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /apps/flink/source/core/project/PackagingTypePlugin.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | 3 | object PackagingTypePlugin extends AutoPlugin { 4 | override val buildSettings = { 5 | sys.props += "packaging.type" -> "jar" 6 | Nil 7 | } 8 | } -------------------------------------------------------------------------------- /apps/flink/source/core/project/Versions.scala: -------------------------------------------------------------------------------- 1 | object Versions { 2 | val flinkVersion = "1.8.0" 3 | val scalaLoggingVersion = "3.9.0" 4 | val alpakkaFileVersion = "1.0.2" 5 | val reactiveKafkaVersion = "1.0.3" 6 | val configVersion = "1.3.4" 7 | val catsVersion = "0.9.0" 8 | val logbackVersion = "1.2.3" 9 | val jodaTimeVersion = "2.10.1" 10 | val jodaConvertVersion = "2.1.2" 11 | val JDK = "1.8" 12 | val scalaVersion = "2.11.12" 13 | } 14 | -------------------------------------------------------------------------------- /apps/flink/source/core/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.1.6 2 | -------------------------------------------------------------------------------- /apps/flink/source/core/project/plugin.sbt: -------------------------------------------------------------------------------- 1 | resolvers += "Bintray Repository" at "https://dl.bintray.com/shmishleniy/" 2 | 3 | resolvers += "JAnalyse Repository" at "http://www.janalyse.fr/repository/" 4 | 5 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5") 6 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 7 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") 8 | addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2") -------------------------------------------------------------------------------- /apps/flink/source/core/resultprinter/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | dcos { 2 | 3 | kafka { 4 | brokers = "localhost:9092" 5 | brokers = ${?KAFKA_BROKERS} 6 | 7 | group = "group" 8 | group = ${?KAFKA_GROUP} 9 | 10 | intopic = "taxiin" 11 | intopic = ${?KAFKA_IN_TOPIC} 12 | 13 | outtopic = "taxiout" 14 | outtopic = ${?KAFKA_OUT_TOPIC} 15 | 16 | ## settings for data ingestion 17 | loader { 18 | sourcetopic = ${dcos.kafka.intopic} 19 | sourcetopic = ${?KAFKA_IN_TOPIC} 20 | 21 | directorytowatch = "/Users/boris/Projects/fdp-sample-apps/flink/source/core/data/" 22 | directorytowatch = ${?DIRECTORY_TO_WATCH} 23 | 24 | pollinterval = 1 second 25 | } 26 | } 27 | } 28 | 29 | -------------------------------------------------------------------------------- /apps/flink/source/core/resultprinter/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 
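The resultprinter configuration above mirrors the ingestion settings, and together with the reactive-kafka dependency pinned in Versions.scala it implies a small Akka Streams consumer that reads predictions from the out topic and prints them. The sketch below is purely illustrative, not the project's actual resultprinter code, and assumes akka-stream-kafka 1.0.x on the classpath:

import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import org.apache.kafka.common.serialization.StringDeserializer

object ResultPrinterSketch extends App {
  implicit val system: ActorSystem = ActorSystem("result-printer-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Defaults match the resultprinter application.conf shown above; environment variables override them.
  val settings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
    .withBootstrapServers(sys.env.getOrElse("KAFKA_BROKERS", "localhost:9092"))
    .withGroupId(sys.env.getOrElse("KAFKA_GROUP", "group"))

  Consumer.plainSource(settings, Subscriptions.topics(sys.env.getOrElse("KAFKA_OUT_TOPIC", "taxiout")))
    .runForeach(record => println(s"predicted travel time: ${record.value}"))
}
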
-------------------------------------------------------------------------------- /apps/flink/source/core/support/src/main/scala/com/lightbend/fdp/sample/flink/models/PredictedTime.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.fdp.sample.flink.models 2 | 3 | import scala.util.Try 4 | 5 | case class PredictedTime(rideId: Long, predictedTimeInMins: Int) { 6 | override def toString() = s"$rideId,$predictedTimeInMins" 7 | } 8 | 9 | object PredictedTime { 10 | def fromString(str: String): Try[PredictedTime] = Try{ 11 | val arr = str.split(",") 12 | if (arr.length < 2) throw new Exception(s"Invalid source string for deserialization $str") 13 | else try { 14 | PredictedTime(arr(0).toLong, arr(1).toInt) 15 | } catch { 16 | case t: Throwable => 17 | throw new RuntimeException(s"Invalid record: $str. Error $t") 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /apps/flink/source/fdp-flink-taxiride/project/PackagingTypePlugin.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | 3 | object PackagingTypePlugin extends AutoPlugin { 4 | override val buildSettings = { 5 | sys.props += "packaging.type" -> "jar" 6 | Nil 7 | } 8 | } -------------------------------------------------------------------------------- /apps/flink/source/fdp-flink-taxiride/project/Versions.scala: -------------------------------------------------------------------------------- 1 | object Versions { 2 | val flinkVersion = "1.7.1" 3 | val scalaLoggingVersion = "3.9.0" 4 | val alpakkaFileVersion = "0.20" 5 | val reactiveKafkaVersion = "1.0-RC1" 6 | val configVersion = "1.3.3" 7 | val catsVersion = "0.9.0" 8 | val logbackVersion = "1.2.3" 9 | val jodaTimeVersion = "2.10.1" 10 | val jodaConvertVersion = "2.1.2" 11 | val JDK = "1.8" 12 | val scalaVersion = "2.11.12" 13 | } 14 | -------------------------------------------------------------------------------- /apps/flink/source/fdp-flink-taxiride/project/plugin.sbt: -------------------------------------------------------------------------------- 1 | resolvers += "Bintray Repository" at "https://dl.bintray.com/shmishleniy/" 2 | 3 | resolvers += "JAnalyse Repository" at "http://www.janalyse.fr/repository/" 4 | 5 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5") 6 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 7 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") -------------------------------------------------------------------------------- /apps/flink/source/fdp-flink-taxiride/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /apps/flink/test/bin/app-install-test.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | @test "todo" { 4 | echo "placeholder for flink app tests" 5 | 6 | # [ $status -eq 0 ] 7 | 8 | # [[ "${lines[0]}" =~ "ERROR" ]] 9 | } 10 | -------------------------------------------------------------------------------- /apps/flink/test/support/fake.app-install.properties: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/flink/test/support/fake.app-install.properties -------------------------------------------------------------------------------- /apps/flink/test/support/fake1.app-install.properties: -------------------------------------------------------------------------------- 1 | ## dcos kafka package - valid values : confluent-kafka | kafka 2 | kafka-dcos-package=kafka 3 | 4 | ## dcos service name. beta-kafka is installed as kafka by default. default is value of kafka-dcos-package 5 | kafka-dcos-service-name=kafka 6 | 7 | ## whether to skip creation of kafka topics - valid values : true | false 8 | skip-create-topics=true 9 | 10 | ## kafka topic partition : default 1 11 | kafka-topic-partitions=2 12 | 13 | ## kafka topic replication factor : default 1 14 | kafka-topic-replication-factor=2 15 | 16 | ## security mode in cluster - valid values : strict | permissive | none 17 | security-mode=permissive 18 | 19 | -------------------------------------------------------------------------------- /apps/flink/test/support/normal.app-install.properties: -------------------------------------------------------------------------------- 1 | ## docker username in docker hub from where to download artifacts 2 | docker-username=lightbend 3 | 4 | ## dcos kafka package - valid values : kafka | confluent-kafka 5 | kafka-dcos-package=kafka 6 | 7 | ## whether to skip creation of kafka topics - valid values : true | false 8 | skip-create-topics=false 9 | 10 | ## whether to use iam roles - valid values : true | false 11 | with-iam-role=false 12 | 13 | ## kafka topic partition : default 1 14 | kafka-topic-partitions=1 15 | 16 | ## kafka topic replication factor : default 1 17 | kafka-topic-replication-factor=1 18 | 19 | ## S3 bucket to use to get Spark job artifacts 20 | s3-bucket-url=http://fdp-kdd-network-intrusion.s3.amazonaws.com 21 | 22 | -------------------------------------------------------------------------------- /apps/killrweather/Jenkinsfile: -------------------------------------------------------------------------------- 1 | 2 | pipeline { 3 | agent any 4 | 5 | stages { 6 | stage('Build') { 7 | steps { 8 | echo "Compiling..." 9 | echo "tool: ${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}" 10 | sh "${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}/bin/sbt -Dsbt.log.noformat=true clean compile" 11 | } 12 | } 13 | stage('Unit Test') { 14 | steps { 15 | echo "Testing..." 
16 | sh "${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}/bin/sbt -Dsbt.log.noformat=true test" 17 | junit "target/test-reports/*.xml" 18 | } 19 | } 20 | stage('Docker Publish') { 21 | steps { 22 | echo "TODO: Docker Publish stage" 23 | // Run the Docker tool to build the image 24 | script { 25 | sh "${tool name: 'sbt-1.1.0', type: 'org.jvnet.hudson.plugins.SbtPluginBuilder$SbtInstallation'}/bin/sbt -Dsbt.log.noformat=true dockerBuildAndPush" 26 | } 27 | } 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /apps/killrweather/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | : ${NOOP:=} 5 | 6 | HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" 7 | 8 | source $HERE/../../common.sh 9 | 10 | docker_task="docker" 11 | push_msg= 12 | while [[ $# -gt 0 ]] 13 | do 14 | case $1 in 15 | --push|--push-docker-images) 16 | docker_task="dockerBuildAndPush" 17 | push_msg="Pushed the docker images." 18 | ;; 19 | -v|--version*) 20 | shift 21 | VERSION=$(get_version $@) 22 | ;; 23 | *) 24 | error "Unrecognized argument $1" 25 | ;; 26 | esac 27 | shift 28 | done 29 | 30 | [[ -n $VERSION ]] || error "Version string can't be empty!" 31 | info2 "Using version $VERSION" 32 | 33 | cd ${HERE}/source/core 34 | $NOOP sbt -no-colors "set version in ThisBuild := \"$VERSION\"" "show version" clean package $docker_task 35 | 36 | echo "$PWD: built package and Docker images. $push_msg" 37 | -------------------------------------------------------------------------------- /apps/killrweather/data/load/ny-2008.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/data/load/ny-2008.csv.gz -------------------------------------------------------------------------------- /apps/killrweather/data/load/ny-sf-2008.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/data/load/ny-sf-2008.csv.gz -------------------------------------------------------------------------------- /apps/killrweather/data/load/sf-2008.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/data/load/sf-2008.csv.gz -------------------------------------------------------------------------------- /apps/killrweather/data/load/sfo-nyc-mia-lax-chi-2008-2014.csv.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/data/load/sfo-nyc-mia-lax-chi-2008-2014.csv.zip -------------------------------------------------------------------------------- /apps/killrweather/data/use-with-cassandra-2.0.11-or-2.1.1.cql: -------------------------------------------------------------------------------- 1 | // This will use the new compaction strategy DateTieredCompactionStrategy (DTCS) 2 | // available in Cassandra 2.0.11 and 2.1.1 3 | // Use this alter table before loading data. 
If used 4 | // after, use the nodetool updatesstables command to re-write your sstables 5 | 6 | use isd_weather_data; 7 | 8 | alter TABLE raw_weather_data with compaction = {'class': 'DateTieredCompactionStrategy'}; 9 | 10 | -------------------------------------------------------------------------------- /apps/killrweather/diagrams/Architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/diagrams/Architecture.png -------------------------------------------------------------------------------- /apps/killrweather/diagrams/Asynchronous_Data_Ingestion_External.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/diagrams/Asynchronous_Data_Ingestion_External.png -------------------------------------------------------------------------------- /apps/killrweather/diagrams/Asynchronous_Data_Ingestion_Internal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/diagrams/Asynchronous_Data_Ingestion_Internal.png -------------------------------------------------------------------------------- /apps/killrweather/diagrams/Asynchronous_Fault_Tolerant_Data_Pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/diagrams/Asynchronous_Fault_Tolerant_Data_Pipeline.png -------------------------------------------------------------------------------- /apps/killrweather/diagrams/KillrWeather.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/diagrams/KillrWeather.png -------------------------------------------------------------------------------- /apps/killrweather/diagrams/Primary_Component_Initialization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/diagrams/Primary_Component_Initialization.png -------------------------------------------------------------------------------- /apps/killrweather/diagrams/Streaming_Data_Pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/diagrams/Streaming_Data_Pipeline.png -------------------------------------------------------------------------------- /apps/killrweather/helm-hdfs/Chart.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: FDP_VERSION 3 | description: killrweather Sample Application 4 | maintainers: 5 | - name: Fast Data Team 6 | name: killrweather Sample Application 7 | version: FDP_VERSION 8 | -------------------------------------------------------------------------------- /apps/killrweather/helm-hdfs/templates/NOTES.txt: 
-------------------------------------------------------------------------------- 1 | Killrweather{{ if eq .Values.components.app "killrweather" }}{{else}}_structured{{ end }} and supporting applications (loader{{ if eq .Values.components.http "yes" }}, httpclient{{ end }}{{ if eq .Values.components.grpc "yes" }}, grpcclient{{ end }}) are installed 2 | -------------------------------------------------------------------------------- /apps/killrweather/helm-hdfs/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "modelserverchart.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "modelserverchart.fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /apps/killrweather/helm-pvc/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /apps/killrweather/helm-pvc/Chart.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: FDP_VERSION 3 | description: killrweather Sample Application 4 | maintainers: 5 | - name: Fast Data Team 6 | name: killrweather Sample Application 7 | version: FDP_VERSION 8 | -------------------------------------------------------------------------------- /apps/killrweather/helm-pvc/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Killrweather{{ if eq .Values.components.app "killrweather" }}{{else}}_structured{{ end }} and supporting applications (loader{{ if eq .Values.components.http "yes" }}, httpclient{{ end }}{{ if eq .Values.components.grpc "yes" }}, grpcclient{{ end }}) are installed 2 | -------------------------------------------------------------------------------- /apps/killrweather/helm-pvc/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "modelserverchart.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
12 | */}} 13 | {{- define "modelserverchart.fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /apps/killrweather/helm-pvc/templates/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: spark-role 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: ["pods" , "services", "configmaps" ] 9 | verbs: 10 | - "*" 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1beta1 13 | kind: ClusterRoleBinding 14 | metadata: 15 | name: spark-role-default-binding 16 | subjects: 17 | - kind: ServiceAccount 18 | name: default 19 | namespace: sample 20 | roleRef: 21 | kind: ClusterRole 22 | name: spark-role 23 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /apps/killrweather/helm-spark-checkpointing-pvc-creation/Chart.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: FDP_VERSION 3 | description: Spark Checkpointing Support 4 | maintainers: 5 | - name: Fast Data Team 6 | name: Spark Checkpointing Support 7 | version: FDP_VERSION 8 | -------------------------------------------------------------------------------- /apps/killrweather/helm-spark-checkpointing-pvc-creation/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | PVC for Spark checkpointing with name {{ .Values.pvc.name}}, size {{ .Values.pvc.size}}, type {{ .Values.pvc.type}} is created -------------------------------------------------------------------------------- /apps/killrweather/helm-spark-checkpointing-pvc-creation/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "checkpointingchart.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "checkpointingchart.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "checkpointingchart.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /apps/killrweather/helm-spark-checkpointing-pvc-creation/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | annotations: 5 | volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs 6 | name: {{ .Values.pvc.name}} 7 | namespace: default 8 | spec: 9 | accessModes: 10 | - ReadWriteMany 11 | resources: 12 | requests: 13 | storage: {{ .Values.pvc.size}} 14 | storageClassName: {{ .Values.pvc.type}} -------------------------------------------------------------------------------- /apps/killrweather/helm-spark-checkpointing-pvc-creation/values-metadata.yaml: -------------------------------------------------------------------------------- 1 | pvc: 2 | __metadata: 3 | label: "Information about pvc" 4 | name: 5 | __metadata: 6 | label: "PVC name" 7 | description: "PVC name" 8 | type: "string" 9 | required: true 10 | size: 11 | __metadata: 12 | label: "PVC size" 13 | description: "PVC size" 14 | type: "string" 15 | required: true 16 | type: 17 | __metadata: 18 | label: "PVC storage class" 19 | description: "PVC storage class" 20 | type: "string" 21 | required: true 22 | -------------------------------------------------------------------------------- /apps/killrweather/process-templates.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" 6 | cd $HERE 7 | 8 | . "$HERE/../../version.sh" 9 | 10 | # The only allowed argument is the optional version string 11 | [[ $# -gt 0 ]] && VERSION=$1 12 | echo "$0: Using version $VERSION" 13 | 14 | function process_templates { 15 | for t in "$@" 16 | do 17 | if [[ -f "$t" ]] 18 | then 19 | echo " Processing template: $t" 20 | file=${t%.template} 21 | cat "$t" | sed -e "s/FDP_VERSION/$VERSION/g" > "$file" 22 | fi 23 | done 24 | } 25 | 26 | # Find the template files and change the version, generating the corresponding file. 
27 | process_templates *.template 28 | # Ignore templates that end up in target directories: 29 | find "$HERE" -path '*/target' -prune -o -name '*.template' | while read f 30 | do 31 | process_templates "$f" 32 | done 33 | 34 | -------------------------------------------------------------------------------- /apps/killrweather/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.1.6 2 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app-local/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | include "local.conf" 2 | 3 | spark { 4 | serializer = "org.apache.spark.serializer.KryoSerializer" 5 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app-local/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" 4 | brokers = ${?KAFKA_BROKERS} 5 | } 6 | 7 | spark { 8 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory, node-1-server.cassandra.autoip.dcos.thisdcos.directory, node-2-server.cassandra.autoip.dcos.thisdcos.directory" 9 | cassandra.connection.host = ${?CASSANDRA_HOSTS} 10 | } 11 | 12 | streaming{ 13 | batchInterval = "4 second" 14 | batchInterval = ${?SPARK_BATCH_INTERVAL} 15 | checkpointDir = "/usr/checkpoint/data/" 16 | checkpointDir = ${?CHECKPOINT_DIRECTORY} 17 | } 18 | 19 | loader{ 20 | publish_interval = 1 second 21 | publish_interval = ${?PUBLISH_INTERVAL} 22 | data_dir = "/usr/share/data/load/" 23 | data_dir = ${?DATA_DIRECTORY} 24 | batch_size = 10 25 | batch_size = ${?DATA_BATCH_SIZE} 26 | } 27 | 28 | influx { 29 | enabled = true 30 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 31 | server = ${?INFLUXDB_HOST} 32 | port = 8086 33 | port = ${?INFLUXDB_PORT} 34 | } 35 | 36 | grafana { 37 | server = "grafana.marathon.l4lb.thisdcos.directory" 38 | server = ${?GRAFANA_HOST} 39 | port = 3000 40 | port =${?GRAFANA_PORT} 41 | } 42 | 43 | usingCluster = "from cluster.conf" 44 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app-local/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "weather", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://influxdb.marathon.l4lb.thisdcos.directory:8086", 7 | "password": "root", 8 | "user": "root", 9 | "database": "weather", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": {} 13 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app-local/src/main/resources/local.conf: -------------------------------------------------------------------------------- 1 | # Configuration entries in this stanza are passed directly to the spark configuration builder 2 | spark { 3 | master = "local[4]" 4 | 5 | cassandra { 6 | connection.host = "172.17.0.2" 7 | } 8 | } 9 | 10 | kafka { 11 | brokers = "172.17.0.3:9092" 12 | } 13 | 14 | grafana { 15 | server 
= "localhost" 16 | port = 3000 17 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app-local/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "10.0.9.234:1025" 4 | } 5 | 6 | spark { 7 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory, node-1-server.cassandra.autoip.dcos.thisdcos.directory, node-2-server.cassandra.autoip.dcos.thisdcos.directory" 8 | } 9 | 10 | influx { 11 | enabled = true 12 | server = "http://10.0.4.61" 13 | port = 18559 14 | } 15 | 16 | grafana { 17 | server = "10.0.4.61" 18 | port = 20749 19 | } 20 | 21 | streaming{ 22 | batchInterval = "4 second" 23 | checkpointDir = "./checkpoints/" 24 | checkpointDir = ${?CHECKPOINT_DIRECTORY} 25 | } 26 | 27 | usingCluster = "from cluster.conf" 28 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | include "local.conf" 2 | 3 | spark { 4 | serializer = "org.apache.spark.serializer.KryoSerializer" 5 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" 4 | brokers = ${?KAFKA_BROKERS} 5 | } 6 | 7 | spark { 8 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory, node-1-server.cassandra.autoip.dcos.thisdcos.directory, node-2-server.cassandra.autoip.dcos.thisdcos.directory" 9 | cassandra.connection.host = ${?CASSANDRA_HOSTS} 10 | } 11 | 12 | streaming{ 13 | batchInterval = "4 second" 14 | batchInterval = ${?SPARK_BATCH_INTERVAL} 15 | checkpointDir = "/usr/checkpoint/data/" 16 | checkpointDir = ${?CHECKPOINT_DIRECTORY} 17 | } 18 | 19 | loader{ 20 | publish_interval = 1 second 21 | publish_interval = ${?PUBLISH_INTERVAL} 22 | data_dir = "/usr/share/data/load/" 23 | data_dir = ${?DATA_DIRECTORY} 24 | batch_size = 10 25 | batch_size = ${?DATA_BATCH_SIZE} 26 | } 27 | 28 | influx { 29 | enabled = true 30 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 31 | server = ${?INFLUXDB_HOST} 32 | port = 8086 33 | port = ${?INFLUXDB_PORT} 34 | } 35 | 36 | grafana { 37 | server = "grafana.marathon.l4lb.thisdcos.directory" 38 | server = ${?GRAFANA_HOST} 39 | port = 3000 40 | port =${?GRAFANA_PORT} 41 | } 42 | 43 | usingCluster = "from cluster.conf" 44 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "weather", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://influxdb.marathon.l4lb.thisdcos.directory:8086", 7 | "password": "root", 8 | "user": "root", 9 | "database": "weather", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | 
"jsonData": {} 13 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app/src/main/resources/local.conf: -------------------------------------------------------------------------------- 1 | # Configuration entries in this stanza are passed directly to the spark configuration builder 2 | spark { 3 | master = "local[4]" 4 | 5 | cassandra { 6 | connection.host = "172.17.0.2" 7 | } 8 | } 9 | 10 | kafka { 11 | brokers = "172.17.0.3:9092" 12 | } 13 | 14 | grafana { 15 | server = "localhost" 16 | port = 3000 17 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "10.0.9.234:1025" 4 | } 5 | 6 | spark { 7 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory, node-1-server.cassandra.autoip.dcos.thisdcos.directory, node-2-server.cassandra.autoip.dcos.thisdcos.directory" 8 | } 9 | 10 | influx { 11 | enabled = true 12 | server = "http://10.0.4.61" 13 | port = 18559 14 | } 15 | 16 | grafana { 17 | server = "10.0.4.61" 18 | port = 20749 19 | } 20 | 21 | streaming{ 22 | batchInterval = "4 second" 23 | checkpointDir = "./checkpoints/" 24 | checkpointDir = ${?CHECKPOINT_DIRECTORY} 25 | } 26 | 27 | usingCluster = "from cluster.conf" 28 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app/src/main/scala/com/lightbend/killrweather/app/cassandra/CassandraSetup.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.app.cassandra 2 | 3 | import com.datastax.spark.connector.cql.CassandraConnector 4 | import org.apache.spark.SparkConf 5 | 6 | import scala.collection.mutable.ListBuffer 7 | import scala.io.Source 8 | 9 | class CassandraSetup(sparkConf: SparkConf) { 10 | 11 | val connector = CassandraConnector.apply(sparkConf) 12 | 13 | def setup(file: String = "/create-timeseries.cql"): Unit = { 14 | 15 | val commands = readFile(file) 16 | connector.withSessionDo { session => 17 | commands.foreach(command => session.execute(command)) 18 | } 19 | } 20 | 21 | def readFile(name: String): ListBuffer[String] = { 22 | val commands = new ListBuffer[String] 23 | val command = StringBuilder.newBuilder 24 | for (line <- Source.fromInputStream(getClass.getResourceAsStream(name)).getLines) { 25 | if (command.length > 0 || line.toUpperCase().startsWith("CREATE") || line.toUpperCase.startsWith("USE")) { 26 | val code = line.split("//") 27 | command.append(code(0)) 28 | } 29 | if (line.endsWith(";") && command.length > 0) { 30 | commands.append(command.toString()) 31 | command.clear() 32 | } 33 | } 34 | commands 35 | } 36 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app_structured/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | include "local.conf" 2 | 3 | # Configuration of Kafka broker to subscribe for events 4 | kafka { 5 | # Override group information for this application 6 | group = "killrweather.group.structured" 7 | } 8 | 9 | spark { 10 | serializer = 
"org.apache.spark.serializer.KryoSerializer" 11 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app_structured/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" // for DC/OS - only works in the cluster! 4 | brokers = ${?KAFKA_BROKERS} 5 | } 6 | 7 | spark { 8 | # Cassandra cluster address 9 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory" 10 | cassandra.connection.host = ${?CASSANDRA_HOSTS} 11 | } 12 | 13 | influx { 14 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 15 | server = ${?INFLUXDB_HOST} 16 | port = 8086 17 | port = ${?INFLUXDB_PORT} 18 | } 19 | 20 | grafana { 21 | server = "grafana.marathon.l4lb.thisdcos.directory" 22 | server = ${?GRAFANA_HOST} 23 | port = 3000 24 | port =${?GRAFANA_PORT} 25 | } 26 | 27 | usingCluster = "from cluster.conf" 28 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app_structured/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "weather", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://influxdb.marathon.l4lb.thisdcos.directory:8086", 7 | "password": "root", 8 | "user": "root", 9 | "database": "weather", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": {} 13 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app_structured/src/main/resources/local.conf: -------------------------------------------------------------------------------- 1 | # Configuration for a local running test env 2 | # Configuration entries in this stanza are passed directly to the spark configuration builder 3 | spark { 4 | master = "local[4]" 5 | 6 | cassandra { 7 | connection.host = "172.17.0.2" 8 | } 9 | } 10 | 11 | kafka { 12 | brokers = "172.17.0.3:9092" 13 | } 14 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app_structured/src/main/scala/com/lightbend/killrweather/app/structured/cassandra/CassandraSetup.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.app.structured.cassandra 2 | 3 | import com.datastax.spark.connector.cql.CassandraConnector 4 | import org.apache.spark.sql.SparkSession 5 | 6 | import scala.collection.mutable.ListBuffer 7 | import scala.io.Source 8 | 9 | class CassandraSetup(sparkSession: SparkSession) { 10 | 11 | val connector = CassandraConnector.apply(sparkSession.sparkContext.getConf) 12 | 13 | def setup(file: String = "/create-timeseries.cql"): Unit = { 14 | 15 | val commands = readFile(file) 16 | connector.withSessionDo { session => 17 | commands.foreach(command => session.execute(command)) 18 | } 19 | } 20 | 21 | def readFile(name: String): ListBuffer[String] = { 22 | val commands = new ListBuffer[String] 23 | val command = StringBuilder.newBuilder 24 | for (line <- Source.fromInputStream(getClass.getResourceAsStream(name)).getLines) { 25 | if (command.length > 0 || 
line.toUpperCase().startsWith("CREATE") || line.toUpperCase.startsWith("USE")) { 26 | val code = line.split("//") 27 | command.append(code(0)) 28 | } 29 | if (line.endsWith(";") && command.length > 0) { 30 | commands.append(command.toString()) 31 | command.clear() 32 | } 33 | } 34 | commands 35 | } 36 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app_structured/src/main/scala/com/lightbend/killrweather/app/structured/cassandra/streaming/CassandraSinkProvider.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.app.structured.cassandra.streaming 2 | 3 | import org.apache.spark.sql.sources.StreamSinkProvider 4 | import org.apache.spark.sql.streaming.OutputMode 5 | import org.apache.spark.sql.SQLContext 6 | 7 | /** 8 | * From Holden Karau's High Performance Spark 9 | * https://github.com/holdenk/spark-structured-streaming-ml/blob/master/src/main/scala/com/high-performance-spark-examples/structuredstreaming/CustomSink.scala#L66 10 | * 11 | */ 12 | class CassandraRawSinkProvider extends StreamSinkProvider { 13 | override def createSink( 14 | sqlContext: SQLContext, 15 | parameters: Map[String, String], 16 | partitionColumns: Seq[String], 17 | outputMode: OutputMode 18 | ): CassandraRawSink = { 19 | new CassandraRawSink(sqlContext) 20 | } 21 | } 22 | 23 | class CassandraDailySinkProvider extends StreamSinkProvider { 24 | override def createSink( 25 | sqlContext: SQLContext, 26 | parameters: Map[String, String], 27 | partitionColumns: Seq[String], 28 | outputMode: OutputMode 29 | ): CassandraDailySink = { 30 | new CassandraDailySink(sqlContext) 31 | } 32 | } 33 | 34 | class CassandraMonthlySinkProvider extends StreamSinkProvider { 35 | override def createSink( 36 | sqlContext: SQLContext, 37 | parameters: Map[String, String], 38 | partitionColumns: Seq[String], 39 | outputMode: OutputMode 40 | ): CassandraMonthlySink = { 41 | new CassandraMonthlySink(sqlContext) 42 | } 43 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-app_structured/src/main/scala/com/lightbend/killrweather/app/structured/influxDB/streaming/InfluxDBSinkProvider.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.app.structured.influxDB.streaming 2 | 3 | import org.apache.spark.sql.SQLContext 4 | import org.apache.spark.sql.sources.StreamSinkProvider 5 | import org.apache.spark.sql.streaming.OutputMode 6 | 7 | class InfluxDBRawSinkProvider extends StreamSinkProvider { 8 | override def createSink( 9 | sqlContext: SQLContext, 10 | parameters: Map[String, String], 11 | partitionColumns: Seq[String], 12 | outputMode: OutputMode 13 | ): InfluxDBRawSink = { 14 | new InfluxDBRawSink(sqlContext) 15 | } 16 | } 17 | 18 | class InfluxDBDailySinkProvider extends StreamSinkProvider { 19 | override def createSink( 20 | sqlContext: SQLContext, 21 | parameters: Map[String, String], 22 | partitionColumns: Seq[String], 23 | outputMode: OutputMode 24 | ): InfluxDBDailySink = { 25 | new InfluxDBDailySink(sqlContext) 26 | } 27 | } 28 | 29 | class InfluxDBMonthlySinkProvider extends StreamSinkProvider { 30 | override def createSink( 31 | sqlContext: SQLContext, 32 | parameters: Map[String, String], 33 | partitionColumns: Seq[String], 34 | outputMode: OutputMode 35 | ): InfluxDBRMonthlySink = { 36 | new InfluxDBRMonthlySink(sqlContext) 37 | } 38 
| } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-beam/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "weather", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://influx-db.marathon.l4lb.thisdcos.directory:8086", 7 | "password": "root", 8 | "user": "root", 9 | "database": "weather", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": {} 13 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-beam/src/main/scala/com/lightbend/killrweater/beam/cassandra/CassandraSetup.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweater.beam.cassandra 2 | 3 | import com.datastax.driver.core.Session 4 | 5 | import scala.collection.mutable.ListBuffer 6 | import scala.io.Source 7 | 8 | // Some info on Cassandra can be found here http://www.baeldung.com/cassandra-with-java 9 | 10 | object CassandraSetup { 11 | 12 | def setup(session: Session, file: String = "/create-timeseries.cql"): Unit = { 13 | 14 | readFile(file).foreach(command => session.execute(command)) 15 | } 16 | 17 | def readFile(name: String): ListBuffer[String] = { 18 | val commands = new ListBuffer[String] 19 | val command = StringBuilder.newBuilder 20 | for (line <- Source.fromInputStream(getClass.getResourceAsStream(name)).getLines) { 21 | if (command.length > 0 || line.toUpperCase().startsWith("CREATE") || line.toUpperCase.startsWith("USE")) { 22 | val code = line.split("//") 23 | command.append(code(0)) 24 | } 25 | if (line.endsWith(";") && command.length > 0) { 26 | commands.append(command.toString()) 27 | command.clear() 28 | } 29 | } 30 | commands 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-beam/src/main/scala/com/lightbend/killrweater/beam/grafana/GrafanaSetup.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweater.beam.grafana 2 | 3 | import scalaj.http.Http 4 | import scala.io.Source 5 | 6 | class GrafanaSetup(port: Int = 3000, host: String = "grafana.marathon.l4lb.thisdcos.directory", user: String = "admin", password: String = "admin") { 7 | 8 | val datasource = s"http://$host:$port/api/datasources" 9 | val dashboard = s"http://$host:$port/api/dashboards/db" 10 | 11 | def setGrafana(): Unit = { 12 | 13 | import GrafanaSetup._ 14 | 15 | // Set data source 16 | val dt = getData(dsfile) 17 | Http(datasource).auth(user, password).postData(dt).header("content-type", "application/json").asString 18 | // set dashboard 19 | val dash = getData(dashfile) 20 | val _ = Http(dashboard).auth(user, password).postData(dash).header("content-type", "application/json").asString 21 | } 22 | } 23 | 24 | object GrafanaSetup { 25 | val dsfile = "/grafana-source.json" 26 | val dashfile = "/grafana-dashboard.json" 27 | 28 | def getData(name: String): String = { 29 | val stream = getClass.getResourceAsStream(name) 30 | val data = Source.fromInputStream(stream).getLines.mkString 31 | stream.close() 32 | data 33 | } 34 | 35 | def main(args: Array[String]): Unit = { 36 | 37 | val client = new GrafanaSetup(27359) 38 | client.setGrafana() 39 | } 40 | } 
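The GrafanaSetup object above provisions the InfluxDB data source and the dashboard by POSTing the bundled JSON files to Grafana's HTTP API via scalaj-http. As a rough usage check, the result can be queried back through the same API; this is only a sketch under assumptions: the host, port, and admin/admin credentials are illustrative, and /api/datasources/name/:name is the standard Grafana lookup endpoint rather than anything defined in this repo.

import scalaj.http.Http

object GrafanaCheckSketch extends App {
  val host = "localhost"   // assumption: Grafana reachable locally, e.g. through a port-forward
  val port = 3000

  // The data source is registered under the name "weather" (see grafana-source.json above);
  // a 200 response here indicates that GrafanaSetup.setGrafana() provisioned it successfully.
  val response = Http(s"http://$host:$port/api/datasources/name/weather")
    .auth("admin", "admin")
    .asString

  println(s"status=${response.code} body=${response.body}")
}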
-------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-beam/src/main/scala/com/lightbend/killrweater/beam/kafka/KafkaOptions.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweater.beam.kafka 2 | 3 | import org.apache.beam.runners.direct.DirectOptions 4 | import org.apache.beam.sdk.options.{Default, Description} 5 | 6 | trait KafkaOptions extends DirectOptions { 7 | 8 | @Description("The Kafka topic to read data from") 9 | @Default.String("killrweather.raw") def getKafkaDataTopic: String 10 | 11 | def setKafkaDataTopic(value: String): Unit 12 | 13 | @Description("The Kafka Broker to read from") 14 | @Default.String("localhost:9092") def getBroker: String 15 | 16 | def setBroker(value: String): Unit 17 | 18 | @Description("The Data Reading groupId") 19 | @Default.String("killrweather.group") def getDataGroup: String 20 | 21 | def setDataGroup(value: String): Unit 22 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-beam/src/main/scala/com/lightbend/killrweater/beam/processors/CassandraTransformFn.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweater.beam.processors 2 | 3 | import org.apache.beam.sdk.transforms.DoFn 4 | import org.apache.beam.sdk.transforms.DoFn.ProcessElement 5 | import org.apache.beam.sdk.values.KV 6 | 7 | class CassandraTransformFn[InputT, OutputT](convertData : KV[String, InputT] => OutputT) extends DoFn[KV[String, InputT], OutputT]{ 8 | 9 | @ProcessElement 10 | def processElement(ctx: DoFn[KV[String, InputT], OutputT]#ProcessContext): Unit = { 11 | ctx.output(convertData(ctx.element())) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-beam/src/main/scala/com/lightbend/killrweater/beam/processors/ConvertDataRecordFn.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweater.beam.processors 2 | 3 | import com.lightbend.killrweather.WeatherClient.WeatherRecord 4 | import org.apache.beam.sdk.transforms.DoFn 5 | import org.apache.beam.sdk.transforms.DoFn.ProcessElement 6 | import org.apache.beam.sdk.values.KV 7 | 8 | import com.lightbend.killrweater.beam.data.RawWeatherData 9 | 10 | 11 | class ConvertDataRecordFn extends DoFn[KV[Array[Byte], Array[Byte]], KV[String, RawWeatherData]]{ 12 | 13 | @ProcessElement 14 | def processElement(ctx: DoFn[KV[Array[Byte], Array[Byte]], KV[String, RawWeatherData]]#ProcessContext) : Unit = { 15 | // Unmarshall record and convert it to BeamRecord 16 | try { 17 | val record = WeatherRecord.parseFrom(ctx.element.getValue) 18 | ctx.output(KV.of(record.wsid, RawWeatherData(record))) 19 | } 20 | catch { 21 | case t: Throwable => 22 | println(s"Error parsing weather Record ${t.printStackTrace()}") 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-beam/src/main/scala/com/lightbend/killrweater/beam/processors/SimplePrintFn.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweater.beam.processors 2 | 3 | import org.apache.beam.sdk.transforms.DoFn 4 | import org.apache.beam.sdk.transforms.DoFn.ProcessElement 5 | import 
org.apache.beam.sdk.values.KV 6 | 7 | 8 | class SimplePrintFn[T](msg : String) extends DoFn[KV[String, T], KV[String, T]]{ 9 | 10 | @ProcessElement 11 | def processElement(ctx: DoFn[KV[String, T], KV[String, T]]#ProcessContext) : Unit = { 12 | println(s"$msg : key ${ctx.element.getKey} value ${ctx.element.getValue}") 13 | ctx.output(KV.of(ctx.element().getKey, ctx.element().getValue)) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | [%level] [%date{ISO8601}] [%logger]: %msg%n 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/com/lightbend/killrweather/kafka/RecordProcessorTrait.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.kafka 2 | 3 | import org.apache.kafka.clients.consumer.ConsumerRecord 4 | 5 | trait RecordProcessorTrait[K, V] { 6 | 7 | def processRecord(record: ConsumerRecord[K, V]): Unit 8 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/com/lightbend/killrweather/utils/DailyPrecipitation.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.utils 2 | 3 | /** 4 | * Created by boris on 7/19/17. 5 | */ 6 | case class DailyPrecipitation( 7 | wsid: String, 8 | year: Int, 9 | month: Int, 10 | day: Int, 11 | precipitation: Double 12 | ) extends Serializable 13 | object DailyPrecipitation { 14 | def apply(daily: DailyWeatherData): DailyPrecipitation = 15 | new DailyPrecipitation(daily.wsid, daily.year, daily.month, daily.day, daily.precip) 16 | } 17 | 18 | case class MonthlyPrecipitation( 19 | wsid: String, 20 | year: Int, 21 | month: Int, 22 | high: Double, 23 | low: Double, 24 | mean: Double, 25 | variance: Double, 26 | stdev: Double 27 | ) extends Serializable 28 | object MonthlyPrecipitation { 29 | def apply(monthly: MonthlyWeatherData): MonthlyPrecipitation = 30 | new MonthlyPrecipitation(monthly.wsid, monthly.year, monthly.month, monthly.highPrecip, monthly.lowPrecip, 31 | monthly.meanPrecip, monthly.variancePrecip, monthly.stdevPrecip) 32 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/com/lightbend/killrweather/utils/DailyPressure.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.utils 2 | 3 | /** 4 | * Created by boris on 7/19/17. 
5 | */ 6 | case class DailyPressure( 7 | wsid: String, 8 | year: Int, 9 | month: Int, 10 | day: Int, 11 | high: Double, 12 | low: Double, 13 | mean: Double, 14 | variance: Double, 15 | stdev: Double 16 | ) extends Serializable 17 | 18 | object DailyPressure { 19 | def apply(daily: DailyWeatherData): DailyPressure = 20 | new DailyPressure(daily.wsid, daily.year, daily.month, daily.day, daily.highPressure, daily.lowPressure, 21 | daily.meanPressure, daily.variancePressure, daily.stdevPressure) 22 | } 23 | 24 | case class MonthlyPressure( 25 | wsid: String, 26 | year: Int, 27 | month: Int, 28 | high: Double, 29 | low: Double, 30 | mean: Double, 31 | variance: Double, 32 | stdev: Double 33 | ) extends Serializable 34 | 35 | object MonthlyPressure { 36 | def apply(monthly: MonthlyWeatherData): MonthlyPressure = 37 | new MonthlyPressure(monthly.wsid, monthly.year, monthly.month, monthly.highPressure, monthly.lowPressure, 38 | monthly.meanPressure, monthly.variancePressure, monthly.stdevPressure) 39 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/com/lightbend/killrweather/utils/DailyTemperature.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.utils 2 | 3 | /** 4 | * Created by boris on 7/19/17. 5 | */ 6 | case class DailyTemperature( 7 | wsid: String, 8 | year: Int, 9 | month: Int, 10 | day: Int, 11 | high: Double, 12 | low: Double, 13 | mean: Double, 14 | variance: Double, 15 | stdev: Double 16 | ) extends Serializable 17 | object DailyTemperature { 18 | def apply(daily: DailyWeatherData): DailyTemperature = 19 | new DailyTemperature(daily.wsid, daily.year, daily.month, daily.day, daily.highTemp, daily.lowTemp, 20 | daily.meanTemp, daily.varianceTemp, daily.stdevTemp) 21 | } 22 | 23 | case class MonthlyTemperature( 24 | wsid: String, 25 | year: Int, 26 | month: Int, 27 | high: Double, 28 | low: Double, 29 | mean: Double, 30 | variance: Double, 31 | stdev: Double 32 | ) extends Serializable 33 | object MonthlyTemperature { 34 | def apply(monthly: MonthlyWeatherData): MonthlyTemperature = 35 | new MonthlyTemperature(monthly.wsid, monthly.year, monthly.month, monthly.highTemp, monthly.lowTemp, 36 | monthly.meanTemp, monthly.varianceTemp, monthly.stdevTemp) 37 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/com/lightbend/killrweather/utils/DailyWeatherData.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.utils 2 | 3 | /** 4 | * Created by boris on 7/19/17. 
5 | */ 6 | case class DailyWeatherData(wsid: String, year: Int, month: Int, day: Int, 7 | highTemp: Double, lowTemp: Double, meanTemp: Double, stdevTemp: Double, varianceTemp: Double, 8 | highWind: Double, lowWind: Double, meanWind: Double, stdevWind: Double, varianceWind: Double, 9 | highPressure: Double, lowPressure: Double, meanPressure: Double, stdevPressure: Double, variancePressure: Double, 10 | precip: Double) extends Serializable 11 | 12 | case class DailyWeatherDataProcess(wsid: String, year: Int, month: Int, temp: Double, wind: Double, 13 | pressure: Double, precip: Double) extends Serializable 14 | 15 | object DailyWeatherDataProcess { 16 | def apply(daily: DailyWeatherData): DailyWeatherDataProcess = 17 | new DailyWeatherDataProcess(daily.wsid, daily.year, daily.month, daily.meanTemp, daily.meanWind, daily.meanPressure, daily.precip) 18 | } 19 | 20 | case class MonthlyWeatherData(wsid: String, year: Int, month: Int, 21 | highTemp: Double, lowTemp: Double, meanTemp: Double, stdevTemp: Double, varianceTemp: Double, 22 | highWind: Double, lowWind: Double, meanWind: Double, stdevWind: Double, varianceWind: Double, 23 | highPressure: Double, lowPressure: Double, meanPressure: Double, stdevPressure: Double, variancePressure: Double, 24 | highPrecip: Double, lowPrecip: Double, meanPrecip: Double, stdevPrecip: Double, variancePrecip: Double) extends Serializable 25 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/com/lightbend/killrweather/utils/DailyWindSpeed.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.utils 2 | 3 | /** 4 | * Created by boris on 7/19/17. 5 | */ 6 | case class DailyWindSpeed( 7 | wsid: String, 8 | year: Int, 9 | month: Int, 10 | day: Int, 11 | high: Double, 12 | low: Double, 13 | mean: Double, 14 | variance: Double, 15 | stdev: Double 16 | ) extends Serializable 17 | 18 | object DailyWindSpeed { 19 | def apply(daily: DailyWeatherData): DailyWindSpeed = 20 | new DailyWindSpeed(daily.wsid, daily.year, daily.month, daily.day, daily.highWind, daily.lowWind, 21 | daily.meanWind, daily.varianceWind, daily.stdevWind) 22 | } 23 | 24 | case class MonthlyWindSpeed( 25 | wsid: String, 26 | year: Int, 27 | month: Int, 28 | high: Double, 29 | low: Double, 30 | mean: Double, 31 | variance: Double, 32 | stdev: Double 33 | ) extends Serializable 34 | 35 | object MonthlyWindSpeed { 36 | def apply(monthly: MonthlyWeatherData): MonthlyWindSpeed = 37 | new MonthlyWindSpeed(monthly.wsid, monthly.year, monthly.month, monthly.highWind, monthly.lowWind, 38 | monthly.meanWind, monthly.varianceWind, monthly.stdevWind) 39 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/com/lightbend/killrweather/utils/WeatherStation.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.utils 2 | 3 | /** 4 | * @param id Composite of Air Force Datsav3 station number and NCDC WBAN number 5 | * @param name Name of reporting station 6 | * @param countryCode 2 letter ISO Country ID // TODO restrict 7 | * @param callSign International station call sign 8 | * @param lat Latitude in decimal degrees 9 | * @param long Longitude in decimal degrees 10 | * @param elevation Elevation in meters 11 | */ 12 | case class WeatherStation( 13 | id: String, 14 | name: String, 15 | 
countryCode: String, 16 | callSign: String, 17 | lat: Double, 18 | long: Double, 19 | elevation: Double 20 | ) extends Serializable 21 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/main/scala/org/apache/spark/sql/catalyst.scala: -------------------------------------------------------------------------------- 1 | package org.apache.spark.sql 2 | 3 | /** 4 | * Catalyst is a library for manipulating relational query plans. All classes in catalyst are 5 | * considered an internal API to Spark SQL and are subject to change between minor releases. 6 | * This is based on https://datastax-oss.atlassian.net/browse/SPARKC-530 7 | * The Cassandra Spark Connector does not work correctly under Spark 2.3, potentially due to a change 8 | * in the reflection lock used by Spark according to richard@datastax.com. Same code does work under 9 | * Spark 2.2. As a temporary fix in my project, I added back the package object that was removed in Spark 2.3: 10 | * https://github.com/apache/spark/blob/v2.2.1/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala 11 | 12 | Assignee: Unassigned 13 | Reporter: timfpark Tim Park 14 | Votes: 1 Vote for this issue 15 | Watchers: 4 Start watching this issue 16 | Created: 07/Mar/18 6:56 AM 17 | 18 | */ 19 | package object catalyst { 20 | /** 21 | * A JVM-global lock that should be used to prevent thread safety issues when using things in 22 | * scala.reflect.*. Note that Scala Reflection API is made thread-safe in 2.11, but not yet for 23 | * 2.10.* builds. See SI-6240 for more details. 24 | */ 25 | protected[sql] object ScalaReflectionLock 26 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-core/src/test/resources/application.conf: -------------------------------------------------------------------------------- 1 | kafka { 2 | brokers = "unit-test-host" // for unit testing 3 | } 4 | 5 | grafana { 6 | server = "server" 7 | port = 3000 8 | } 9 | 10 | spark { 11 | master = "local[2]" 12 | } 13 | 14 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-grpclient/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" // for DC/OS - only works in the cluster! 
4 | brokers = ${?KAFKA_BROKERS} 5 | } 6 | 7 | spark { 8 | # Cassandra cluster address 9 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory" 10 | cassandra.connection.host = ${?CASSANDRA_HOSTS} 11 | } 12 | 13 | influx { 14 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 15 | server = ${?INFLUXDB_HOST} 16 | port = 8086 17 | port = ${?INFLUXDB_PORT} 18 | } 19 | 20 | grafana { 21 | server = "grafana.marathon.l4lb.thisdcos.directory" 22 | server = ${?GRAFANA_HOST} 23 | port = 3000 24 | port =${?GRAFANA_PORT} 25 | } 26 | 27 | usingCluster = "from cluster.conf" 28 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-grpclient/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "10.0.7.196:1025" 4 | } 5 | 6 | spark { 7 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory" 8 | } 9 | 10 | influx { 11 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 12 | port = 8086 13 | } 14 | 15 | grafana { 16 | server = "10.0.7.216" 17 | port = 23735 18 | } 19 | 20 | usingCluster = "from cluster.conf" 21 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-httpclient/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" // for DC/OS - only works in the cluster! 
4 | brokers = ${?KAFKA_BROKERS} 5 | } 6 | 7 | spark { 8 | # Cassandra cluster address 9 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory" 10 | cassandra.connection.host = ${?CASSANDRA_HOSTS} 11 | } 12 | 13 | influx { 14 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 15 | server = ${?INFLUXDB_HOST} 16 | port = 8086 17 | port = ${?INFLUXDB_PORT} 18 | } 19 | 20 | grafana { 21 | server = "grafana.marathon.l4lb.thisdcos.directory" 22 | server = ${?GRAFANA_HOST} 23 | port = 3000 24 | port =${?GRAFANA_PORT} 25 | } 26 | 27 | usingCluster = "from cluster.conf" 28 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-httpclient/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "10.0.7.196:1025" 4 | } 5 | 6 | spark { 7 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory" 8 | } 9 | 10 | influx { 11 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 12 | port = 8086 13 | } 14 | 15 | grafana { 16 | server = "10.0.7.216" 17 | port = 23735 18 | } 19 | 20 | usingCluster = "from cluster.conf" 21 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-httpclient/src/main/scala/com/lightbend/killrweather/client/http/resources/WeatherReportResource.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.client.http.resources 2 | 3 | /** 4 | * Created by boris on 7/17/17. 5 | * 6 | * based on https://github.com/DanielaSfregola/quiz-management-service/blob/master/akka-http-crud/src/main/scala/com/danielasfregola/quiz/management/resources/QuestionResource.scala 7 | */ 8 | 9 | import akka.http.scaladsl.server.Route 10 | import com.lightbend.killrweather.utils.RawWeatherData 11 | import akka.http.scaladsl.model.StatusCodes._ 12 | import com.lightbend.killrweather.client.http.routing.JSONResource 13 | import com.lightbend.killrweather.client.http.services.RequestService 14 | 15 | import scala.concurrent.ExecutionContext 16 | 17 | trait WeatherReportResource extends JSONResource { 18 | 19 | def requestRoutes(requestService: RequestService)(implicit executionContext: ExecutionContext): Route = pathPrefix("weather") { 20 | pathEnd { 21 | post { 22 | entity(as[RawWeatherData]) { request => 23 | complete(requestService.processRequest(request).map(_ => OK)) 24 | } 25 | } 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-httpclient/src/main/scala/com/lightbend/killrweather/client/http/serializers/JSONSupport.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.client.http.serializers 2 | 3 | /** 4 | * Created by boris on 7/17/17. 
5 | * based on 6 | * https://github.com/DanielaSfregola/akka-tutorials/blob/master/akka-http-client/src/main/scala/com/danielasfregola/akka/tutorials/serializers/JsonSupport.scala 7 | */ 8 | import org.json4s.{ DefaultFormats, Formats } 9 | import org.json4s.native 10 | 11 | trait JsonSupport extends Json4sSupport { 12 | 13 | implicit val serialization = native.Serialization 14 | 15 | implicit def json4sFormats: Formats = DefaultFormats 16 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" // for DC/OS - only works in the cluster! 4 | brokers = ${?KAFKA_BROKERS} 5 | } 6 | 7 | spark { 8 | # Cassandra cluster address 9 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory" 10 | cassandra.connection.host = ${?CASSANDRA_HOSTS} 11 | } 12 | 13 | influx { 14 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 15 | server = ${?INFLUXDB_HOST} 16 | port = 8086 17 | port = ${?INFLUXDB_PORT} 18 | } 19 | 20 | grafana { 21 | server = "grafana.marathon.l4lb.thisdcos.directory" 22 | server = ${?GRAFANA_HOST} 23 | port = 3000 24 | port =${?GRAFANA_PORT} 25 | } 26 | 27 | loader{ 28 | publish_interval = 1 second 29 | publish_interval = ${?PUBLISH_INTERVAL} 30 | data_dir = "/usr/share/data/load/" 31 | data_dir = ${?DATA_DIRECTORY} 32 | batch_size = 10 33 | batch_size = ${?DATA_BATCH_SIZE} 34 | } 35 | 36 | usingCluster = "from cluster.conf" 37 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/resources/local.conf: -------------------------------------------------------------------------------- 1 | # Configuration entries in this stanza are passed directly to the spark configuration builder 2 | spark { 3 | master = "local[4]" 4 | 5 | cassandra { 6 | connection.host = "172.17.0.2" 7 | } 8 | } 9 | 10 | kafka { 11 | brokers = "172.17.0.3:9092" 12 | } 13 | 14 | grafana { 15 | server = "localhost" 16 | port = 3000 17 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "10.0.9.234:1025" 4 | } 5 | 6 | loader{ 7 | publish_interval = 1 second 8 | data_dir = "data/load/" 9 | batch_size = 10 10 | } 11 | 12 | spark { 13 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory" 14 | } 15 | 16 | influx { 17 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 18 | port = 8086 19 | } 20 | 21 | grafana { 22 | server = "10.0.7.216" 23 | port = 23735 24 | } 25 | 26 | usingCluster = "from cluster.conf" 27 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/scala/com/lightbend/killrweather/loader/utils/BufferedReaderIterator.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import 
java.io.BufferedReader 4 | 5 | class BufferedReaderIterator(reader: BufferedReader) extends Iterator[String] { 6 | override def hasNext: Boolean = reader.ready() 7 | override def next: String = reader.readLine() 8 | } 9 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/scala/com/lightbend/killrweather/loader/utils/DataConvertor.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import java.io.ByteArrayOutputStream 4 | 5 | import com.lightbend.killrweather.WeatherClient.WeatherRecord 6 | import com.lightbend.killrweather.utils.RawWeatherData 7 | import org.json4s.DefaultFormats 8 | import org.json4s.jackson.Serialization.write 9 | 10 | object DataConvertor { 11 | 12 | implicit val formats = DefaultFormats 13 | private val bos = new ByteArrayOutputStream() 14 | 15 | def convertToJson(string: String): String = { 16 | val report = RawWeatherData(string.split(",")) 17 | write(report) 18 | } 19 | 20 | def convertToRecord(string: String): WeatherRecord = { 21 | val report = RawWeatherData(string.split(",")) 22 | WeatherRecord( 23 | wsid = report.wsid, 24 | year = report.year, 25 | month = report.month, 26 | day = report.day, 27 | hour = report.hour, 28 | temperature = report.temperature, 29 | dewpoint = report.dewpoint, 30 | pressure = report.pressure, 31 | windDirection = report.windDirection, 32 | windSpeed = report.windSpeed, 33 | skyCondition = report.skyCondition, 34 | skyConditionText = report.skyConditionText, 35 | oneHourPrecip = report.oneHourPrecip, 36 | sixHourPrecip = report.sixHourPrecip 37 | ) 38 | } 39 | 40 | def convertToGPB(string: String): Array[Byte] = { 41 | bos.reset 42 | convertToRecord(string).writeTo(bos) 43 | bos.toByteArray 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/scala/com/lightbend/killrweather/loader/utils/FileContentIterator.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import java.io.File 4 | import scala.io.Source 5 | 6 | trait FileContentIterator { 7 | def apply(file: File, encoding: String = "UTF-8"): Iterator[String] 8 | } 9 | 10 | object TextFileIterator extends FileContentIterator { 11 | def apply(file: java.io.File, encoding: String): Iterator[String] = Source.fromFile(file.getPath, encoding).getLines() 12 | } 13 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/scala/com/lightbend/killrweather/loader/utils/FilesIterator.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import java.io.File 4 | 5 | object FilesIterator extends FileContentIterator { 6 | def apply(file: File, encoding: String = "UTF-8"): Iterator[String] = new FilesIterator(file, encoding) 7 | } 8 | 9 | class FilesIterator(file: File, encoding: String) extends Iterator[String] { 10 | 11 | require(file.exists()) 12 | 13 | val Empty: Iterator[String] = Iterator.empty 14 | 15 | def process(file: File): Option[Iterator[String]] = { 16 | file.getName match { 17 | case name if name.endsWith("csv.gz") => Some(GzFileIterator(file, encoding)) 18 | case name if name.endsWith("csv.zip") => Some(ZipFileIterator(file, 
encoding)) 19 | case name if name.endsWith("csv") => Some(TextFileIterator(file, encoding)) 20 | case _ => None 21 | } 22 | } 23 | 24 | private val iterator: Iterator[String] = if (file.isDirectory) file.listFiles.flatMap(process).reduce(_ ++ _) else 25 | process(file).getOrElse(Empty) 26 | 27 | override def hasNext: Boolean = iterator.hasNext 28 | 29 | override def next: String = iterator.next 30 | } 31 | 32 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/scala/com/lightbend/killrweather/loader/utils/GzFileIterator.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import java.io.{ BufferedReader, FileInputStream, InputStreamReader } 4 | import java.util.zip.GZIPInputStream 5 | 6 | object GzFileIterator extends FileContentIterator { 7 | def apply(file: java.io.File, encoding: String) = { 8 | new BufferedReaderIterator( 9 | new BufferedReader( 10 | new InputStreamReader(new GZIPInputStream(new FileInputStream(file)), encoding) 11 | ) 12 | ) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/main/scala/com/lightbend/killrweather/loader/utils/ZipFileIterator.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import java.io.{ File, BufferedReader, InputStreamReader } 4 | import java.util.zip.{ ZipEntry, ZipFile } 5 | import scala.collection.JavaConverters._ 6 | 7 | object ZipFileIterator extends FileContentIterator { 8 | def apply(file: File, encoding: String) = new ZipFileIterator(file, encoding) 9 | } 10 | 11 | class ZipFileIterator(file: File, encoding: String) extends Iterator[String] { 12 | val zipFile = new ZipFile(file) 13 | val iterator: Iterator[String] = zipFile.entries().asScala.map(entryIterator).reduce(_ ++ _) 14 | 15 | override def hasNext: Boolean = iterator.hasNext 16 | 17 | override def next: String = iterator.next() 18 | 19 | private def entryIterator(entry: ZipEntry): Iterator[String] = { 20 | new BufferedReaderIterator( 21 | new BufferedReader(new InputStreamReader(zipFile.getInputStream(entry), encoding)) 22 | ) 23 | } 24 | 25 | } 26 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/resources/samples/sample-1-2-csv.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/source/core/killrweather-loader/src/test/resources/samples/sample-1-2-csv.zip -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/resources/samples/sample1.csv: -------------------------------------------------------------------------------- 1 | time,power,temp,humidity,light,CO2,dust 2 | "2015-08-01 00:00:28",0,32,40,0,973,27.8 3 | "2015-08-01 00:00:58",0,32,40,0,973,27.09 4 | "2015-08-01 00:01:28",0,32,40,0,973,34.5 5 | "2015-08-01 00:01:58",0,32,40,0,973,28.43 6 | "2015-08-01 00:02:28",0,32,40,0,973,27.58 7 | "2015-08-01 00:02:59",0,32,40,0,971,29.35 8 | "2015-08-01 00:03:29",0,32,40,0,971,26.46 9 | "2015-08-01 00:03:59",0,32,40,0,971,23.35 10 | "2015-08-01 
00:04:29",0,32,40,0,973,11.67 11 | "2015-08-01 00:04:59",0,32,40,0,975,9.06 12 | "2015-08-01 00:05:29",0,32,40,0,973,11.28 13 | "2015-08-01 00:05:59",0,32,40,0,973,22.82 14 | "2015-08-01 00:06:29",0,32,40,0,973,23.39 15 | "2015-08-01 00:06:59",0,32,40,0,975,34.68 16 | "2015-08-01 00:07:30",0,32,40,0,975,32.91 17 | "2015-08-01 00:08:00",0,32,40,0,975,25.68 18 | "2015-08-01 00:08:30",0,32,40,0,978,23.63 19 | "2015-08-01 00:09:00",0,32,40,0,975,37.32 20 | "2015-08-01 00:09:30",0,32,40,0,978,42.26 21 | "2015-08-01 00:10:00",0,32,40,0,978,26.07 22 | "2015-08-01 00:10:30",0,32,41,0,978,42.3 23 | "2015-08-01 00:11:00",0,32,40,0,978,37.36 24 | "2015-08-01 00:11:30",0,32,41,0,975,27.23 25 | "2015-08-01 00:12:01",0,32,41,0,978,18.48 26 | "2015-08-01 00:12:31",0,32,40,0,978,27.37 27 | "2015-08-01 00:13:01",0,32,40,0,975,30.05 28 | "2015-08-01 00:13:31",0,32,41,0,975,32.59 29 | "2015-08-01 00:14:01",0,32,41,0,975,22.05 30 | "2015-08-01 00:14:31",0,32,41,0,975,38.13 31 | "2015-08-01 00:15:01",0,32,41,0,975,32.14 32 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/resources/samples/sample2.csv: -------------------------------------------------------------------------------- 1 | time,power,temp,humidity,light,CO2,dust 2 | "2015-08-01 00:12:01",0,32,41,0,978,18.48 3 | "2015-08-01 00:12:31",0,32,40,0,978,27.37 4 | "2015-08-01 00:13:01",0,32,40,0,975,30.05 5 | "2015-08-01 00:13:31",0,32,41,0,975,32.59 6 | "2015-08-01 00:14:01",0,32,41,0,975,22.05 7 | "2015-08-01 00:14:31",0,32,41,0,975,38.13 8 | "2015-08-01 00:15:01",0,32,41,0,975,32.14 9 | "2015-08-01 00:15:31",0,32,40,0,978,19.12 10 | "2015-08-01 00:16:02",0,32,41,0,978,34.96 11 | "2015-08-01 00:16:32",0,32,41,0,978,25.36 12 | "2015-08-01 00:17:02",0,32,41,0,978,16.72 13 | "2015-08-01 00:17:32",0,32,41,0,975,24.41 14 | "2015-08-01 00:18:02",0,32,41,0,975,32.49 15 | "2015-08-01 00:18:32",0,32,41,0,978,23.14 16 | "2015-08-01 00:19:02",0,32,41,0,975,24.09 17 | "2015-08-01 00:19:32",0,32,41,0,978,29.91 18 | "2015-08-01 00:20:02",0,32,41,0,975,30.19 19 | "2015-08-01 00:20:33",0,32,41,0,975,35.13 20 | "2015-08-01 00:21:03",0,32,41,0,975,29.21 21 | "2015-08-01 00:21:33",0,32,41,0,975,23.21 22 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/resources/samples/sample3.csv: -------------------------------------------------------------------------------- 1 | time,power,temp,humidity,light,CO2,dust 2 | "2015-08-01 00:00:28",0,32,40,0,973,27.8 3 | "2015-08-01 00:00:58",0,32,40,0,973,27.09 -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/scala/com/lightbend/killrweather/loader/utils/FilesIteratorTests.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import org.scalatest.{ Matchers, WordSpec } 4 | import ResourceLocator._ 5 | 6 | import scala.io.Source 7 | 8 | class FilesIteratorTests extends WordSpec with Matchers { 9 | "FilesIterator" should { 10 | "iterate through all matching files in a directory" in { 11 | val resources = locate("samples") 12 | val fileData = Seq("samples/sample1.csv", "samples/sample2.csv", "samples/sample3.csv").map(locate(_)).map(Source.fromFile(_).getLines()).flatten 13 | val zipData = ZipFileIterator(locate("samples/sample-1-2-csv.zip")) 14 | val allData = fileData ++ 
zipData 15 | FilesIterator(resources).size should be(allData.size) 16 | } 17 | 18 | "properly consume a single file" in { 19 | val file = locate("samples/sample1.csv") 20 | val contents = Source.fromFile(file).getLines().toList 21 | 22 | (FilesIterator(file) zip contents.iterator).foreach { case (t1, t2) => t1 should be(t2) } 23 | FilesIterator(file).size should be(contents.size) 24 | } 25 | 26 | } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/scala/com/lightbend/killrweather/loader/utils/ResourceLocator.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import java.io.File 4 | 5 | object ResourceLocator { 6 | def locate(filename: String): File = { 7 | val classLoader = this.getClass.getClassLoader 8 | new File(classLoader.getResource(filename).getFile) 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/scala/com/lightbend/killrweather/loader/utils/TextFileIteratorTests.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import org.scalatest.{ Matchers, WordSpec } 4 | import ResourceLocator._ 5 | 6 | import scala.io.Source 7 | 8 | class TextFileIteratorTests extends WordSpec with Matchers { 9 | 10 | "TextFileIterator" should { 11 | "iterate through the contents of a file" in { 12 | val file = locate("samples/sample1.csv") 13 | val contents = Source.fromFile(file).getLines().toList 14 | 15 | (TextFileIterator(file) zip contents.iterator).foreach { case (t1, t2) => t1 should be(t2) } 16 | TextFileIterator(file).size should be(contents.size) 17 | } 18 | } 19 | 20 | } 21 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-loader/src/test/scala/com/lightbend/killrweather/loader/utils/ZipFileIteratorTests.scala: -------------------------------------------------------------------------------- 1 | package com.lightbend.killrweather.loader.utils 2 | 3 | import org.scalatest.{ Matchers, WordSpec } 4 | import ResourceLocator._ 5 | 6 | import scala.io.Source 7 | 8 | class ZipFileIteratorTests extends WordSpec with Matchers { 9 | "ZipFileIterator" should { 10 | "iterate through the contents of a zip file" in { 11 | val zipFile = locate("samples/sample-1-2-csv.zip") 12 | val src1 = locate("samples/sample1.csv") 13 | val src2 = locate("samples/sample2.csv") 14 | 15 | val sources = (Source.fromFile(src1).getLines() ++ Source.fromFile(src2).getLines()).toList 16 | (sources.iterator zip ZipFileIterator(zipFile)).foreach { case (l1, l2) => l1 should be(l2) } 17 | ZipFileIterator(zipFile).size should be(sources.size) 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-manager-support/fdp-killrweather.json: -------------------------------------------------------------------------------- 1 | { 2 | "id" : "id", 3 | "name" : "killrweather Sample Application", 4 | "description" : "killrweather Sample Application", 5 | "group" : "group", 6 | "version" : "0.1.0", 7 | "packageType" : "helmchart", 8 | "defaultAdminLink" : false, 9 | "dependencies" : [ ], 10 | "optionalDeps" : [ ], 11 | "installable" : true, 12 | "resourcePath" : 
"file:////Users/boris/Projects/killrweatherLightbend/killrweather-deployment-chart-hdfs", 13 | "order" : 0 14 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-manager-support/fdp-killrweather.options.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | datadirectory: '{{ data|datadirectory }}' 3 | image: 4 | app: '{{ image|app }}' 5 | appstructured: '{{ image|appstructured }}' 6 | loader: '{{ image|loader }}' 7 | http: '{{ image|http }}' 8 | grpc: '{{ image|grpc }}' 9 | pullPolicy: '{{ image|pullPolicy }}' 10 | version: '{{ image|version }}' 11 | app: 12 | jar: '{{ app|jar }}' 13 | main: '{{ app|main }}' 14 | structured: 15 | jar: '{{ structured|jar }}' 16 | main: '{{ structured|main }}' 17 | components: 18 | app: '{{ components|app }}' 19 | http: '{{ components|http }}' 20 | grpc: '{{ components|grpc }}' 21 | configuration: 22 | checkpointing: 23 | pvc: '{{ configuration|checkpointing|pvc }}' 24 | streaming: 25 | batchInterval: '{{ configuration|streaming|batchInterval }}' 26 | checkpointDir: '{{ configuration|streaming|checkpointDir }}' 27 | kafka: 28 | brokerlist: '{{ configuration|kafka|brokerlist }}' 29 | cassandra: 30 | hosts: '{{ configuration|cassandra|hosts }}' 31 | grafana: 32 | host: '{{ configuration|grafana|host }}' 33 | port: !!int '{{ configuration|grafana|port }}' 34 | influx: 35 | host: '{{ configuration|influx|host }}' 36 | port: !!int '{{ configuration|influx|port }}' 37 | loader: 38 | publish_interval: '{{ configuration|loader|publish_interval }}' 39 | data_mount: '{{ configuration|loader|data_mount }}' 40 | batch_size: !!int '{{ configuration|loader|batch_size }}' 41 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-structured-app-local/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | include "local.conf" 2 | 3 | spark { 4 | serializer = "org.apache.spark.serializer.KryoSerializer" 5 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-structured-app-local/src/main/resources/cluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "broker.kafka.l4lb.thisdcos.directory:9092" 4 | brokers = ${?KAFKA_BROKERS} 5 | } 6 | 7 | spark { 8 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory, node-1-server.cassandra.autoip.dcos.thisdcos.directory, node-2-server.cassandra.autoip.dcos.thisdcos.directory" 9 | cassandra.connection.host = ${?CASSANDRA_HOSTS} 10 | } 11 | 12 | influx { 13 | enabled = true 14 | server = "http://influxdb.marathon.l4lb.thisdcos.directory" 15 | server = ${?INFLUXDB_HOST} 16 | port = 8086 17 | port = ${?INFLUXDB_PORT} 18 | } 19 | 20 | grafana { 21 | server = "grafana.marathon.l4lb.thisdcos.directory" 22 | server = ${?GRAFANA_HOST} 23 | port = 3000 24 | port =${?GRAFANA_PORT} 25 | } 26 | 27 | usingCluster = "from cluster.conf" 28 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-structured-app-local/src/main/resources/grafana-source.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": 
"weather", 3 | "type": "influxdb", 4 | "typeLogoUrl": "public/app/plugins/datasource/influxdb/img/influxdb_logo.svg", 5 | "access": "proxy", 6 | "url": "http://influxdb.marathon.l4lb.thisdcos.directory:8086", 7 | "password": "root", 8 | "user": "root", 9 | "database": "weather", 10 | "basicAuth": true, 11 | "isDefault": false, 12 | "jsonData": {} 13 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-structured-app-local/src/main/resources/local.conf: -------------------------------------------------------------------------------- 1 | # Configuration entries in this stanza are passed directly to the spark configuration builder 2 | spark { 3 | master = "local[4]" 4 | 5 | cassandra { 6 | connection.host = "172.17.0.2" 7 | } 8 | } 9 | 10 | kafka { 11 | brokers = "172.17.0.3:9092" 12 | } 13 | 14 | grafana { 15 | server = "localhost" 16 | port = 3000 17 | } -------------------------------------------------------------------------------- /apps/killrweather/source/core/killrweather-structured-app-local/src/main/resources/localWithCluster.conf: -------------------------------------------------------------------------------- 1 | # Configuration of Kafka broker to subscribe for events 2 | kafka { 3 | brokers = "10.0.9.234:1025" 4 | } 5 | 6 | spark { 7 | cassandra.connection.host = "node-0-server.cassandra.autoip.dcos.thisdcos.directory, node-1-server.cassandra.autoip.dcos.thisdcos.directory, node-2-server.cassandra.autoip.dcos.thisdcos.directory" 8 | } 9 | 10 | influx { 11 | enabled = true 12 | server = "http://10.0.4.61" 13 | port = 18559 14 | } 15 | 16 | grafana { 17 | server = "10.0.4.61" 18 | port = 20749 19 | } 20 | 21 | usingCluster = "from cluster.conf" 22 | usingCluster = ${?USE_CLUSTER_CONFIG} -------------------------------------------------------------------------------- /apps/killrweather/source/core/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.1.6 2 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | logLevel := Level.Warn 2 | 3 | resolvers += Resolver.sonatypeRepo("releases") 4 | 5 | resolvers += "Bintray Repository" at "https://dl.bintray.com/shmishleniy/" 6 | 7 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.6") 8 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 9 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") 10 | 11 | // Common settings plugin as a project 12 | lazy val root = project.in( file(".") ).dependsOn(RootProject(file("../../../sbt-common-settings").toURI)) 13 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/project/scalapb.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.thesamet" % "sbt-protoc" % "0.99.15") 2 | 3 | libraryDependencies += "com.trueaccord.scalapb" %% "compilerplugin" % "0.6.6" 4 | -------------------------------------------------------------------------------- /apps/killrweather/source/core/protobufs/src/main/protobuf/WeatherClient.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package com.lightbend.killrweather; 4 | 5 | service WeatherListener { 6 | // Sends a greeting 7 | rpc GetWeatherReport 
(WeatherRecord) returns (Reply) {} 8 | } 9 | 10 | message Reply { 11 | bool status = 1; 12 | } 13 | 14 | message WeatherRecord { 15 | string wsid = 1; 16 | int32 year = 2; 17 | int32 month =3; 18 | int32 day = 4; 19 | int32 hour = 5; 20 | double temperature = 7; 21 | double dewpoint = 8; 22 | double pressure = 9; 23 | int32 windDirection = 10; 24 | double windSpeed = 11; 25 | int32 skyCondition = 12; 26 | string skyConditionText = 13; 27 | double oneHourPrecip = 14; 28 | double sixHourPrecip = 15; 29 | } -------------------------------------------------------------------------------- /apps/killrweather/spark-checkpointing-chart.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/apps/killrweather/spark-checkpointing-chart.tgz -------------------------------------------------------------------------------- /apps/sbt-common-settings/build.sbt: -------------------------------------------------------------------------------- 1 | // Artifact 2 | organization := "lightbend" 3 | 4 | name := "sbt-common-settings" 5 | 6 | version := "0.0.1" 7 | 8 | sbtPlugin := true 9 | 10 | 11 | // Sources 12 | scalaSource in Compile := baseDirectory.value / "settings" 13 | 14 | libraryDependencies ++= Seq("se.marcuslonnberg" % "sbt-docker" % "1.5.0", "com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 15 | 16 | addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.5.0") 17 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.4") 18 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5") 19 | -------------------------------------------------------------------------------- /apps/sbt-common-settings/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.1.6 2 | -------------------------------------------------------------------------------- /apps/sbt-common-settings/settings/DockerPackagerSettings.scala: -------------------------------------------------------------------------------- 1 | import sbt.Keys._ 2 | import sbt._ 3 | 4 | object DockerProjectSpecificPackagerPlugin extends AutoPlugin { 5 | 6 | import sbtdocker.DockerPlugin 7 | import DockerPlugin.autoImport._ 8 | import com.typesafe.sbt.packager.archetypes.JavaAppPackaging 9 | import JavaAppPackaging.autoImport._ 10 | 11 | override def trigger = allRequirements 12 | override def requires = DockerPlugin && JavaAppPackaging 13 | 14 | // base project settings 15 | def projectBase(id: String)(base: String = id) = Project(id, base = file(base)) 16 | .settings( 17 | fork in run := true, 18 | ) 19 | 20 | // settings for a native-packager based docker project based on sbt-docker plugin 21 | def sbtdockerPackagerBase(id: String, 22 | applDir: TaskKey[sbt.File], 23 | executableScriptName: SettingKey[String], 24 | dockerBaseImage: String = "openjdk:8u151-jre")(base: String = id) = projectBase(id)(base) 25 | 26 | .enablePlugins(sbtdocker.DockerPlugin) 27 | .settings( 28 | dockerfile in docker := { 29 | val targetDir = s"/$base" 30 | 31 | new Dockerfile { 32 | from(dockerBaseImage) 33 | entryPoint(s"$targetDir/bin/${executableScriptName.value}") 34 | copy(applDir.value, targetDir) 35 | } 36 | }, 37 | 38 | // Set name for the image 39 | imageNames in docker := Seq( 40 | ImageName(namespace = Some(organization.value), 41 | repository = name.value.toLowerCase, 42 | tag = Some(version.value)) 43 | ), 44 | 45 | buildOptions in docker := 
BuildOptions(cache = false) 46 | ) 47 | } 48 | -------------------------------------------------------------------------------- /common.sh: -------------------------------------------------------------------------------- 1 | # Common functions used by other shell scripts in this project. 2 | # Scripts that source this file should define a help function (used by error). 3 | set -eu 4 | : ${NOOP:=} 5 | 6 | PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" 7 | 8 | . "$PROJECT_ROOT/version.sh" 9 | 10 | # The help function that works for all the individual app build.sh scripts. The top-level 11 | # build.sh overrides it. 12 | function help { 13 | cat <&2 28 | help 1>&2 29 | exit 1 30 | } 31 | function warn { 32 | echo "WARN: $0: $@" 1>&2 33 | } 34 | # Called info2, because info is a *nix command 35 | function info2 { 36 | echo "INFO: $0: $@" 37 | } 38 | 39 | function get_version { 40 | [[ $# -eq 0 ]] || [[ -z $1 ]] && error "No value specified for the version!" 41 | echo $1 42 | } 43 | -------------------------------------------------------------------------------- /process-templates.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" 6 | cd $HERE 7 | 8 | . ./version.sh 9 | 10 | help() { 11 | cat < "$file" 49 | fi 50 | done 51 | } 52 | 53 | echo "Processing templates:" 54 | for d in * 55 | do 56 | case $d in 57 | release|target|build-plugin) ;; # skip 58 | *) 59 | find "$d" -name '*.yaml.template' | while read f 60 | do 61 | process_templates "$f" 62 | done 63 | find "$d" -name 'training-pod.json.template' | while read f 64 | do 65 | process_templates "$f" 66 | done 67 | ;; 68 | esac 69 | done 70 | 71 | -------------------------------------------------------------------------------- /supportingcharts/cassandrachart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | # Common backup files 9 | *.swp 10 | *.bak 11 | *.tmp 12 | *~ 13 | # Various IDEs 14 | .project 15 | .idea/ 16 | *.tmproj 17 | OWNERS -------------------------------------------------------------------------------- /supportingcharts/cassandrachart/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: cassandra 2 | version: 0.9.1 3 | appVersion: 3.11.3 4 | description: Apache Cassandra is a free and open-source distributed database management 5 | system designed to handle large amounts of data across many commodity servers, providing 6 | high availability with no single point of failure. 7 | icon: https://upload.wikimedia.org/wikipedia/commons/5/5e/Cassandra_logo.svg 8 | keywords: 9 | - cassandra 10 | - database 11 | - nosql 12 | home: http://cassandra.apache.org 13 | maintainers: 14 | - name: Fast data team 15 | engine: gotpl -------------------------------------------------------------------------------- /supportingcharts/cassandrachart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 
4 | */}} 5 | {{- define "cassandra.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "cassandra.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "cassandra.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Create the name of the service account to use 36 | */}} 37 | {{- define "cassandra.serviceAccountName" -}} 38 | {{- if .Values.serviceAccount.create -}} 39 | {{ default (include "cassandra.fullname" .) .Values.serviceAccount.name }} 40 | {{- else -}} 41 | {{ default "default" .Values.serviceAccount.name }} 42 | {{- end -}} 43 | {{- end -}} -------------------------------------------------------------------------------- /supportingcharts/cassandrachart/templates/cassandra-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: cassandra-service-account 5 | -------------------------------------------------------------------------------- /supportingcharts/cassandrachart/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget -}} 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | labels: 6 | app: {{ template "cassandra.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "cassandra.fullname" . }} 11 | spec: 12 | selector: 13 | matchLabels: 14 | app: {{ template "cassandra.name" . }} 15 | release: {{ .Release.Name }} 16 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 17 | {{- end -}} -------------------------------------------------------------------------------- /supportingcharts/cassandrachart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "cassandra.name" . }} 5 | labels: 6 | app: {{ template "cassandra.name" . }} 7 | chart: {{ template "cassandra.chart" . 
}} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | clusterIP: None 12 | type: {{ .Values.service.type }} 13 | ports: 14 | - name: intra 15 | port: 7000 16 | targetPort: 7000 17 | - name: tls 18 | port: 7001 19 | targetPort: 7001 20 | - name: jmx 21 | port: 7199 22 | targetPort: 7199 23 | - name: cql 24 | port: {{ default 9042 .Values.config.ports.cql }} 25 | targetPort: {{ default 9042 .Values.config.ports.cql }} 26 | - name: thrift 27 | port: {{ default 9160 .Values.config.ports.thrift }} 28 | targetPort: {{ default 9160 .Values.config.ports.thrift }} 29 | {{- if .Values.config.ports.agent }} 30 | - name: agent 31 | port: {{ .Values.config.ports.agent }} 32 | targetPort: {{ .Values.config.ports.agent }} 33 | {{- end }} 34 | selector: 35 | app: {{ template "cassandra.name" . }} 36 | release: {{ .Release.Name }} -------------------------------------------------------------------------------- /supportingcharts/cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1alpha1 2 | kind: Kafka 3 | metadata: 4 | name: sample-cluster 5 | spec: 6 | kafka: 7 | replicas: 3 8 | listeners: 9 | plain: {} 10 | tls: {} 11 | readinessProbe: 12 | initialDelaySeconds: 15 13 | timeoutSeconds: 5 14 | livenessProbe: 15 | initialDelaySeconds: 15 16 | timeoutSeconds: 5 17 | config: 18 | offsets.topic.replication.factor: 1 19 | transaction.state.log.replication.factor: 1 20 | transaction.state.log.min.isr: 1 21 | storage: 22 | type: ephemeral 23 | metrics: {} 24 | zookeeper: 25 | replicas: 1 26 | readinessProbe: 27 | initialDelaySeconds: 15 28 | timeoutSeconds: 5 29 | livenessProbe: 30 | initialDelaySeconds: 15 31 | timeoutSeconds: 5 32 | storage: 33 | type: ephemeral 34 | metrics: {} 35 | entityOperator: 36 | topicOperator: {} 37 | userOperator: {} -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Cassandra Helm chart for Kubernetes 3 | name: cassandra 4 | version: 0.9.2 -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/NOTES.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/supportingcharts/extendedcassandrachart/templates/NOTES.txt -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 
4 | */}} 5 | {{- define "kubernetes.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "kubernetes.fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/cassandraExporterConfigmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.cassandraAlertmanager.enableAlerts }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "kubernetes.name" . }}-alerts 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "kubernetes.name" . }}-alerts 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | heritage: {{ .Release.Service }} 11 | prometheus: {{ .Release.Namespace }} 12 | release: {{ .Release.Name }} 13 | role: alert-rules 14 | data: 15 | elasticsearch.rules: |- 16 | {{- include "cassandra.rules.yaml.tpl" . | indent 4}} 17 | {{ end }} -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/cassandraExporterServicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.cassandraPrometheusScrap.enableScrap }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: cassandra-{{ .Release.Namespace }} 6 | namespace: observability 7 | labels: 8 | app: cassandra 9 | prometheus: observability 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: cassandra 14 | namespaceSelector: 15 | matchNames: 16 | - {{ .Release.Namespace }} 17 | endpoints: 18 | - port: metrics 19 | interval: 60s 20 | scrapeTimeout: 59s 21 | {{- end }} -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/cassandraReaperSecret.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.cassandraReaper.enableReaper }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: cassandra-reaper 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: cassandra-reaper 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | type: Opaque 13 | data: 14 | REAPER_JMX_AUTH_PASSWORD: "{{ .Values.cassandraReaper.jmxAuth.password | b64enc }}" 15 | {{ end }} -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/cassandraReaperService.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.cassandraReaper.enableReaper }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: cassandra-reaper 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: cassandra-reaper 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | spec: 13 | ports: 14 | - name: app 15 | port: 80 16 | targetPort: {{ .Values.cassandraReaper.envVariables.REAPER_SERVER_APP_PORT }} 17 | - 
name: admin 18 | port: {{ .Values.cassandraReaper.envVariables.REAPER_SERVER_ADMIN_PORT }} 19 | targetPort: {{ .Values.cassandraReaper.envVariables.REAPER_SERVER_ADMIN_PORT }} 20 | - name: jmx 21 | port: 7199 22 | targetPort: 7199 23 | selector: 24 | app: cassandra-reaper 25 | {{- end }} -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: {{ template "kubernetes.name" . }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | app: {{ template "kubernetes.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | spec: 12 | maxUnavailable: {{ .Values.cassandraMaxUnavailableNodes }} 13 | selector: 14 | matchLabels: 15 | app: {{ template "kubernetes.name" . }} -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.cassandraBackup.enableBackups }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kubernetes.name" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "kubernetes.name" . }} 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | type: Opaque 13 | data: 14 | AWS_ACCESS_KEY_ID: "{{ .Values.cassandraBackup.awsAccessKeyId | b64enc }}" 15 | AWS_SECRET_ACCESS_KEY: "{{ .Values.cassandraBackup.awsSecretAccessKey | b64enc }}" 16 | AWS_PASSPHRASE: "{{ .Values.cassandraBackup.awsPassphrase | b64enc }}" 17 | {{ end }} -------------------------------------------------------------------------------- /supportingcharts/extendedcassandrachart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "kubernetes.name" . }} 5 | namespace: {{ .Release.Namespace }} 6 | annotations: 7 | # https://github.com/kubernetes/examples/issues/89 8 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 9 | labels: 10 | app: {{ template "kubernetes.name" . }} 11 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 12 | release: {{ .Release.Name }} 13 | heritage: {{ .Release.Service }} 14 | spec: 15 | clusterIP: None 16 | ports: 17 | - port: 9042 18 | name: cassandra 19 | - name: jmx 20 | port: 7199 21 | {{- if .Values.cassandraExporter.enableExporter }} 22 | - name: metrics 23 | port: 8080 24 | targetPort: {{ .Values.cassandraExporter.config.listenPort }} 25 | protocol: TCP 26 | - name: jmx-exporter 27 | port: 5555 28 | targetPort: 5555 29 | {{- end }} 30 | selector: 31 | app: {{ template "kubernetes.name" . }} -------------------------------------------------------------------------------- /supportingcharts/grafanachart/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: grafana 2 | version: 1.17.0 3 | appVersion: 5.3.0 4 | kubeVersion: "^1.8.0-0" 5 | description: The leading tool for querying and visualizing time series and metrics. 
6 | home: https://grafana.net 7 | icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png 8 | sources: 9 | - https://github.com/grafana/grafana 10 | maintainers: 11 | - name: zanhsieh 12 | email: zanhsieh@gmail.com 13 | - name: rtluckie 14 | email: rluckie@cisco.com 15 | engine: gotpl 16 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "grafana.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | {{- define "grafana.fullname" -}} 9 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 10 | {{- end -}} 11 | 12 | {{/* 13 | Create a default fully qualified app name. 14 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 15 | If release name contains chart name it will be used as a full name. 16 | */}} 17 | {{- define "grafana.oldfullname" -}} 18 | {{- if .Values.fullnameOverride -}} 19 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 20 | {{- else -}} 21 | {{- $name := default .Chart.Name .Values.nameOverride -}} 22 | {{- if contains $name .Release.Name -}} 23 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 24 | {{- else -}} 25 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 26 | {{- end -}} 27 | {{- end -}} 28 | {{- end -}} 29 | 30 | {{/* 31 | Create chart name and version as used by the chart label. 32 | */}} 33 | {{- define "grafana.chart" -}} 34 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 35 | {{- end -}} 36 | 37 | {{/* 38 | Create the name of the service account 39 | */}} 40 | {{- define "grafana.serviceAccountName" -}} 41 | {{- if .Values.serviceAccount.create -}} 42 | {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} 43 | {{- else -}} 44 | {{ default "default" .Values.serviceAccount.name }} 45 | {{- end -}} 46 | {{- end -}} 47 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ template "grafana.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | {{- with .Values.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | name: {{ template "grafana.fullname" . 
}}-clusterrole 15 | {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} 16 | rules: 17 | - apiGroups: [""] # "" indicates the core API group 18 | resources: ["configmaps"] 19 | verbs: ["get", "watch", "list"] 20 | {{- else }} 21 | rules: [] 22 | {{- end}} 23 | {{- end}} 24 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-clusterrolebinding 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ template "grafana.chart" . }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | {{- with .Values.annotations }} 12 | annotations: 13 | {{ toYaml . | indent 4 }} 14 | {{- end }} 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "grafana.serviceAccountName" . }} 18 | namespace: {{ .Release.Namespace }} 19 | roleRef: 20 | kind: ClusterRole 21 | name: {{ template "grafana.fullname" . }}-clusterrole 22 | apiGroup: rbac.authorization.k8s.io 23 | {{- end}} 24 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/configmap-dashboard-provider.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.sidecar.dashboards.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ template "grafana.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | {{- with .Values.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | name: {{ template "grafana.fullname" . }}-config-dashboards 15 | data: 16 | provider.yaml: |- 17 | apiVersion: 1 18 | providers: 19 | - name: 'default' 20 | orgId: 1 21 | folder: '' 22 | type: file 23 | disableDeletion: false 24 | options: 25 | path: {{ .Values.sidecar.dashboards.folder }} 26 | {{- end}} 27 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/dashboards-json-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.dashboards }} 2 | {{- range $provider, $dashboards := .Values.dashboards }} 3 | --- 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} 8 | labels: 9 | app: {{ template "grafana.name" $ }} 10 | chart: {{ template "grafana.chart" $ }} 11 | release: {{ $.Release.Name }} 12 | heritage: {{ $.Release.Service }} 13 | dashboard-provider: {{ $provider }} 14 | data: 15 | {{- range $key, $value := $dashboards }} 16 | {{- if hasKey $value "json" }} 17 | {{ $key }}.json: | 18 | {{ $value.json | indent 4 }} 19 | {{- end }} 20 | {{- end }} 21 | {{- end }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "grafana.fullname" . 
-}} 3 | {{- $servicePort := .Values.service.port -}} 4 | {{- $ingressPath := .Values.ingress.path -}} 5 | apiVersion: extensions/v1beta1 6 | kind: Ingress 7 | metadata: 8 | name: {{ $fullName }} 9 | labels: 10 | app: {{ template "grafana.name" . }} 11 | chart: {{ template "grafana.chart" . }} 12 | release: {{ .Release.Name }} 13 | heritage: {{ .Release.Service }} 14 | {{- if .Values.ingress.labels }} 15 | {{ toYaml .Values.ingress.labels | indent 4 }} 16 | {{- end }} 17 | {{- with .Values.ingress.annotations }} 18 | annotations: 19 | {{ toYaml . | indent 4 }} 20 | {{- end }} 21 | spec: 22 | {{- if .Values.ingress.tls }} 23 | tls: 24 | {{- range .Values.ingress.tls }} 25 | - hosts: 26 | {{- range .hosts }} 27 | - {{ . | quote }} 28 | {{- end }} 29 | secretName: {{ .secretName }} 30 | {{- end }} 31 | {{- end }} 32 | rules: 33 | {{- range .Values.ingress.hosts }} 34 | - host: {{ . }} 35 | http: 36 | paths: 37 | - path: {{ $ingressPath }} 38 | backend: 39 | serviceName: {{ $fullName }} 40 | servicePort: {{ $servicePort }} 41 | {{- end }} 42 | {{- end }} 43 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | apiVersion: extensions/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | annotations: 12 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' 13 | apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' 14 | seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' 15 | apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' 16 | spec: 17 | privileged: false 18 | allowPrivilegeEscalation: false 19 | requiredDropCapabilities: 20 | - ALL 21 | volumes: 22 | - 'configMap' 23 | - 'emptyDir' 24 | - 'projected' 25 | - 'secret' 26 | - 'downwardAPI' 27 | - 'persistentVolumeClaim' 28 | hostNetwork: false 29 | hostIPC: false 30 | hostPID: false 31 | runAsUser: 32 | rule: 'RunAsAny' 33 | seLinux: 34 | rule: 'RunAsAny' 35 | supplementalGroups: 36 | rule: 'RunAsAny' 37 | fsGroup: 38 | rule: 'RunAsAny' 39 | readOnlyRootFilesystem: false 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ template "grafana.chart" . }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | {{- with .Values.persistence.annotations }} 12 | annotations: 13 | {{ toYaml . | indent 4 }} 14 | {{- end }} 15 | spec: 16 | accessModes: 17 | {{- range .Values.persistence.accessModes }} 18 | - {{ . 
| quote }} 19 | {{- end }} 20 | resources: 21 | requests: 22 | storage: {{ .Values.persistence.size | quote }} 23 | storageClassName: {{ .Values.persistence.storageClassName }} 24 | {{- end -}} 25 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: Role 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | {{- if .Values.rbac.pspEnabled }} 12 | rules: 13 | - apiGroups: ['extensions'] 14 | resources: ['podsecuritypolicies'] 15 | verbs: ['use'] 16 | resourceNames: [{{ template "grafana.fullname" . }}] 17 | {{- end }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | name: {{ template "grafana.fullname" . }} 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "grafana.serviceAccountName" . }} 18 | {{- end -}} 19 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ template "grafana.fullname" . }} 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ template "grafana.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | type: Opaque 11 | data: 12 | admin-user: {{ .Values.adminUser | b64enc | quote }} 13 | {{- if .Values.adminPassword }} 14 | admin-password: {{ .Values.adminPassword | b64enc | quote }} 15 | {{- else }} 16 | admin-password: {{ randAlphaNum 40 | b64enc | quote }} 17 | {{- end }} 18 | {{- if not .Values.ldap.existingSecret }} 19 | ldap-toml: {{ .Values.ldap.config | b64enc | quote }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /supportingcharts/grafanachart/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "grafana.serviceAccountName" . }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /supportingcharts/influxdbchart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /supportingcharts/influxdbchart/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: influxdb 2 | version: 0.1.0 3 | description: InfluxDB. 4 | home: https://www.influxdata.com/time-series-platform/influxdb/ 5 | sources: 6 | - https://github.com/influxdata/influxdb -------------------------------------------------------------------------------- /supportingcharts/influxdbchart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | InfluxDB can be accessed via port {{ .Values.config.http.bind_address }} on the following DNS name from within your cluster: 2 | 3 | - http://{{ template "name" . }}.{{ .Release.Namespace }}:{{ .Values.config.http.bind_address }} 4 | 5 | You can easily connect to the remote instance with your local influx cli. To forward the API port to localhost:8086 run the following: 6 | 7 | - kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "name" . }} -o jsonpath='{ .items[0].metadata.name }') 8086:{{ .Values.config.http.bind_address }} 8 | 9 | You can also connect to the influx cli from inside the container. To open a shell session in the InfluxDB pod run the following: 10 | 11 | - kubectl exec -i -t --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "name" . }} -o jsonpath='{.items[0].metadata.name}') /bin/sh 12 | 13 | To tail the logs for the InfluxDB pod run the following: 14 | 15 | - kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "name" . }} -o jsonpath='{ .items[0].metadata.name }') 16 | -------------------------------------------------------------------------------- /supportingcharts/influxdbchart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} -------------------------------------------------------------------------------- /supportingcharts/influxdbchart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "name" . }} 5 | labels: 6 | app: {{ template "name" . 
}} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | spec: 11 | ports: 12 | - name: api 13 | port: {{ .Values.config.http.bind_address }} 14 | targetPort: {{ .Values.config.http.bind_address }} 15 | selector: 16 | app: {{ template "name" . }} -------------------------------------------------------------------------------- /supportingcharts/nfschart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: nfs 2 | version: 0.1.0 3 | appVersion: 0.1.0 4 | description: A Helm chart for NFS server 5 | sources: 6 | - https://github.com/kubernetes/examples/tree/master/staging/volumes/nfs 7 | keywords: 8 | - nfs 9 | maintainers: 10 | - name: yuchaoran2011 11 | email: yuchaoran2011@gmail.com 12 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/nfs-pv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lightbend/fdp-sample-applications/e605fa95f68b03ace521bf56d91c4e21ae55da37/supportingcharts/nfschart/nfs-pv.png -------------------------------------------------------------------------------- /supportingcharts/nfschart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} -------------------------------------------------------------------------------- /supportingcharts/nfschart/templates/nfs-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: {{ .Values.pvName }} 5 | namespace: {{ .Values.namespace }} 6 | spec: 7 | capacity: 8 | storage: {{ .Values.pvStorage }} 9 | accessModes: 10 | - ReadWriteMany 11 | nfs: 12 | server: {{ template "name" . 
}}.{{ .Values.namespace }}.svc.cluster.local 13 | path: "/" 14 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/templates/nfs-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: {{ .Values.pvcName }} 5 | namespace: {{ .Values.namespace }} 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | storageClassName: "" 10 | resources: 11 | requests: 12 | storage: {{ .Values.pvcStorage }} 13 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/templates/nfs-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nfs-service-account 5 | namespace: {{ .Values.namespace }} 6 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/templates/nfs-server-local-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: nfs-pv-local 5 | namespace: {{ .Values.namespace }} 6 | spec: 7 | accessModes: [ "ReadWriteOnce" ] 8 | resources: 9 | requests: 10 | storage: {{ .Values.localStorage }} 11 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/templates/nfs-server-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: {{ template "name" . }} 5 | namespace: {{ .Values.namespace }} 6 | labels: 7 | app: {{ template "name" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | release: "{{ .Release.Name }}" 10 | spec: 11 | replicas: 1 12 | selector: 13 | app: {{ template "name" . }} 14 | template: 15 | metadata: 16 | labels: 17 | app: {{ template "name" . }} 18 | spec: 19 | # serviceAccountName: nfs-service-account 20 | containers: 21 | - name: {{ .Values.nfsImage }} 22 | image: k8s.gcr.io/volume-nfs:0.8 23 | ports: 24 | - name: nfs 25 | containerPort: 2049 26 | - name: mountd 27 | containerPort: 20048 28 | - name: rpcbind 29 | containerPort: 111 30 | securityContext: 31 | privileged: true 32 | volumeMounts: 33 | - mountPath: /exports 34 | name: nfs-local 35 | volumes: 36 | - name: nfs-local 37 | # Back the nfs-local volume with the PVC defined in nfs-server-local-pv.yaml (volumeClaimTemplates is only valid for StatefulSets, so a bare volume name would not deploy) 38 | persistentVolumeClaim: 39 | claimName: nfs-pv-local 40 | 41 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/templates/nfs-server-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: {{ template "name" . }} 5 | namespace: {{ .Values.namespace }} 6 | labels: 7 | app: {{ template "name" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | release: "{{ .Release.Name }}" 10 | spec: 11 | ports: 12 | - name: nfs 13 | port: 2049 14 | - name: mountd 15 | port: 20048 16 | - name: rpcbind 17 | port: 111 18 | selector: 19 | app: {{ template "name" .
}} 20 | -------------------------------------------------------------------------------- /supportingcharts/nfschart/values.yaml: -------------------------------------------------------------------------------- 1 | namespace: sample 2 | 3 | localStorage: 6Gi 4 | pvcStorage: 5Gi 5 | pvStorage: 5Gi 6 | 7 | pvName: nfs-persistent-volume 8 | pvcName: nfs-persistent-volume-claim 9 | 10 | nfsImage: nfs-server 11 | -------------------------------------------------------------------------------- /supportingcharts/scc.yaml: -------------------------------------------------------------------------------- 1 | kind: SecurityContextConstraints 2 | apiVersion: security.openshift.io/v1 3 | metadata: 4 | name: support-scc 5 | allowPrivilegedContainer: true 6 | runAsUser: 7 | type: RunAsAny 8 | seLinuxContext: 9 | type: RunAsAny 10 | fsGroup: 11 | type: RunAsAny 12 | supplementalGroups: 13 | type: RunAsAny 14 | volumes: 15 | - configMap 16 | - downwardAPI 17 | - emptyDir 18 | - persistentVolumeClaim 19 | - projected 20 | - secret 21 | users: 22 | - system:serviceaccount:killrweather:nfs-service-account 23 | - system:serviceaccount:killrweather:cassandra-service-account 24 | - system:serviceaccount:killrweather:grafana-service-account 25 | -------------------------------------------------------------------------------- /supportingcharts/zeppelinchart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /supportingcharts/zeppelinchart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Web-based notebook that enables data-driven, interactive data analytics and collaborative documents with SQL, Scala and more. 3 | name: zeppelin 4 | version: 1.0.1 5 | appVersion: 0.7.2 6 | home: https://zeppelin.apache.org/ 7 | sources: 8 | - https://github.com/apache/zeppelin 9 | icon: https://zeppelin.apache.org/assets/themes/zeppelin/img/zeppelin_classic_logo.png 10 | maintainers: 11 | - name: danisla 12 | email: disla@google.com 13 | -------------------------------------------------------------------------------- /supportingcharts/zeppelinchart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Create a port-forward to the zeppelin pod: 2 | kubectl port-forward -n {{ .Release.Namespace }} $(kubectl get pod -n {{ .Release.Namespace }} --selector=app={{ template "zeppelin.name" . }} -o jsonpath='{.items...metadata.name}') 8080:8080 3 | 4 | Open browser to UI: 5 | 6 | open http://localhost:8080 -------------------------------------------------------------------------------- /supportingcharts/zeppelinchart/templates/_helpers.yaml: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 
4 | */}} 5 | {{- define "zeppelin.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 24 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "zeppelin.fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 24 | trimSuffix "-" -}} 16 | {{- end -}} -------------------------------------------------------------------------------- /supportingcharts/zeppelinchart/templates/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Release.Name }}-zeppelin 5 | labels: 6 | app: {{ template "zeppelin.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - port: 8080 14 | name: web 15 | selector: 16 | app: {{ template "zeppelin.name" . }} 17 | release: {{ .Release.Name }} -------------------------------------------------------------------------------- /supportingcharts/zeppelinchart/values.yaml: -------------------------------------------------------------------------------- 1 | zeppelin: 2 | image: apache/zeppelin:0.8.0 3 | resources: 4 | limits: 5 | memory: "4096Mi" 6 | cpu: "2000m" 7 | 8 | hadoop: 9 | useConfigMap: false 10 | configMapName: hadoop-hadoop 11 | configPath: /usr/hadoop-2.7.3/etc/hadoop 12 | 13 | spark: 14 | driverMemory: 1g 15 | executorMemory: 1g 16 | numExecutors: 2 17 | -------------------------------------------------------------------------------- /version.sh: -------------------------------------------------------------------------------- 1 | : ${VERSION:=2.1.1-OpenShift} 2 | export VERSION 3 | --------------------------------------------------------------------------------