├── project ├── build.properties ├── project │ └── typesafe.sbt └── plugins.sbt ├── public ├── images │ └── favicon.png └── javascripts │ └── md5.min.js ├── README.md ├── tutorial ├── frontend-region.jssequence ├── client-server.jssequence └── index.html.script ├── .gitignore ├── test └── assets │ ├── SetupMocha.js │ └── javascripts │ └── map │ ├── MapSpec.coffee │ └── MarkerSpec.coffee ├── conf ├── routes └── application.conf ├── app ├── assets │ ├── javascripts │ │ ├── services │ │ │ ├── storage.coffee │ │ │ ├── mockGps.coffee │ │ │ └── gps.coffee │ │ ├── main.coffee │ │ ├── map │ │ │ ├── markerRenderer.coffee │ │ │ ├── marker.coffee │ │ │ └── map.coffee │ │ └── models │ │ │ └── mainPage.coffee │ └── stylesheets │ │ └── main.less ├── controllers │ └── Application.scala ├── actors │ ├── RegionManagerClient.scala │ ├── Actors.scala │ ├── GeoJsonBot.scala │ ├── PositionSubscriber.scala │ └── ClientConnection.scala ├── views │ ├── main.scala.html │ └── index.scala.html ├── models │ └── backend │ │ └── Backend.scala └── backend │ ├── Main.scala │ ├── Region.scala │ ├── BotManager.scala │ ├── SummaryRegion.scala │ ├── Settings.scala │ ├── RegionManager.scala │ └── GeoFunctions.scala ├── LICENSE ├── .travis.yml └── activator.properties /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.9 2 | -------------------------------------------------------------------------------- /public/images/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/typesafehub/ReactiveMaps/HEAD/public/images/favicon.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Example archived 2 | 3 | This example project is outdated and no longer useful. 
4 | 5 | Find example projects for Lightbend technologies at 6 | [Lightbend Tech Hub](https://developer.lightbend.com/start/) 7 | -------------------------------------------------------------------------------- /tutorial/frontend-region.jssequence: -------------------------------------------------------------------------------- 1 | participant Frontend 2 | participant PubSub 3 | participant Region 4 | 5 | Frontend->Region: UpdateUserPosition 6 | Frontend->PubSub: Subscribe 7 | Frontend->PubSub: Unsubscribe 8 | Region->PubSub: RegionPoints 9 | PubSub->Frontend: RegionPoints -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | logs 2 | project/typesafe.properties 3 | project/activator-tutorial-generator.sbt 4 | project/target 5 | target 6 | tmp 7 | .history 8 | dist 9 | /.idea 10 | /*.iml 11 | /out 12 | /.idea_modules 13 | /.classpath 14 | /.project 15 | /RUNNING_PID 16 | /.settings 17 | .target 18 | .cache 19 | bin 20 | .DS_Store 21 | activator-sbt-*-shim.sbt -------------------------------------------------------------------------------- /test/assets/SetupMocha.js: -------------------------------------------------------------------------------- 1 | // Setup requirejs to have the right baseUrl 2 | global.requirejs = require("requirejs"); 3 | 4 | requirejs.config({ 5 | nodeRequire: require, 6 | baseUrl: __dirname 7 | }); 8 | 9 | // A few modules that all tests will use 10 | global.Squire = requirejs("lib/squirejs/Squire"); 11 | global.assert = require("assert"); 12 | -------------------------------------------------------------------------------- /tutorial/client-server.jssequence: -------------------------------------------------------------------------------- 1 | Client->Server: UserMoved 2 | Note right of Server: Sent every 2-10 seconds,\nwhen the user moves\ntheir physical position. 3 | Client->Server: ViewingArea 4 | Note right of Server: Sent when the user zooms\nor changes map position. 5 | Server->Client: UserPositions 6 | Note right of Server: Sent when the server has\nupdated positions for\nusers in the currently\nviewed area. 
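For reference, the JSON carried by these messages in the client code below (gps.coffee and mockGps.coffee build user-moved, mainPage.coffee consumes user-positions) looks roughly like:

    UserMoved (client -> server):      {"event": "user-moved", "position": {"type": "Point", "coordinates": [lng, lat]}}
    UserPositions (server -> client):  {"event": "user-positions", "positions": <GeoJSON FeatureCollection of user and cluster features>}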
-------------------------------------------------------------------------------- /conf/routes: -------------------------------------------------------------------------------- 1 | # Routes 2 | # This file defines all application routes (Higher priority routes first) 3 | # ~~~~ 4 | 5 | # Home page 6 | GET / controllers.Application.index 7 | 8 | # The websocket 9 | GET /stream/:email controllers.Application.stream(email) 10 | 11 | # Static assets 12 | GET /assets/*file controllers.Assets.versioned(path="/public", file: Asset) 13 | -------------------------------------------------------------------------------- /app/assets/javascripts/services/storage.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # Reactive maps client side storage 3 | # 4 | define () -> 5 | return { 6 | 7 | # Get the last viewed area 8 | lastArea: -> 9 | if (localStorage.lastArea) 10 | try 11 | lastArea = JSON.parse localStorage.lastArea 12 | return lastArea 13 | catch e 14 | localStorage.removeItem("lastArea") 15 | 16 | # Set the last viewed area 17 | setLastArea: (lastArea) -> 18 | localStorage.lastArea = JSON.stringify lastArea 19 | 20 | } -------------------------------------------------------------------------------- /project/project/typesafe.sbt: -------------------------------------------------------------------------------- 1 | // Update this when a new patch of Reactive Platform is available 2 | val rpVersion = "15v09p04" 3 | 4 | // Update this when a major version of Reactive Platform is available 5 | val rpUrl = "https://repo.typesafe.com/typesafe/for-subscribers-only/AEE4D829FC38A3247F251ED25BA45ADD675D48EB" 6 | 7 | addSbtPlugin("com.typesafe.rp" % "sbt-typesafe-rp" % rpVersion) 8 | 9 | // The resolver name must start with typesafe-rp 10 | resolvers += "typesafe-rp-mvn" at rpUrl 11 | 12 | // The resolver name must start with typesafe-rp 13 | resolvers += Resolver.url("typesafe-rp-ivy", url(rpUrl))(Resolver.ivyStylePatterns) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Copyright 2009-2013 Typesafe Inc. [http://www.typesafe.com] 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); you may not 6 | use this file except in compliance with the License. You may obtain a copy of 7 | the License at 8 | 9 | [http://www.apache.org/licenses/LICENSE-2.0] 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | License for the specific language governing permissions and limitations under 15 | the License. 16 | -------------------------------------------------------------------------------- /app/controllers/Application.scala: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import javax.inject.Inject 4 | 5 | import akka.actor.Props 6 | import play.api.mvc._ 7 | import actors.ClientConnection 8 | import play.api.Play.current 9 | import actors.ClientConnection.ClientEvent 10 | 11 | class Application @Inject() ( 12 | clientConnectionFactory: ClientConnection.Factory 13 | ) extends Controller { 14 | 15 | /** 16 | * The index page. 
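 * Served for `GET /` (see conf/routes); it renders views/index.scala.html.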
17 | */ 18 | def index = Action { implicit req => 19 | Ok(views.html.index()) 20 | } 21 | 22 | /** 23 | * The WebSocket 24 | */ 25 | def stream(email: String) = WebSocket.acceptWithActor[ClientEvent, ClientEvent] { _ => upstream => 26 | Props(clientConnectionFactory(email, upstream)) 27 | } 28 | } -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: trusty 2 | sudo: required 3 | 4 | language: scala 5 | 6 | jdk: oraclejdk8 7 | 8 | script: 9 | - sbt test activatorRunTutorial activatorGenerateTutorial 10 | - git diff --quiet tutorial/index.html || (echo "index.html has been updated directly, do not do this, edit tutorial/index.html, then use https://github.com/typesafehub/activator-tutorial-generator to generate the new index.html" && false) 11 | 12 | before_script: 13 | - printf "resolvers += \"Typesafe repository\" at \"http://repo.typesafe.com/typesafe/releases/\"\n\naddSbtPlugin(\"com.typesafe.sbt\" %%%% \"sbt-activator-tutorial-generator\" %% \"1.0.3\")" > project/activator-tutorial-generator.sbt 14 | - echo "typesafe.subscription=reactive-maps-ci" > project/typesafe.properties 15 | -------------------------------------------------------------------------------- /app/assets/javascripts/main.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # The main entry point into the client side. Creates a new main page model and binds it to the page. 3 | # 4 | require.config { 5 | paths: { 6 | mainPage: "./models/mainPage" 7 | map: "./map/map" 8 | marker: "./map/marker" 9 | markerRenderer: "./map/markerRenderer" 10 | gps: "./services/gps" 11 | mockGps: "./services/mockGps" 12 | storage: "./services/storage" 13 | md5: "./md5.min" 14 | bootstrap: "../lib/bootstrap/js/bootstrap" 15 | jquery: "../lib/jquery/jquery" 16 | knockout: "../lib/knockout/knockout" 17 | leaflet: "../lib/leaflet/leaflet" 18 | } 19 | shim: { 20 | bootstrap: { 21 | deps: ["jquery"], 22 | exports: "$" 23 | } 24 | jquery: { 25 | exports: "$" 26 | } 27 | knockout: { 28 | exports: "ko" 29 | } 30 | } 31 | } 32 | 33 | require ["knockout", "mainPage", "bootstrap"], (ko, MainPageModel) -> 34 | 35 | model = new MainPageModel 36 | ko.applyBindings(model) 37 | 38 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | import sbt.Defaults.sbtPluginExtra 2 | 3 | // Comment to get more information during initialization 4 | logLevel := Level.Warn 5 | 6 | // The Typesafe repository 7 | resolvers += "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/" 8 | 9 | // Use the Play sbt plugin for Play projects 10 | libraryDependencies += sbtPluginExtra( 11 | TypesafeLibrary.playSbtPlugin.value, 12 | (sbtBinaryVersion in update).value, 13 | (scalaBinaryVersion in update).value 14 | ) 15 | 16 | addSbtPlugin("com.typesafe.sbt" % "sbt-less" % "1.0.6") 17 | addSbtPlugin("com.typesafe.sbt" % "sbt-coffeescript" % "1.0.0") 18 | addSbtPlugin("com.typesafe.sbt" % "sbt-rjs" % "1.0.7") 19 | addSbtPlugin("com.typesafe.sbt" % "sbt-digest" % "1.1.0") 20 | addSbtPlugin("com.typesafe.sbt" % "sbt-gzip" % "1.0.0") 21 | addSbtPlugin("com.typesafe.sbt" % "sbt-mocha" % "1.1.0") 22 | 23 | addSbtPlugin("com.typesafe.sbt" % "sbt-bintray-bundle" % "1.2.0") 24 | addSbtPlugin("com.lightbend.conductr" % "sbt-conductr" % "2.2.9") 25 | 
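build.sbt itself does not appear in this section, but as a rough sketch of how the sbt-web plugins above are usually chained together (an assumption about typical usage, not a verbatim copy of this project's settings):

// hypothetical build.sbt fragment
// sbt-less and sbt-coffeescript compile assets automatically; rjs, digest and gzip run as
// pipeline stages when the application is packaged (e.g. with `sbt stage` or `sbt dist`).
pipelineStages := Seq(rjs, digest, gzip)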
-------------------------------------------------------------------------------- /app/actors/RegionManagerClient.scala: -------------------------------------------------------------------------------- 1 | package actors 2 | 3 | import akka.actor.Actor 4 | import backend._ 5 | import akka.actor.Props 6 | import backend.RegionManager.UpdateUserPosition 7 | import akka.routing.FromConfig 8 | import models.backend.UserPosition 9 | 10 | object RegionManagerClient { 11 | def props(): Props = Props(new RegionManagerClient) 12 | } 13 | 14 | /** 15 | * A client for the region manager, handles routing of position updates to the 16 | * regionManager on the right backend node. 17 | */ 18 | class RegionManagerClient extends Actor { 19 | 20 | val regionManagerRouter = context.actorOf(Props.empty.withRouter(FromConfig), "router") 21 | 22 | val settings = Settings(context.system) 23 | 24 | def receive = { 25 | case p: UserPosition => 26 | // Calculate the regionId for the users position 27 | val regionId = settings.GeoFunctions.regionForPoint(p.position) 28 | // And send the update to the that region 29 | regionManagerRouter ! UpdateUserPosition(regionId, p) 30 | } 31 | } -------------------------------------------------------------------------------- /app/views/main.scala.html: -------------------------------------------------------------------------------- 1 | @(content: Html)(implicit req: RequestHeader) 2 | 3 | 4 | 5 | 6 | 7 | Reactive Maps 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | @helper.javascriptRouter("jsRoutes")( 19 | routes.javascript.Application.stream 20 | ) 21 | 22 | 23 | 24 | @content 25 | 26 | 27 | -------------------------------------------------------------------------------- /app/assets/stylesheets/main.less: -------------------------------------------------------------------------------- 1 | html, body { 2 | height: 100%; 3 | } 4 | 5 | .container { 6 | display: table; 7 | width: 100%; 8 | height: 100%; 9 | > div { 10 | display: table-row; 11 | } 12 | .maps { 13 | height: 100%; 14 | #map { 15 | height: 100%; 16 | } 17 | .mockGpsContainer { 18 | #mockGps { 19 | height: 320px; 20 | } 21 | } 22 | } 23 | } 24 | 25 | .cluster-marker-small { 26 | background-color: rgba(181, 226, 140, 0.6); 27 | div { 28 | background-color: rgba(110, 204, 57, 0.6); 29 | } 30 | } 31 | 32 | .cluster-marker-medium { 33 | background-color: rgba(241, 211, 87, 0.6); 34 | div { 35 | background-color: rgba(240, 194, 12, 0.6); 36 | } 37 | } 38 | .cluster-marker-large { 39 | background-color: rgba(253, 156, 115, 0.6); 40 | div { 41 | background-color: rgba(241, 128, 23, 0.6); 42 | } 43 | } 44 | 45 | .cluster-marker { 46 | background-clip: padding-box; 47 | border-radius: 20px; 48 | div { 49 | width: 30px; 50 | height: 30px; 51 | margin-left: 5px; 52 | margin-top: 5px; 53 | 54 | text-align: center; 55 | border-radius: 15px; 56 | font: 12px "Helvetica Neue", Arial, Helvetica, sans-serif; 57 | } 58 | span { 59 | line-height: 30px; 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /activator.properties: -------------------------------------------------------------------------------- 1 | name=reactive-maps 2 | title=Reactive Maps 3 | description=This template and tutorial shows how the Typesafe Reactive Platform can be used to implement scalable, resilient, responsive event driven apps. The application shows the location of every user currently connected on a map, updated in real time. 
Akka clustering with distributed pub sub allows it to scale horizontally, we tested with 50000 simulated concurrent users, each sending position updates every second, on 10 nodes on Google Compute Engine. The in context tutorial guides you through design features and code of the app, and then gently introduces you to some of the more detailed topics with instructions for adding new features. 4 | tags=reactive-platform,akka,playframework,scala,reactive 5 | authorName=Typesafe 6 | authorLink=http://typesafe.com/ 7 | authorTwitter=typesafe 8 | authorBio=Typesafe is dedicated to helping developers build reactive applications on the JVM. With the Typesafe Reactive Platform, including Play Framework, Akka, and Scala, developers can deliver highly responsive user experiences backed by a resilient and event-driven application stack that scales effortlessly on multicore and cloud computing architectures. 9 | authorLogo=http://typesafe.com/assets/images/activator/logo-typesafe-activator.png 10 | -------------------------------------------------------------------------------- /app/assets/javascripts/services/mockGps.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # The mock GPS interface. This is provided by providing a second map where you can 3 | # position a marker to fake a GPS location. 4 | # 5 | # Used to manually specify your position if you are not using a GPS enabled device. 6 | # 7 | define ["leaflet"], (Leaflet) -> 8 | class MockGps 9 | constructor: (ws) -> 10 | @ws = ws 11 | 12 | @map = Leaflet.map("mockGps") 13 | new Leaflet.TileLayer("http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", 14 | minZoom: 1 15 | maxZoom: 16 16 | attribution: "Map data © OpenStreetMap contributors" 17 | ).addTo(@map) 18 | 19 | position 20 | if localStorage.lastGps 21 | try 22 | position = JSON.parse localStorage.lastGps 23 | catch e 24 | localStorage.removeItem("lastGps") 25 | position = [0, 0] 26 | else 27 | position = [0, 0] 28 | @map.setView(position, 4) 29 | 30 | @marker = new Leaflet.Marker(position, 31 | draggable: true 32 | ).addTo(@map) 33 | 34 | @marker.on "dragend", => 35 | @sendPosition() 36 | 37 | @sendPosition() 38 | 39 | sendPosition: -> 40 | position = @marker.getLatLng() 41 | localStorage.lastGps = JSON.stringify position 42 | @ws.send(JSON.stringify 43 | event: "user-moved" 44 | position: 45 | type: "Point" 46 | coordinates: [position.lng, position.lat] 47 | ) 48 | 49 | 50 | destroy: -> 51 | try 52 | @map.remove() 53 | catch e 54 | 55 | return MockGps -------------------------------------------------------------------------------- /app/actors/Actors.scala: -------------------------------------------------------------------------------- 1 | package actors 2 | 3 | import javax.inject._ 4 | 5 | import akka.actor.{ActorRef, ActorSystem} 6 | import com.google.inject.AbstractModule 7 | import play.api._ 8 | import play.api.libs.concurrent.AkkaGuiceSupport 9 | import backend._ 10 | import akka.cluster.Cluster 11 | import java.net.URL 12 | 13 | /** 14 | * Guice module that provides actors. 15 | * 16 | * Registered in application.conf. 
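 * (via `play.modules.enabled += "actors.Actors"`, see conf/application.conf).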
17 | */ 18 | class Actors extends AbstractModule with AkkaGuiceSupport { 19 | 20 | def configure() = { 21 | // Bind the region manager client 22 | bindActor[RegionManagerClient]("regionManagerClient") 23 | // Bind the client connection factory 24 | bindActorFactory[ClientConnection, ClientConnection.Factory] 25 | // Bind the backend actors as an eager singleton 26 | bind(classOf[BackendActors]).asEagerSingleton() 27 | } 28 | } 29 | 30 | /** 31 | * Manages the creation of actors in the web front end. 32 | */ 33 | class BackendActors @Inject() (system: ActorSystem, configuration: Configuration, environment: Environment, 34 | @Named("regionManagerClient") regionManagerClient: ActorRef) { 35 | if (Cluster(system).selfRoles.exists(r => r.startsWith("backend"))) { 36 | system.actorOf(RegionManager.props(), "regionManager") 37 | } 38 | 39 | if (Settings(system).BotsEnabled) { 40 | def findUrls(id: Int): List[URL] = { 41 | val url = environment.resource("bots/" + id + ".json") 42 | url.map(url => url :: findUrls(id + 1)).getOrElse(Nil) 43 | } 44 | system.actorOf(BotManager.props(regionManagerClient, findUrls(1))) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /app/assets/javascripts/services/gps.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # The GPS interface. Uses the HTML5 location API to watch the devices current position, 3 | # and sends updates to the server. 4 | # 5 | define () -> 6 | class Gps 7 | # @ws The WebSocket to send updates to 8 | constructor: (ws) -> 9 | @ws = ws 10 | 11 | # When we last sent our position 12 | @lastSent = 0 13 | 14 | # The last position that we saw 15 | @lastPosition = null 16 | 17 | # Schedule a task to send the last position every 10 seconds 18 | @intervalId = setInterval(=> 19 | @sendPosition(@lastPosition) if @lastPosition 20 | , 10000) 21 | 22 | # Watch our position using the HTML5 geo location APIs 23 | @watchId = navigator.geolocation.watchPosition((position) => 24 | @sendPosition(position) 25 | ) 26 | 27 | # Send the given position 28 | sendPosition: (position) -> 29 | @lastPosition = position 30 | time = new Date().getTime() 31 | 32 | # Only send our position if we haven't sent a position update for 2 seconds 33 | if time - @lastSent > 2000 34 | @lastSent = time 35 | 36 | # Send the position update through the WebSocket 37 | @ws.send(JSON.stringify 38 | event: "user-moved" 39 | position: 40 | type: "Point" 41 | coordinates: [position.coords.longitude, position.coords.latitude] 42 | ) 43 | 44 | # Stop sending our position and stop watching for position updates 45 | destroy: -> 46 | navigator.geolocation.clearWatch(@watchId) 47 | clearInterval(@intervalId) 48 | 49 | return Gps -------------------------------------------------------------------------------- /app/models/backend/Backend.scala: -------------------------------------------------------------------------------- 1 | package models.backend 2 | 3 | import scala.collection.immutable.IndexedSeq 4 | import play.extras.geojson.LatLng 5 | 6 | /** 7 | * A point of interest, either a user position or a cluster of positions 8 | */ 9 | sealed trait PointOfInterest { 10 | /** 11 | * The id of the point of interest 12 | */ 13 | def id: String 14 | /** 15 | * When the point of interest was created 16 | */ 17 | def timestamp: Long 18 | /** 19 | * The position of the point of interest 20 | */ 21 | def position: LatLng 22 | } 23 | 24 | /** 25 | * A user position 26 | */ 27 | case class UserPosition(id: String, 
timestamp: Long, position: LatLng) extends PointOfInterest 28 | 29 | /** 30 | * A cluster of user positions 31 | */ 32 | case class Cluster(id: String, timestamp: Long, position: LatLng, count: Long) extends PointOfInterest 33 | 34 | /** 35 | * @param southWest The south western most point 36 | * @param northEast The north eastern most point 37 | */ 38 | case class BoundingBox(southWest: LatLng, northEast: LatLng) { 39 | require(southWest.lat < northEast.lat, "South west bound point is north of north east point") 40 | } 41 | 42 | /** 43 | * The points of interest for a given regionId. 44 | */ 45 | case class RegionPoints(regionId: RegionId, points: IndexedSeq[PointOfInterest]) 46 | 47 | /** 48 | * A region id. 49 | * 50 | * The zoomLevel indicates how deep this region is zoomed, a zoom level of 8 means that there are 2 ^^ 8 steps on the 51 | * axis of this zoomLevel, meaning the zoomLevel contains a total of 2 ^^ 16 regions. 52 | * 53 | * The x value starts at 0 at -180 West, and goes to 2 ^^ zoomLevel at 180 East. The y value starts at 0 at -90 South, 54 | * and goes to 2 ^^ zoomLevel at 90 North. 55 | */ 56 | case class RegionId(zoomLevel: Int, x: Int, y: Int) { 57 | val name = s"region-$zoomLevel-$x-$y" 58 | } 59 | -------------------------------------------------------------------------------- /app/assets/javascripts/map/markerRenderer.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # Handles actually rendering a marker, including DOM and CSS 3 | # 4 | define ["leaflet", "md5", "jquery"], (Leaflet, md5) -> 5 | # Escape the given unsafe user input 6 | escapeHtml = (unsafe) -> 7 | return unsafe.replace(/&/g, "&") 8 | .replace(//g, ">") 10 | .replace(/"/g, """) 11 | .replace(/'/g, "'") 12 | 13 | { 14 | # Render the popup for the given user 15 | renderPopup: (userId) -> 16 | "

" + escapeHtml(userId) + "

" 18 | 19 | # Create the cluster marker icon 20 | createClusterMarkerIcon: (count) -> 21 | # Style according to the number of users in the cluster 22 | className = if count < 10 23 | "cluster-marker-small" 24 | else if count < 100 25 | "cluster-marker-medium" 26 | else 27 | "cluster-marker-large" 28 | return new Leaflet.DivIcon( 29 | html: "
" + count + "
" 30 | className: "cluster-marker " + className 31 | iconSize: new Leaflet.Point(40, 40) 32 | ) 33 | 34 | 35 | # Reset the transition properties for the given element so that it doesn't animate 36 | resetTransition: (element) -> 37 | updateTransition = (element, prefix) -> 38 | element.style[prefix + "transition"] = "" 39 | updateTransition element, "-webkit-" 40 | updateTransition element, "-moz-" 41 | updateTransition element, "-o-" 42 | updateTransition element, "" 43 | 44 | # Reset the transition properties for the given element so that it animates when it moves 45 | transition: (element, time) -> 46 | updateTransition = (element, prefix) -> 47 | element.style[prefix + "transition"] = prefix + "transform " + time + "ms linear" 48 | updateTransition element, "-webkit-" 49 | updateTransition element, "-moz-" 50 | updateTransition element, "-o-" 51 | updateTransition element, "" 52 | 53 | } -------------------------------------------------------------------------------- /app/views/index.scala.html: -------------------------------------------------------------------------------- 1 | @()(implicit req: RequestHeader) 2 | 3 | @main { 4 | 5 |
@* (page markup for lines 6-43 stripped from this dump) *@
44 | } 45 | -------------------------------------------------------------------------------- /app/backend/Main.scala: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | import akka.actor.ActorSystem 4 | import actors.RegionManagerClient 5 | import java.net.URL 6 | import akka.cluster.Cluster 7 | import com.typesafe.conductr.lib.akka.ConnectionContext 8 | import com.typesafe.conductr.bundlelib.akka.{StatusService, Env} 9 | import com.typesafe.config.ConfigFactory 10 | 11 | /** 12 | * Main class for starting a backend node. 13 | * A backend node can have two roles: "backend-region" and/or "backend-summary". 14 | * The lowest level regions run on nodes with role "backend-region". 15 | * Summary level regions run on nodes with role "backend-summary". 16 | * 17 | * The roles can be specified on the sbt command line as: 18 | * {{{ 19 | * sbt -Dakka.remote.netty.tcp.port=0 -Dakka.cluster.roles.1=backend-region -Dakka.cluster.roles.2=backend-summary "run-main backend.Main" 20 | * }}} 21 | * 22 | * If the node has role "frontend" it starts the simulation bots. 23 | */ 24 | object Main { 25 | def main(args: Array[String]): Unit = { 26 | val config = Env.asConfig 27 | val systemName = sys.env.getOrElse("BUNDLE_SYSTEM", "application") 28 | val systemVersion = sys.env.getOrElse("BUNDLE_SYSTEM_VERSION", "1") 29 | implicit val system = ActorSystem(s"$systemName-$systemVersion", config.withFallback(ConfigFactory.load())) 30 | 31 | if (Cluster(system).selfRoles.exists(r => r.startsWith("backend"))) { 32 | system.actorOf(RegionManager.props(), "regionManager") 33 | } 34 | 35 | if (Settings(system).BotsEnabled && Cluster(system).selfRoles.contains("frontend")) { 36 | val regionManagerClient = system.actorOf(RegionManagerClient.props(), "regionManagerClient") 37 | 38 | def findUrls(id: Int): List[URL] = { 39 | val url = Option(this.getClass.getClassLoader.getResource("bots/" + id + ".json")) 40 | url.map(url => url :: findUrls(id + 1)).getOrElse(Nil) 41 | } 42 | 43 | system.actorOf(BotManager.props(regionManagerClient, findUrls(1))) 44 | } 45 | 46 | implicit val cc = ConnectionContext() 47 | StatusService.signalStartedOrExit() 48 | } 49 | } -------------------------------------------------------------------------------- /app/assets/javascripts/map/marker.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # A marker class 3 | # 4 | define ["leaflet", "markerRenderer"], (Leaflet, renderer) -> 5 | 6 | class Marker 7 | constructor: (map, feature, latLng) -> 8 | @map = map 9 | @feature = feature 10 | 11 | # If it has a count, it's a cluster 12 | if feature.properties.count 13 | @marker = new Leaflet.Marker(latLng, 14 | icon: renderer.createClusterMarkerIcon(feature.properties.count) 15 | ) 16 | # Otherwise it's a user 17 | else 18 | userId = feature.id 19 | @marker = new Leaflet.Marker(latLng, 20 | title: feature.id 21 | ) 22 | 23 | # The popup should contain the gravatar of the user and their id 24 | @marker.bindPopup(renderer.renderPopup(userId)) 25 | 26 | @lastSeen = new Date().getTime() 27 | @marker.addTo(map) 28 | 29 | # Update a marker with the given feature and latLng coordinates 30 | update: (feature, latLng) -> 31 | # Update the position 32 | @marker.setLatLng(latLng) 33 | 34 | # If it's a cluster, check if the size of the cluster has changed 35 | if feature.properties.count 36 | if feature.properties.count != @feature.properties.count 37 | 
@marker.setIcon(renderer.createClusterMarkerIcon(feature.properties.count)) 38 | 39 | # Animate the marker - calculate how long it took to get from its last position 40 | # to current, and then set the CSS3 transition time to equal that 41 | lastUpdate = @feature.properties.timestamp 42 | updated = feature.properties.timestamp 43 | time = (updated - lastUpdate) 44 | if time > 0 45 | if time > 10000 46 | time = 10000 47 | renderer.transition(@marker._icon, time) 48 | renderer.transition(@marker._shadow, time) if @marker._shadow 49 | 50 | # Finally update feature 51 | @feature = feature 52 | @lastSeen = new Date().getTime() 53 | 54 | # Snap the marker to where it should be, ie stop animating 55 | snap: -> 56 | renderer.resetTransition @marker._icon 57 | renderer.resetTransition @marker._shadow if @marker._shadow 58 | 59 | # Remove the marker from the map 60 | remove: -> 61 | @map.removeLayer(@marker) 62 | 63 | return Marker 64 | 65 | -------------------------------------------------------------------------------- /app/actors/GeoJsonBot.scala: -------------------------------------------------------------------------------- 1 | package actors 2 | 3 | import akka.actor.{ ActorRef, Actor } 4 | import scala.concurrent.duration._ 5 | import play.extras.geojson.{LineString, LatLng} 6 | import models.backend.UserPosition 7 | import akka.actor.Props 8 | import models.backend.BoundingBox 9 | import scala.concurrent.forkjoin.ThreadLocalRandom 10 | import actors.PositionSubscriber.PositionSubscriberUpdate 11 | import scala.language.postfixOps 12 | 13 | object GeoJsonBot { 14 | def props(trail: LineString[LatLng], offset: (Double, Double), userId: String, regionManagerClient: ActorRef): Props = 15 | Props(classOf[GeoJsonBot], trail, offset, userId, regionManagerClient) 16 | 17 | private case object Step 18 | private case object Zoom 19 | } 20 | 21 | /** 22 | * A bot that walks back and forth along a GeoJSON LineString. 23 | */ 24 | class GeoJsonBot(trail: LineString[LatLng], offset: (Double, Double), userId: String, 25 | regionManagerClient: ActorRef) extends Actor { 26 | 27 | import GeoJsonBot._ 28 | 29 | val (latOffset, lngOffset) = offset 30 | var pos = 0 31 | var direction = -1 32 | var stepCount = 0 33 | 34 | import context.dispatcher 35 | val stepTask = context.system.scheduler.schedule(1 second, 1 second, context.self, Step) 36 | 37 | val positionSubscriber: ActorRef = context.actorOf(PositionSubscriber.props(self)) 38 | 39 | override def postStop(): Unit = { 40 | stepTask.cancel() 41 | } 42 | 43 | def receive = { 44 | case Step => 45 | if (pos == trail.coordinates.size - 1 || pos == 0) { 46 | direction = -direction 47 | } 48 | pos += direction 49 | val c = trail.coordinates(pos) 50 | val userPos = UserPosition(userId, System.currentTimeMillis, LatLng(c.lat + latOffset, c.lng + lngOffset)) 51 | regionManagerClient ! userPos 52 | 53 | stepCount += 1 54 | if (stepCount % 30 == 0) { 55 | val w = ThreadLocalRandom.current.nextDouble() * 10.0 56 | val h = ThreadLocalRandom.current.nextDouble() * 20.0 57 | val southWest = LatLng(c.lat + latOffset - w / 2, c.lng + lngOffset - h / 2) 58 | val northEast = LatLng(c.lat + latOffset + w / 2, c.lng + lngOffset + h / 2) 59 | positionSubscriber ! 
BoundingBox(southWest, northEast) 60 | } 61 | 62 | case _: PositionSubscriberUpdate => 63 | // Ignore 64 | 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /app/backend/Region.scala: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | import akka.actor.Actor 4 | import akka.contrib.pattern.DistributedPubSubExtension 5 | import akka.contrib.pattern.DistributedPubSubMediator.Publish 6 | import scala.concurrent.duration.Deadline 7 | import models.backend.{ RegionId, RegionPoints, BoundingBox, UserPosition } 8 | import akka.actor.Props 9 | import akka.actor.ActorLogging 10 | 11 | object Region { 12 | 13 | def props(regionId: RegionId): Props = Props(new Region(regionId)) 14 | 15 | private case object Tick 16 | } 17 | 18 | /** 19 | * These sit at the lowest level, and hold all the users in that region, and publish their summaries up. 20 | * User position updates are published to subscribers of the topic with the region id. 21 | */ 22 | class Region(regionId: RegionId) extends Actor with ActorLogging { 23 | import Region._ 24 | 25 | val mediator = DistributedPubSubExtension(context.system).mediator 26 | val settings = Settings(context.system) 27 | 28 | val regionBounds: BoundingBox = settings.GeoFunctions.boundingBoxForRegion(regionId) 29 | var activeUsers = Map.empty[String, (UserPosition, Deadline)] 30 | 31 | import context.dispatcher 32 | val tickTask = context.system.scheduler.schedule(settings.SummaryInterval / 2, settings.SummaryInterval, self, Tick) 33 | 34 | override def postStop(): Unit = { 35 | tickTask.cancel() 36 | log.debug("Stopped region: {}", regionId.name) 37 | } 38 | 39 | def receive = { 40 | case p @ UserPosition(userId, _, _) => 41 | activeUsers += (userId -> (p, Deadline.now + settings.ExpiryInterval)) 42 | // publish new user position to subscribers 43 | mediator ! Publish(regionId.name, p) 44 | 45 | case Tick => 46 | // expire inactive users 47 | val obsolete = activeUsers.collect { 48 | case (userId, (position, deadline)) if deadline.isOverdue() => userId 49 | } 50 | activeUsers --= obsolete 51 | 52 | // Cluster 53 | val points = RegionPoints(regionId, settings.GeoFunctions.cluster(regionId.name, regionBounds, 54 | activeUsers.collect { case (_, (position, _)) => position }(collection.breakOut))) 55 | 56 | // propagate the points to the summary region via the parent manager 57 | context.parent ! points 58 | 59 | // stop the actor when no active users 60 | if (activeUsers.isEmpty) 61 | context.stop(self) 62 | 63 | } 64 | 65 | } 66 | -------------------------------------------------------------------------------- /app/actors/PositionSubscriber.scala: -------------------------------------------------------------------------------- 1 | package actors 2 | 3 | import scala.collection.immutable.Seq 4 | import akka.actor.{ActorRef, Actor, ActorLogging, Props} 5 | import akka.contrib.pattern.DistributedPubSubExtension 6 | import akka.contrib.pattern.DistributedPubSubMediator.Subscribe 7 | import akka.contrib.pattern.DistributedPubSubMediator.Unsubscribe 8 | import models.backend._ 9 | import backend.Settings 10 | 11 | object PositionSubscriber { 12 | 13 | def props(subscriber: ActorRef): Props = Props(new PositionSubscriber(subscriber)) 14 | 15 | case class PositionSubscriberUpdate(area: Option[BoundingBox], updates: Seq[PointOfInterest]) 16 | 17 | private case object Tick 18 | } 19 | 20 | /** 21 | * A subscriber to position data. 
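 *
 * Send it a `BoundingBox` to (re)subscribe to the pub-sub topics of the regions covering that area.
 * Incoming `UserPosition` and `RegionPoints` messages are accumulated and flushed to `subscriber`
 * as a `PositionSubscriberUpdate` on each `SubscriberBatchInterval` tick.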
22 | */ 23 | class PositionSubscriber(subscriber: ActorRef) extends Actor with ActorLogging { 24 | import PositionSubscriber._ 25 | 26 | val mediator = DistributedPubSubExtension(context.system).mediator 27 | val settings = Settings(context.system) 28 | 29 | /** 30 | * The current regions subscribed to 31 | */ 32 | var regions = Set.empty[RegionId] 33 | 34 | /** 35 | * The current bounding box subscribed to 36 | */ 37 | var currentArea: Option[BoundingBox] = None 38 | 39 | /** 40 | * The unpublished position updates 41 | */ 42 | var updates: Map[String, PointOfInterest] = Map.empty 43 | 44 | import context.dispatcher 45 | val tickTask = context.system.scheduler.schedule(settings.SubscriberBatchInterval, settings.SubscriberBatchInterval, 46 | self, Tick) 47 | 48 | override def postStop(): Unit = tickTask.cancel() 49 | 50 | def receive = { 51 | case bbox: BoundingBox => 52 | // Calculate new regions 53 | val newRegions = settings.GeoFunctions.regionsForBoundingBox(bbox) 54 | // Subscribe to any regions that we're not already subscribed to 55 | (newRegions -- regions) foreach { region => 56 | mediator ! Subscribe(region.name, self) 57 | } 58 | // Unsubscribe from any regions that we no longer should be subscribed to 59 | (regions -- newRegions) foreach { region => 60 | mediator ! Unsubscribe(region.name, self) 61 | } 62 | regions = newRegions 63 | currentArea = Some(bbox) 64 | 65 | case p: UserPosition => 66 | updates += (p.id -> p) 67 | 68 | case RegionPoints(regionId, points) => 69 | updates ++= points.map(p => p.id -> p) 70 | 71 | case Tick => 72 | subscriber ! PositionSubscriberUpdate(currentArea, updates.values.toVector) 73 | updates = Map.empty 74 | 75 | } 76 | 77 | } 78 | -------------------------------------------------------------------------------- /conf/application.conf: -------------------------------------------------------------------------------- 1 | # This is the main configuration file for the application. 2 | # ~~~~~ 3 | 4 | # 14 means 2 ^^ 14 regions, ie 16 million 5 | reactiveMaps.maxZoomDepth=14 6 | 7 | reactiveMaps.maxSubscriptionRegions=6 8 | 9 | reactiveMaps.clusterThreshold=16 10 | reactiveMaps.clusterDimension=4 11 | 12 | reactiveMaps.summaryInterval=5s 13 | reactiveMaps.expiryInterval=30s 14 | reactiveMaps.subscriberBatchInterval=2s 15 | 16 | reactiveMaps.bots.enabled=true 17 | reactiveMaps.bots.totalNumberOfBots=75 18 | 19 | # The actors module 20 | play.modules.enabled += "actors.Actors" 21 | 22 | # Secret key 23 | # ~~~~~ 24 | # The secret key is used to secure cryptographics functions. 25 | # If you deploy your application to several instances be sure to use the same key! 26 | play.crypto.secret="]I;nuZEPTPGytF5U/970v=FAxlxl5SHqfos=]C??:lG>:d;i13LFWp8rc5a`]j2c" 27 | 28 | # The application languages 29 | # ~~~~~ 30 | play.i18n.langs=["en"] 31 | 32 | # Akka configuration 33 | akka { 34 | 35 | loglevel = "INFO" 36 | 37 | actor.provider = "akka.cluster.ClusterActorRefProvider" 38 | 39 | extensions = [ 40 | "akka.contrib.pattern.DistributedPubSubExtension" 41 | ] 42 | 43 | remote.netty.tcp { 44 | hostname = "127.0.0.1" 45 | port = 2552 46 | } 47 | 48 | cluster { 49 | seed-nodes = ["akka.tcp://application@127.0.0.1:2552"] 50 | roles = ["frontend", "backend-region", "backend-summary"] 51 | auto-down = on 52 | } 53 | 54 | actor.deployment { 55 | # Routing of position updates to the regionManager on the right 56 | # backend node. The node is selected by consistent hashing of 57 | # the region id, i.e. 
user position updates in a region is routed 58 | # to the backend node responsible for that region. 59 | /regionManagerClient/router { 60 | router = consistent-hashing 61 | nr-of-instances = 1000 62 | cluster { 63 | enabled = on 64 | routees-path = "/user/regionManager" 65 | allow-local-routees = on 66 | use-role = "backend-region" 67 | } 68 | } 69 | # Routing of sub-region summary information to enclosing region, 70 | # which may be located on another backend node. The node is selected 71 | # by consistent hashing of the region id. 72 | /regionManager/router { 73 | router = consistent-hashing 74 | nr-of-instances = 1000 75 | cluster { 76 | enabled = on 77 | routees-path = "/user/regionManager" 78 | allow-local-routees = on 79 | use-role = "backend-summary" 80 | } 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /app/backend/BotManager.scala: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | import scala.concurrent.duration._ 4 | import scala.collection.immutable.Seq 5 | import scala.concurrent.forkjoin.ThreadLocalRandom 6 | import akka.actor.{ ActorRef, Props } 7 | import play.api.libs.json.Json 8 | import play.extras.geojson.{ LineString, LatLng, FeatureCollection } 9 | import play.api.Logger 10 | import actors.GeoJsonBot 11 | import java.net.URL 12 | import akka.actor.Actor 13 | import scala.io.Source 14 | import akka.cluster.Cluster 15 | 16 | object BotManager { 17 | def props(regionManagerClient: ActorRef, data: Seq[URL]): Props = 18 | Props(new BotManager(regionManagerClient, data)) 19 | 20 | private case object Tick 21 | } 22 | 23 | /** 24 | * Loads and starts GeoJSON bots 25 | */ 26 | class BotManager(regionManagerClient: ActorRef, data: Seq[URL]) extends Actor { 27 | import BotManager._ 28 | 29 | var total = 0 30 | val max = Settings(context.system).TotalNumberOfBots 31 | 32 | import context.dispatcher 33 | val tickTask = context.system.scheduler.schedule(1.seconds, 3.seconds, self, Tick) 34 | val port = Cluster(context.system).selfAddress.port.get 35 | 36 | override def postStop(): Unit = tickTask.cancel() 37 | 38 | def receive = { 39 | case Tick if total >= max => 40 | tickTask.cancel() 41 | 42 | case Tick => 43 | val totalBefore = total 44 | val originalTrail = total == 0 45 | data.zipWithIndex.foreach { 46 | case (url, id) => 47 | val json = Json.parse(Source.fromURL(url).mkString) 48 | Json.fromJson[FeatureCollection[LatLng]](json).fold( 49 | { invalid => 50 | Logger.error("Error loading geojson bot: " + invalid) 51 | }, valid => valid.features.zipWithIndex.map { feature => 52 | feature._1.geometry match { 53 | case route: LineString[LatLng] if total < max => 54 | total += 1 55 | val userId = "bot-" + total + "-" + port + "-" + id + "-" + feature._1.id.getOrElse(feature._2) + "-" + feature._1.properties.flatMap(js => (js \ "name").asOpt[String]).getOrElse("") 56 | val offset = 57 | if (originalTrail) (0.0, 0.0) 58 | else (ThreadLocalRandom.current.nextDouble() * 15.0, 59 | ThreadLocalRandom.current.nextDouble() * -30.0) 60 | context.actorOf(GeoJsonBot.props(route, offset, userId, regionManagerClient)) 61 | case other => 62 | } 63 | }) 64 | } 65 | 66 | println("Started " + (total - totalBefore) + " bots, total " + total) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /app/backend/SummaryRegion.scala: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | 
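// For orientation (values from conf/application.conf): with reactiveMaps.maxZoomDepth=14 the concrete
// Regions sit at zoom level 14; each SummaryRegion at level n aggregates the points of its four
// sub-regions at level n+1 and reports upward, so summaries roll up one level at a time.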
import scala.concurrent.duration.Deadline 4 | import akka.actor.Actor 5 | import akka.contrib.pattern.DistributedPubSubExtension 6 | import akka.contrib.pattern.DistributedPubSubMediator.Publish 7 | import models.backend.{ RegionId, RegionPoints, BoundingBox, PointOfInterest } 8 | import akka.actor.Props 9 | import akka.actor.ActorLogging 10 | 11 | object SummaryRegion { 12 | 13 | def props(regionId: RegionId): Props = Props(new SummaryRegion(regionId)) 14 | 15 | private case object Tick 16 | } 17 | 18 | /** 19 | * Summary regions receive region points from their 4 sub regions, cluster them, and publishes the resulting points 20 | * to subscribers of the topic with the region id. 21 | */ 22 | class SummaryRegion(regionId: RegionId) extends Actor with ActorLogging { 23 | import SummaryRegion._ 24 | 25 | val mediator = DistributedPubSubExtension(context.system).mediator 26 | val settings = Settings(context.system) 27 | 28 | /** 29 | * The bounding box for this region. 30 | */ 31 | val regionBounds: BoundingBox = settings.GeoFunctions.boundingBoxForRegion(regionId) 32 | 33 | /** 34 | * The active points for this region, keyed by sub region id. 35 | * 36 | * The values are the points for the sub region, tupled with the deadline they are valid until. 37 | */ 38 | var activePoints = Map.empty[RegionId, (Seq[PointOfInterest], Deadline)] 39 | 40 | import context.dispatcher 41 | val tickTask = context.system.scheduler.schedule(settings.SummaryInterval / 2, settings.SummaryInterval, self, Tick) 42 | 43 | override def postStop(): Unit = { 44 | tickTask.cancel() 45 | log.debug("Stopped summary region: {}", regionId.name) 46 | } 47 | 48 | def receive = { 49 | case RegionPoints(id, points) => 50 | // update from sub-region 51 | activePoints += id -> (points, Deadline.now + settings.ExpiryInterval) 52 | 53 | case Tick => 54 | // expire inactive sub-regions 55 | val obsolete = activePoints.collect { 56 | case (rid, (map, deadline)) if deadline.isOverdue() => rid 57 | } 58 | activePoints --= obsolete 59 | 60 | // Cluster 61 | val points = RegionPoints(regionId, settings.GeoFunctions.cluster(regionId.name, regionBounds, 62 | activePoints.values.flatMap(_._1)(collection.breakOut))) 63 | 64 | // propagate the points to higher level summary region via the manager 65 | context.parent ! points 66 | // publish total count to subscribers 67 | mediator ! Publish(regionId.name, points) 68 | 69 | // stop the actor when no active sub-regions 70 | if (activePoints.isEmpty) 71 | context.stop(self) 72 | } 73 | 74 | } 75 | -------------------------------------------------------------------------------- /app/backend/Settings.scala: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | import akka.actor.ActorSystem 4 | import akka.actor.Extension 5 | import akka.actor.ExtensionId 6 | import akka.actor.ExtensionIdProvider 7 | import akka.actor.ExtendedActorSystem 8 | import scala.concurrent.duration._ 9 | import com.typesafe.config.Config 10 | import java.util.concurrent.TimeUnit 11 | 12 | class Settings(config: Config) extends Extension { 13 | /** 14 | * The maximum zoom depth for regions. The concrete regions will sit at this depth, summary regions will sit above 15 | * that. 16 | */ 17 | val MaxZoomDepth = config.getInt("reactiveMaps.maxZoomDepth") 18 | 19 | /** 20 | * The maximum number of regions that can be subscribed to. 
21 | * 22 | * This is enforced automatically by selecting the deepest zoom depth for a given bounding box that is covered by 23 | * this number of regions or less. 24 | */ 25 | val MaxSubscriptionRegions = config.getInt("reactiveMaps.maxSubscriptionRegions") 26 | 27 | /** 28 | * The number of points that need to be in a region/summary region before it decides to cluster them. 29 | */ 30 | val ClusterThreshold = config.getInt("reactiveMaps.clusterThreshold") 31 | 32 | /** 33 | * The dimension depth at which to cluster. 34 | * 35 | * A region will be clustered into the square of this number boxes. 36 | */ 37 | val ClusterDimension = config.getInt("reactiveMaps.clusterDimension") 38 | 39 | /** 40 | * The interval at which each region should generate and send its summaries. 41 | */ 42 | val SummaryInterval = config.getDuration("reactiveMaps.summaryInterval", TimeUnit.MILLISECONDS).milliseconds 43 | 44 | /** 45 | * The interval after which user positions and cluster data should expire. 46 | */ 47 | val ExpiryInterval = config.getDuration("reactiveMaps.expiryInterval", TimeUnit.MILLISECONDS).milliseconds 48 | 49 | /** 50 | * The interval at which subscribers should batch their points to send to clients. 51 | */ 52 | val SubscriberBatchInterval = config.getDuration("reactiveMaps.subscriberBatchInterval", TimeUnit.MILLISECONDS).milliseconds 53 | 54 | /** 55 | * Geospatial functions. 56 | */ 57 | val GeoFunctions = new GeoFunctions(this) 58 | 59 | /** 60 | * Whether this node should run the bots it knows about. 61 | */ 62 | val BotsEnabled = config.getBoolean("reactiveMaps.bots.enabled") 63 | 64 | /** 65 | * How many bots to create in total 66 | */ 67 | val TotalNumberOfBots = config.getInt("reactiveMaps.bots.totalNumberOfBots") 68 | } 69 | 70 | /** 71 | * The settings for this application. 72 | */ 73 | object Settings extends ExtensionId[Settings] with ExtensionIdProvider { 74 | 75 | override def lookup = Settings 76 | 77 | override def createExtension(system: ExtendedActorSystem) = 78 | new Settings(system.settings.config) 79 | 80 | override def get(system: ActorSystem): Settings = super.get(system) 81 | } -------------------------------------------------------------------------------- /app/assets/javascripts/models/mainPage.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # The main page. 3 | # 4 | # This class handles most of the user interactions with the buttons/menus/forms on the page, as well as manages 5 | # the WebSocket connection. It delegates to other classes to manage everything else. 6 | # 7 | define ["knockout", "map", "gps", "mockGps"], (ko, Map, Gps, MockGps) -> 8 | 9 | class MainPageModel 10 | constructor: () -> 11 | # the current user 12 | @email = ko.observable() 13 | 14 | # Contains a message to say that we're either connecting or reconnecting 15 | @connecting = ko.observable() 16 | @disconnected = ko.observable(true) 17 | 18 | # The MockGps model 19 | @mockGps = ko.observable() 20 | # The GPS model 21 | @gps = ko.observable() 22 | 23 | # If we're closing 24 | @closing = false 25 | 26 | # Load the previously entered email if set 27 | if localStorage.email 28 | @email(localStorage.email) 29 | @connect() 30 | 31 | # The user clicked connect 32 | submitEmail: -> 33 | localStorage.email = @email() 34 | @connect() 35 | 36 | # Connect function. Connects to the websocket, and sets up callbacks. 
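  # If the socket closes uncleanly (and we did not ask it to close), it reconnects automatically;
  # on a clean or deliberate close it destroys the map, GPS and mock GPS models instead.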
37 | connect: -> 38 | email = @email() 39 | @connecting("Connecting...") 40 | @disconnected(null) 41 | 42 | @ws = new WebSocket(jsRoutes.controllers.Application.stream(email).webSocketURL()) 43 | 44 | # When the websocket opens, create a new map and new GPS 45 | @ws.onopen = (event) => 46 | @connecting(null) 47 | @map = new Map(@ws) 48 | @gps(new Gps(@ws)) 49 | 50 | @ws.onclose = (event) => 51 | # Need to handle reconnects in case of errors 52 | if (!event.wasClean && !self.closing) 53 | @connect() 54 | @connecting("Reconnecting...") 55 | else 56 | @disconnected(true) 57 | @closing = false 58 | # Destroy everything and clean it all up. 59 | @map.destroy() if @map 60 | @mockGps().destroy() if @mockGps() 61 | @gps().destroy() if @gps() 62 | @map = null 63 | @mockGps(null) 64 | @gps(null) 65 | 66 | # Handle the stream of feature updates 67 | @ws.onmessage = (event) => 68 | json = JSON.parse(event.data) 69 | if json.event == "user-positions" 70 | # Update all the markers on the map 71 | @map.updateMarkers(json.positions.features) 72 | 73 | # Disconnect the web socket 74 | disconnect: -> 75 | @closing = true 76 | @ws.close() 77 | 78 | # Switch between the mock GPS and the real GPS 79 | toggleMockGps: -> 80 | if @mockGps() 81 | @mockGps().destroy() 82 | @mockGps(null) 83 | @gps(new Gps(@ws)) 84 | else 85 | @gps().destroy() if @gps() 86 | @gps(null) 87 | @mockGps(new MockGps(@ws)) 88 | 89 | return MainPageModel 90 | 91 | -------------------------------------------------------------------------------- /app/backend/RegionManager.scala: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | import akka.actor.Actor 4 | import akka.actor.Props 5 | import akka.routing.ConsistentHashingRouter.ConsistentHashable 6 | import akka.routing.FromConfig 7 | import models.backend.{ RegionId, RegionPoints, UserPosition } 8 | import akka.actor.ActorLogging 9 | 10 | object RegionManager { 11 | 12 | def props(): Props = Props[RegionManager] 13 | 14 | /** 15 | * Update the users position. 16 | * 17 | * Sent by clients of the backend when they want to update a users position. 18 | * 19 | * @param regionId The region id that position is in. This is used as the hash key for deciding which node 20 | * to route the update to. 21 | * @param userPosition The user position object. 22 | */ 23 | case class UpdateUserPosition(regionId: RegionId, userPosition: UserPosition) extends ConsistentHashable { 24 | override def consistentHashKey: Any = regionId.name 25 | } 26 | 27 | /** 28 | * Update the region points at a given region. 29 | * 30 | * Sent by child regions to update their data in their parent summary region. 31 | * 32 | * @param regionId The region id that position is in. This is used as the hash key for deciding which node 33 | * to route the update to. 34 | * @param regionPoints The points to update. 35 | */ 36 | case class UpdateRegionPoints(regionId: RegionId, regionPoints: RegionPoints) extends ConsistentHashable { 37 | override def consistentHashKey: Any = regionId.name 38 | } 39 | 40 | } 41 | 42 | /** 43 | * Handles instantiating region and summary region actors when data arrives for them, if they don't already exist. 44 | * It also routes the `RegionPoints` from child `Region` or `SummaryRegion` to the node 45 | * responsible for the target region. 
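 * The responsible node is picked by the consistent-hashing routers configured under
 * `akka.actor.deployment` in application.conf, hashed on the region id.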
46 | */ 47 | class RegionManager extends Actor with ActorLogging { 48 | import RegionManager._ 49 | 50 | val regionManagerRouter = context.actorOf(Props.empty.withRouter(FromConfig), "router") 51 | val settings = Settings(context.system) 52 | 53 | def receive = { 54 | case UpdateUserPosition(regionId, userPosition) => 55 | val region = context.child(regionId.name).getOrElse { 56 | log.debug("Creating region: {}", regionId.name) 57 | context.actorOf(Region.props(regionId), regionId.name) 58 | } 59 | region ! userPosition 60 | 61 | case UpdateRegionPoints(regionId, regionPoints) => 62 | val summaryRegion = context.child(regionId.name).getOrElse { 63 | log.debug("Creating summary region: {}", regionId.name) 64 | context.actorOf(SummaryRegion.props(regionId), regionId.name) 65 | } 66 | summaryRegion ! regionPoints 67 | 68 | case p @ RegionPoints(regionId, _) => 69 | 70 | // count reported by child region, propagate it to summary region on responsible node 71 | settings.GeoFunctions.summaryRegionForRegion(regionId).foreach { summaryRegionId => 72 | regionManagerRouter ! UpdateRegionPoints(summaryRegionId, p) 73 | } 74 | } 75 | 76 | } -------------------------------------------------------------------------------- /test/assets/javascripts/map/MapSpec.coffee: -------------------------------------------------------------------------------- 1 | # Mocks 2 | class LatLng 3 | constructor: (lat, lng) -> 4 | @lat = lat 5 | @lng = lng 6 | wrap: () -> 7 | this 8 | 9 | class MockLeaflet 10 | constructor: () -> 11 | self = @ 12 | @TileLayer = class 13 | addTo: (map) -> 14 | self._addedTo = map 15 | 16 | _map: { 17 | markers: {} 18 | setView: (center, zoom) -> 19 | @_center = center 20 | @_zoom = zoom 21 | on: (event, fn) -> 22 | remove: () -> 23 | getBounds: () -> 24 | center = @_center 25 | { 26 | getCenter: () -> 27 | new LatLng(center[0], center[1]) 28 | } 29 | } 30 | 31 | map: -> 32 | @_map 33 | 34 | LatLng: LatLng 35 | 36 | class MockStorage 37 | area: null 38 | lastArea: -> 39 | @area 40 | setLastArea: (area) -> 41 | @area = area 42 | 43 | MM = () -> 44 | class MockMarker 45 | constructor: (map, feature, latLng) -> 46 | map.markers[feature.id] = this 47 | @feature = feature 48 | @latLng = latLng 49 | update: (feature, latLng) -> 50 | @feature = feature 51 | @latLng = latLng 52 | 53 | # Tests 54 | testMap = (test) -> 55 | (done) -> 56 | 57 | # Create mocks 58 | leaflet = new MockLeaflet() 59 | storage = new MockStorage() 60 | 61 | # Mockout require js environment 62 | new Squire() 63 | .mock("marker", MM) 64 | .mock("storage", storage) 65 | .mock("leaflet", leaflet) 66 | .require ["javascripts/map/map"], (Map) -> 67 | test(leaflet, storage, Map, done) 68 | 69 | describe "Map", -> 70 | 71 | # Mock features 72 | a = 73 | id: "a" 74 | geometry: 75 | coordinates: [1, 2] 76 | b = 77 | id: "b" 78 | geometry: 79 | coordinates: [3, 4] 80 | aUpdated = 81 | id: "a" 82 | geometry: 83 | coordinates: [5, 6] 84 | 85 | it "should create a tile layer", testMap (leaflet, storage, Map, done) -> 86 | new Map() 87 | assert.equal leaflet._map, leaflet._addedTo 88 | done() 89 | 90 | it "should initialise the map view", testMap (leaflet, storage, Map, done) -> 91 | new Map() 92 | assert.equal 0, leaflet._map._center[0] 93 | assert.equal 0, leaflet._map._center[1] 94 | assert.equal 2, leaflet._map._zoom 95 | done() 96 | 97 | it "should initialise the map view to the last stored area", testMap (leaflet, storage, Map, done) -> 98 | storage.setLastArea({center: [1, 2], zoom: 3}) 99 | new Map() 100 | assert.equal 1, 
leaflet._map._center[0] 101 | assert.equal 2, leaflet._map._center[1] 102 | assert.equal 3, leaflet._map._zoom 103 | done() 104 | 105 | it "should create new markers for features", testMap (leaflet, storage, Map, done) -> 106 | map = new Map() 107 | map.updateMarkers [a, b] 108 | 109 | assert.equal a, leaflet._map.markers["a"].feature 110 | assert.equal 2, leaflet._map.markers["a"].latLng.lat 111 | assert.equal 1, leaflet._map.markers["a"].latLng.lng 112 | assert.equal b, leaflet._map.markers["b"].feature 113 | done() 114 | 115 | it "should update existing markers for features", testMap (leaflet, storage, Map, done) -> 116 | map = new Map() 117 | map.updateMarkers [a, b] 118 | map.updateMarkers [aUpdated, b] 119 | 120 | assert.equal aUpdated, leaflet._map.markers["a"].feature 121 | assert.equal 6, leaflet._map.markers["a"].latLng.lat 122 | assert.equal 5, leaflet._map.markers["a"].latLng.lng 123 | done() 124 | -------------------------------------------------------------------------------- /test/assets/javascripts/map/MarkerSpec.coffee: -------------------------------------------------------------------------------- 1 | # Mocks 2 | class MockPromise 3 | constructor: (value) -> 4 | @value = value 5 | done: (callback) -> 6 | callback(@value) 7 | 8 | class LatLng 9 | constructor: (lat, lng) -> 10 | @lat = lat 11 | @lng = lng 12 | 13 | class MockPopup 14 | constructor: (content) -> 15 | @content = content 16 | setContent: (content) -> 17 | @content = content 18 | @ 19 | update: -> 20 | 21 | 22 | class MockMarker 23 | constructor: (latLng, options) -> 24 | @latLng = latLng 25 | @options = options 26 | bindPopup: (content) -> 27 | @popup = new MockPopup(content) 28 | getPopup: -> 29 | @popup 30 | setLatLng: (latLng) -> 31 | @latLng = latLng 32 | setIcon: (icon) -> 33 | @options.icon = icon 34 | addTo: (map) -> 35 | @addedTo = map 36 | on: (type, callback) -> 37 | @onClick = callback 38 | 39 | 40 | class MockMarkerRenderer 41 | renderPopup: (userId) -> 42 | "Popup " + userId 43 | createClusterMarkerIcon: (count) -> 44 | "cluster of " + count 45 | resetTranstion: -> 46 | transition: -> 47 | 48 | class MockLeaflet 49 | Marker: MockMarker 50 | LatLng: LatLng 51 | 52 | class MockMap 53 | 54 | testMarker = (test) -> 55 | (done) -> 56 | 57 | # Create mocks 58 | leaflet = new MockLeaflet() 59 | renderer = new MockMarkerRenderer() 60 | 61 | # Mockout require js environment 62 | new Squire() 63 | .mock("markerRenderer", renderer) 64 | .mock("leaflet", leaflet) 65 | .require ["javascripts/map/marker"], (Marker) -> 66 | test({ 67 | leaflet: leaflet, 68 | renderer: renderer, 69 | }, Marker) 70 | done() 71 | 72 | cluster = { 73 | properties: { 74 | count: 10 75 | timestamp: 0 76 | } 77 | id: "somecluster" 78 | } 79 | 80 | single = { 81 | properties: { 82 | timestamp: 0 83 | } 84 | id: "userid" 85 | } 86 | 87 | describe "Marker", -> 88 | it "should create a single marker", testMarker (deps, Marker) -> 89 | marker = new Marker(new MockMap(), single, new LatLng(10, 20)) 90 | assert.equal(single, marker.feature) 91 | assert.deepEqual(new LatLng(10, 20), marker.marker.latLng) 92 | assert.equal("Popup userid", marker.marker.popup.content) 93 | 94 | it "should create a cluster marker", testMarker (deps, Marker) -> 95 | marker = new Marker(new MockMap(), cluster, new LatLng(10, 20)) 96 | assert.equal(cluster, marker.feature) 97 | assert.deepEqual(new LatLng(10, 20), marker.marker.latLng) 98 | assert.equal("cluster of 10", marker.marker.options.icon) 99 | 100 | it "should add it to the map", testMarker (deps, Marker) 
-> 101 | map = new MockMap() 102 | marker = new Marker(map, single, new LatLng(10, 20)) 103 | assert.equal(map, marker.marker.addedTo) 104 | 105 | it "should update the position", testMarker (deps, Marker) -> 106 | marker = new Marker(new MockMap(), single, new LatLng(10, 20)) 107 | marker.update({ 108 | properties: { 109 | timestamp: 0 110 | } 111 | id: "userid" 112 | }, new LatLng(20, 30)) 113 | assert.deepEqual(new LatLng(20, 30), marker.marker.latLng) 114 | 115 | it "should update the cluster count", testMarker (deps, Marker) -> 116 | marker = new Marker(new MockMap(), cluster, new LatLng(10, 20)) 117 | marker.update({ 118 | properties: { 119 | timestamp: 0 120 | count: 20 121 | } 122 | id: "somecluster" 123 | }, new LatLng(20, 30)) 124 | assert.equal("cluster of 20", marker.marker.options.icon) 125 | 126 | 127 | -------------------------------------------------------------------------------- /public/javascripts/md5.min.js: -------------------------------------------------------------------------------- 1 | !function(n){"use strict";function t(n,t){var r=(65535&n)+(65535&t),e=(n>>16)+(t>>16)+(r>>16);return e<<16|65535&r}function r(n,t){return n<>>32-t}function e(n,e,u,o,c,f){return t(r(t(t(e,n),t(o,f)),c),u)}function u(n,t,r,u,o,c,f){return e(t&r|~t&u,n,t,o,c,f)}function o(n,t,r,u,o,c,f){return e(t&u|r&~u,n,t,o,c,f)}function c(n,t,r,u,o,c,f){return e(t^r^u,n,t,o,c,f)}function f(n,t,r,u,o,c,f){return e(r^(t|~u),n,t,o,c,f)}function i(n,r){n[r>>5]|=128<>>9<<4)+14]=r;var e,i,a,h,d,g=1732584193,l=-271733879,v=-1732584194,C=271733878;for(e=0;e>5]>>>t%32);return r}function h(n){var t,r=[];for(r[(n.length>>2)-1]=void 0,t=0;t>5]|=(255&n.charCodeAt(t/8))<16&&(u=i(u,8*n.length)),r=0;16>r;r+=1)o[r]=909522486^u[r],c[r]=1549556828^u[r];return e=i(o.concat(h(t)),512+8*t.length),a(i(c.concat(e),640))}function l(n){var t,r,e="0123456789abcdef",u="";for(r=0;r>>4)+e.charAt(15&t);return u}function v(n){return unescape(encodeURIComponent(n))}function C(n){return d(v(n))}function m(n){return l(C(n))}function s(n,t){return g(v(n),v(t))}function A(n,t){return l(s(n,t))}function p(n,t,r){return t?r?s(t,n):A(t,n):r?C(n):m(n)}"function"==typeof define&&define.amd?define(function(){return p}):n.md5=p}(this); -------------------------------------------------------------------------------- /app/actors/ClientConnection.scala: -------------------------------------------------------------------------------- 1 | package actors 2 | 3 | import javax.inject.{Named, Inject} 4 | 5 | import akka.actor.{ActorRef, Actor} 6 | import com.google.inject.assistedinject.Assisted 7 | import play.extras.geojson._ 8 | import play.api.libs.json._ 9 | import play.api.libs.functional.syntax._ 10 | import play.api.mvc.WebSocket.FrameFormatter 11 | import actors.PositionSubscriber.PositionSubscriberUpdate 12 | import models.backend._ 13 | 14 | object ClientConnection { 15 | 16 | /** 17 | * The factory interface for creating client connections 18 | */ 19 | trait Factory { 20 | def apply(email: String, upstream: ActorRef): Actor 21 | } 22 | 23 | /** 24 | * Events to/from the client side 25 | */ 26 | sealed trait ClientEvent 27 | 28 | /** 29 | * Event sent to the client when one or more users have updated their position in the current area 30 | */ 31 | case class UserPositions(positions: FeatureCollection[LatLng]) extends ClientEvent 32 | 33 | /** 34 | * Event sent from the client when the viewing area has changed 35 | */ 36 | case class ViewingArea(area: Polygon[LatLng]) extends ClientEvent 37 | 38 | /** 39 | * Event sent from the client when 
they have moved 40 | */ 41 | case class UserMoved(position: Point[LatLng]) extends ClientEvent 42 | 43 | 44 | /* 45 | * JSON serialisers/deserialisers for the above messages 46 | */ 47 | 48 | object ClientEvent { 49 | implicit def clientEventFormat: Format[ClientEvent] = Format( 50 | (__ \ "event").read[String].flatMap { 51 | case "user-positions" => UserPositions.userPositionsFormat.map(identity) 52 | case "viewing-area" => ViewingArea.viewingAreaFormat.map(identity) 53 | case "user-moved" => UserMoved.userMovedFormat.map(identity) 54 | case other => Reads(_ => JsError("Unknown client event: " + other)) 55 | }, 56 | Writes { 57 | case up: UserPositions => UserPositions.userPositionsFormat.writes(up) 58 | case va: ViewingArea => ViewingArea.viewingAreaFormat.writes(va) 59 | case um: UserMoved => UserMoved.userMovedFormat.writes(um) 60 | } 61 | ) 62 | 63 | /** 64 | * Formats WebSocket frames to be ClientEvents. 65 | */ 66 | implicit def clientEventFrameFormatter: FrameFormatter[ClientEvent] = FrameFormatter.jsonFrame.transform( 67 | clientEvent => Json.toJson(clientEvent), 68 | json => Json.fromJson[ClientEvent](json).fold( 69 | invalid => throw new RuntimeException("Bad client event on WebSocket: " + invalid), 70 | valid => valid 71 | ) 72 | ) 73 | } 74 | 75 | object UserPositions { 76 | implicit def userPositionsFormat: Format[UserPositions] = ( 77 | (__ \ "event").format[String] ~ 78 | (__ \ "positions").format[FeatureCollection[LatLng]] 79 | ).apply({ 80 | case ("user-positions", positions) => UserPositions(positions) 81 | }, userPositions => ("user-positions", userPositions.positions)) 82 | } 83 | 84 | object ViewingArea { 85 | implicit def viewingAreaFormat: Format[ViewingArea] = ( 86 | (__ \ "event").format[String] ~ 87 | (__ \ "area").format[Polygon[LatLng]] 88 | ).apply({ 89 | case ("viewing-area", area) => ViewingArea(area) 90 | }, viewingArea => ("viewing-area", viewingArea.area)) 91 | } 92 | 93 | object UserMoved { 94 | implicit def userMovedFormat: Format[UserMoved] = ( 95 | (__ \ "event").format[String] ~ 96 | (__ \ "position").format[Point[LatLng]] 97 | ).apply({ 98 | case ("user-moved", position) => UserMoved(position) 99 | }, userMoved => ("user-moved", userMoved.position)) 100 | } 101 | 102 | } 103 | 104 | /** 105 | * Represents a client connection 106 | * 107 | * @param email The email address of the client 108 | * @param regionManagerClient The region manager client to send updates to 109 | */ 110 | class ClientConnection @Inject() (@Named("regionManagerClient") regionManagerClient: ActorRef, 111 | @Assisted email: String, @Assisted upstream: ActorRef) extends Actor { 112 | 113 | // Create the subscriber actor to subscribe to position updates 114 | val subscriber = context.actorOf(PositionSubscriber.props(self), "positionSubscriber") 115 | 116 | import ClientConnection._ 117 | 118 | def receive = { 119 | // The users has moved their position, publish to the region 120 | case UserMoved(point) => 121 | regionManagerClient ! UserPosition(email, System.currentTimeMillis(), point.coordinates) 122 | 123 | // The viewing area has changed, tell the subscriber 124 | case ViewingArea(area) => area.bbox.foreach { bbox => 125 | subscriber ! 
BoundingBox(bbox._1, bbox._2) 126 | } 127 | 128 | // The subscriber received an update 129 | case PositionSubscriberUpdate(area, updates) => 130 | val userPositions = UserPositions(FeatureCollection( 131 | features = updates.map { pos => 132 | 133 | val properties = pos match { 134 | case _: UserPosition => Json.obj("timestamp" -> pos.timestamp) 135 | case Cluster(_, _, _, count) => Json.obj( 136 | "timestamp" -> pos.timestamp, 137 | "count" -> count) 138 | } 139 | 140 | Feature( 141 | geometry = Point(pos.position), 142 | id = Some(JsString(pos.id)), 143 | properties = Some(properties)) 144 | }, 145 | bbox = area.map(area => (area.southWest, area.northEast)) 146 | )) 147 | 148 | upstream ! userPositions 149 | 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /app/assets/javascripts/map/map.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # The main map. Manages displaying markers on the map, as well as responding to the user moving around and zooming 3 | # on the map. 4 | # 5 | define ["marker", "storage", "leaflet"], (Marker, Storage, Leaflet) -> 6 | 7 | class Map 8 | constructor: (ws) -> 9 | # the map itself 10 | @map = Leaflet.map("map") 11 | new Leaflet.TileLayer("http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png", 12 | minZoom: 1 13 | maxZoom: 16 14 | attribution: "Map data © OpenStreetMap contributors" 15 | ).addTo(@map) 16 | 17 | # Focus on the last area that was viewed 18 | lastArea = Storage.lastArea() 19 | if (lastArea) 20 | try 21 | @map.setView(lastArea.center, lastArea.zoom) 22 | catch e 23 | @map.setView([0, 0], 2) 24 | else 25 | @map.setView([0, 0], 2) 26 | 27 | # the websocket 28 | @ws = ws 29 | 30 | # the markers on the map 31 | @markers = {} 32 | 33 | # When zooming, the markers are likely to all change ids due to clustering, which means until they expire, 34 | # the screen is going to have too much data on it. So after zooming, we want to clear them off the screen, 35 | # but not before we've got at least some data to display, so we hold the ones that existed before zooming 36 | # in this map. 37 | @preZoomMarkers = {} 38 | 39 | # the sendArea timeout id 40 | @sendArea = null 41 | 42 | # When zooming starts or ends, we want to snap the markers to their proper place, so that the marker 43 | # animation doesn't interfere with the zoom animation. 44 | @map.on "zoomstart", => 45 | @snapMarkers() 46 | @map.on "zoomend", => 47 | @snapMarkers() 48 | # Move all the markers to the preZoomMarkers 49 | for id of @markers 50 | @preZoomMarkers[id] = @markers[id] 51 | @markers = {} 52 | # Tell the server about our new viewing area 53 | @updatePosition() 54 | 55 | @map.on "moveend", => 56 | # Tell the server about our new viewing area 57 | @updatePosition() 58 | 59 | # The clean up task for removing markers that haven't been updated in 20 seconds 60 | @intervalId = setInterval(=> 61 | time = new Date().getTime() 62 | for id of @markers 63 | marker = @markers[id] 64 | if time - marker.lastSeen > 20000 65 | delete @markers[id] 66 | marker.remove() 67 | , 5000) 68 | 69 | @updatePosition() 70 | 71 | updatePosition: () -> 72 | # If we're moving around a lot, we don't want to overwhelm the server with viewing 73 | # area updates. So, we wait 500ms before sending the update, and if no further 74 | # updates happen, then we do it. 
75 | clearTimeout @sendArea if @sendArea 76 | @sendArea = setTimeout(=> 77 | @doUpdatePosition() 78 | , 500) 79 | 80 | doUpdatePosition: () -> 81 | @sendArea = null 82 | bounds = @map.getBounds() 83 | 84 | # Update the last area that was viewed in the local storage so we can load it next time. 85 | localStorage.lastArea = Storage.setLastArea { 86 | center: bounds.getCenter().wrap(-180, 180) 87 | zoom: @map.getZoom() 88 | } 89 | 90 | # Create the event 91 | event = 92 | event: "viewing-area" 93 | area: 94 | type: "Polygon" 95 | coordinates: [[[bounds.getSouthWest().lng, bounds.getSouthWest().lat], 96 | [bounds.getNorthWest().lng, bounds.getNorthWest().lat], 97 | [bounds.getNorthEast().lng, bounds.getNorthEast().lat], 98 | [bounds.getSouthEast().lng, bounds.getSouthEast().lat], 99 | [bounds.getSouthWest().lng, bounds.getSouthWest().lat]]] 100 | bbox: [bounds.getWest(), bounds.getSouth(), bounds.getEast(), bounds.getNorth()] 101 | 102 | # Send the viewing area upate to the server 103 | @ws.send(JSON.stringify(event)) 104 | 105 | # Update the given marker positions 106 | updateMarkers: (features) -> 107 | for id of features 108 | feature = features[id] 109 | 110 | # If the marker was in the pre zoom markers, then we can promote it to the markers map 111 | marker = if @preZoomMarkers[feature.id] 112 | marker = @preZoomMarkers[feature.id] 113 | @markers[feature.id] = marker 114 | delete @preZoomMarkers[feature.id] 115 | marker 116 | else 117 | # Otherwise, just get it from the normal markers map 118 | @markers[feature.id] 119 | 120 | # Get the LatLng for the marker 121 | coordinates = feature.geometry.coordinates 122 | latLng = @wrapForMap(new Leaflet.LatLng(coordinates[1], coordinates[0])) 123 | 124 | # If the marker is already on the map 125 | if marker 126 | # Update it 127 | marker.update(feature, latLng) 128 | else 129 | # Otherwise create a new one 130 | marker = new Marker(@map, feature, latLng) 131 | @markers[feature.id] = marker 132 | 133 | # Clear any remaining pre zoom markers 134 | for id of @preZoomMarkers 135 | @preZoomMarkers[id].remove() 136 | @preZoomMarkers = {} 137 | 138 | # When the map stops zooming, we want to stop the animations of all the markers, otherwise they will very 139 | # slowly move to their new position on the zoomed map 140 | snapMarkers: -> 141 | for id of @markers 142 | @markers[id].snap() 143 | 144 | # Destroy the map 145 | destroy: -> 146 | try 147 | @map.remove() 148 | clearInterval(@intervalId) 149 | catch e 150 | 151 | # Handles when the user scrolls beyond the bounds of -180 and 180 152 | wrapForMap: (latLng) -> 153 | center = @map.getBounds().getCenter() 154 | offset = center.lng - center.wrap(-180, 180).lng 155 | if (offset != 0) 156 | return new Leaflet.LatLng(latLng.lat, latLng.lng + offset) 157 | else 158 | return latLng 159 | 160 | return Map 161 | -------------------------------------------------------------------------------- /app/backend/GeoFunctions.scala: -------------------------------------------------------------------------------- 1 | package backend 2 | 3 | import scala.collection.immutable.IndexedSeq 4 | import play.extras.geojson.LatLng 5 | import models.backend._ 6 | import scala.Some 7 | import models.backend.BoundingBox 8 | import models.backend.Cluster 9 | import models.backend.UserPosition 10 | import scala.annotation.tailrec 11 | 12 | /** 13 | * Geo functions. 14 | */ 15 | class GeoFunctions(settings: Settings) { 16 | 17 | /** 18 | * Get the region for the given point. 19 | * 20 | * @param point The point. 
21 | * @param zoomDepth The zoom depth. 22 | * @return The id of the region at the given zoom depth. 23 | */ 24 | def regionForPoint(point: LatLng, zoomDepth: Int = settings.MaxZoomDepth): RegionId = { 25 | require(zoomDepth <= settings.MaxZoomDepth, "Too deep!") 26 | val axisSteps = 1l << zoomDepth 27 | val xStep = 360d / axisSteps 28 | val x = Math.floor((point.lng + 180) / xStep).asInstanceOf[Int] 29 | val yStep = 180d / axisSteps 30 | val y = Math.floor((point.lat + 90) / yStep).asInstanceOf[Int] 31 | RegionId(zoomDepth, x, y) 32 | } 33 | 34 | /** 35 | * Get the regions for the given bounding box. 36 | * 37 | * @param bbox The bounding box. 38 | * @return The regions 39 | */ 40 | def regionsForBoundingBox(bbox: BoundingBox): Set[RegionId] = { 41 | @tailrec def regionsAtZoomLevel(zoomLevel: Int): Set[RegionId] = { 42 | if (zoomLevel == 0) { 43 | Set(RegionId(0, 0, 0)) 44 | } else { 45 | val axisSteps = 1 << zoomLevel 46 | // First, we get the regions that the bounds are in 47 | val southWestRegion = regionForPoint(bbox.southWest, zoomLevel) 48 | val northEastRegion = regionForPoint(bbox.northEast, zoomLevel) 49 | // Now calculate the width of regions we need, we need to add 1 for it to be inclusive of both end regions 50 | val xLength = northEastRegion.x - southWestRegion.x + 1 51 | val yLength = northEastRegion.y - southWestRegion.y + 1 52 | // Check if the number of regions is in our bounds 53 | val numRegions = xLength * yLength 54 | if (numRegions <= 0) { 55 | Set(RegionId(0, 0, 0)) 56 | } else if (settings.MaxSubscriptionRegions >= numRegions) { 57 | // Generate the sequence of regions 58 | (0 until numRegions).map { i => 59 | val y = i / xLength 60 | val x = i % xLength 61 | // We need to mod positive the x value, because it's possible that the bounding box started or ended from 62 | // less than -180 or greater than 180 W/E. 63 | RegionId(zoomLevel, modPositive(southWestRegion.x + x, axisSteps), southWestRegion.y + y) 64 | }(collection.breakOut) 65 | } else { 66 | regionsAtZoomLevel(zoomLevel - 1) 67 | } 68 | } 69 | } 70 | regionsAtZoomLevel(settings.MaxZoomDepth) 71 | } 72 | 73 | /** 74 | * Get the bounding box for the given region. 
75 | */ 76 | def boundingBoxForRegion(regionId: RegionId): BoundingBox = { 77 | val axisSteps = 1l << regionId.zoomLevel 78 | val yStep = 180d / axisSteps 79 | val xStep = 360d / axisSteps 80 | val latRegion = regionId.y * yStep - 90 81 | val lngRegion = regionId.x * xStep - 180 82 | 83 | BoundingBox( 84 | LatLng(latRegion, lngRegion), 85 | LatLng(latRegion + yStep, lngRegion + xStep)) 86 | } 87 | 88 | def summaryRegionForRegion(regionId: RegionId): Option[RegionId] = { 89 | if (regionId.zoomLevel == 0) None 90 | else Some(RegionId(regionId.zoomLevel - 1, regionId.x >>> 1, regionId.y >>> 1)) 91 | } 92 | 93 | /** 94 | * Cluster the given points into n2 boxes 95 | * 96 | * @param id The id of the region 97 | * @param bbox The bounding box within which to cluster 98 | * @param points The points to cluster 99 | * @return The clustered points 100 | */ 101 | def cluster(id: String, bbox: BoundingBox, points: IndexedSeq[PointOfInterest]): IndexedSeq[PointOfInterest] = { 102 | if (points.size > settings.ClusterThreshold) { 103 | groupNBoxes(bbox, settings.ClusterDimension, points).map { 104 | case (_, IndexedSeq(single)) => single 105 | // The fold operation here normalises all points to making the west of the bounding box 0, and then takes an average 106 | case (segment, multiple) => 107 | val (lng, lat, count) = multiple.foldLeft((0d, 0d, 0l)) { (totals, next) => 108 | val normalisedWest = modPositive(next.position.lng + 180d, 360) 109 | next match { 110 | case u: UserPosition => (totals._1 + normalisedWest, totals._2 + next.position.lat, totals._3 + 1) 111 | case Cluster(_, _, _, c) => (totals._1 + normalisedWest * c, totals._2 + next.position.lat * c, totals._3 + c) 112 | } 113 | } 114 | Cluster(id + "-" + segment, System.currentTimeMillis(), LatLng(lat / count, (lng / count) - 180d), count) 115 | }(collection.breakOut) 116 | } else { 117 | points 118 | } 119 | } 120 | 121 | /** 122 | * Group the positions into n2 boxes 123 | * 124 | * @param bbox The bounding box 125 | * @param positions The positions to group 126 | * @return The grouped positions 127 | */ 128 | def groupNBoxes(bbox: BoundingBox, n: Int, positions: IndexedSeq[PointOfInterest]): Map[Int, IndexedSeq[PointOfInterest]] = { 129 | positions.groupBy { pos => 130 | latitudeSegment(n, bbox.southWest.lat, bbox.northEast.lat, pos.position.lat) * n + 131 | longitudeSegment(n, bbox.southWest.lng, bbox.northEast.lng, pos.position.lng) 132 | } 133 | } 134 | 135 | /** 136 | * Find the segment that the point lies in in the given south/north range 137 | * 138 | * @return A number from 0 to n - 1 139 | */ 140 | def latitudeSegment(n: Int, south: Double, north: Double, point: Double): Int = { 141 | // Normalise so that the southern most point is 0 142 | val range = north - south 143 | val normalisedPoint = point - south 144 | val segment = Math.floor(normalisedPoint * (n / range)).asInstanceOf[Int] 145 | if (segment >= n || segment < 0) { 146 | // The point was never in the given range. Default to 0. 
147 | 0 148 | } else { 149 | segment 150 | } 151 | } 152 | 153 | /** 154 | * Find the segment that the point lies in in the given west/east range 155 | * 156 | * @return A number from 0 to n - 1 157 | */ 158 | def longitudeSegment(n: Int, west: Double, east: Double, point: Double): Int = { 159 | // Normalise so that the western most point is 0, taking into account the 180 cut over 160 | val range = modPositive(east - west, 360) 161 | val normalisedPoint = modPositive(point - west, 360) 162 | val segment = Math.floor(normalisedPoint * (n / range)).asInstanceOf[Int] 163 | if (segment >= n || segment < 0) { 164 | // The point was never in the given range. Default to 0. 165 | 0 166 | } else { 167 | segment 168 | } 169 | } 170 | 171 | /** 172 | * Modulo function that always returns a positive number 173 | */ 174 | def modPositive(x: Double, y: Int): Double = { 175 | val mod = x % y 176 | if (mod > 0) mod else mod + y 177 | } 178 | 179 | /** 180 | * Modulo function that always returns a positive number 181 | */ 182 | def modPositive(x: Int, y: Int): Int = { 183 | val mod = x % y 184 | if (mod > 0) mod else mod + y 185 | } 186 | 187 | } 188 | -------------------------------------------------------------------------------- /tutorial/index.html.script: -------------------------------------------------------------------------------- 1 | 2 | 3 | Reactive Maps with Play, Akka and Scala - Activator Template 4 | 5 | 6 |
7 | 8 |

The world is going reactive

9 | 10 |

11 | Not long ago, response times in the seconds were considered appropriate. Browser refreshes were the norm in web 12 | applications. Systems would go down for hours of maintenance, or even be rebooted nightly, and this was ok 13 | because people only expected the systems to be up during business hours. Applications didn't have to scale 14 | because they didn't have big user bases. And the complexity requirements put on web applications meant that 15 | typical requests could easily be handled by a thread per request model. 16 |

17 | 18 |

19 | Things are changing though. People expect web applications to react instantly. They expect them to be up all 20 | the time, while the applications are moving into the cloud, where failures are not exceptional, but rather are 21 | the norm, and so applications need to react to failure. Load on a web application can peak unpredictably, to 22 | be many orders of magnitude greater than normal, and so applications need to react to load and scale out. The 23 | complexity of business requirements means that in order to respond quickly to requests, things must 24 | be processed in parallel, reacting to events rather than waiting so as to utilise resources as efficiently as 25 | possible. 26 |

27 | 28 |

29 | This application is an example of how to implement the tenets of the 30 | Reactive Manifesto using the 31 | Typesafe Reactive Platform. 32 |

33 | 34 |

35 | It uses Play, combined with the latest in client side technologies to implement a reactive user interface. It 36 | uses Akka to provide horizontally scalable and resilient message passing and data management. 37 |

38 | 39 |

The tutorial starts by getting you familiar with the application, walking through its code, and then enhancing it. Toward the end there is a discussion of how reactive applications can be managed in production, using our ConductR product as the example.

45 | 46 |
47 |
48 | 49 |

Browse the app

50 | 51 |

52 | Before jumping into the code, let's see the app in action. Go to the Run 53 | tab, and start the application if it's not already started. Then visit it at: 54 | http://localhost:9000. 55 |

56 | 57 |

You will be presented with a screen asking for your email address. After entering it and submitting, you should see a map, and you should be able to find yourself on that map. (This may take a short amount of time because of the way data flows through summary regions in the system; the further you zoom out, the less real-time the app gets.)

62 | 63 |

If you zoom in on North Carolina, you should see some bots walking around. These bots simulate other users; the data used to generate their paths comes from hiking trail data grabbed from HikeWNC.

68 | 69 |
70 |
71 | 72 |

System Overview

73 | 74 |

The system can be broadly divided into three parts. The first part is the client side app, written in CoffeeScript, which runs in the browser. The second part is the web front end, a Play application that serves incoming web requests. The third part is the Akka backend, which manages the distribution of data across backend nodes, and the publishing and subscribing of events.

81 | 82 |

83 | In the demo you're seeing now, the Play web front end and the Akka backend are running as one application, but 84 | in a production scenario, they would be run separately, allowing fine grained control of resources between the 85 | front and backend. 86 |

87 | 88 |

89 | We also recommend that you factor out the two types of application into separate sbt modules, or even 90 | their own projects. They are presented here under the one project for the convenience of this tutorial. 91 | Factoring them out into their own modules/projects then allows them to be released/deployed independently of 92 | each other. 93 |

94 | 95 |
96 |
97 | 98 |

System Overview - Client

99 | 100 |

101 | The client talks to the web front end using 102 | WebSockets: 103 |

104 | 105 | 106 | 107 |

All the communication above is fire and forget. After sending a user-moved event, the client doesn't need anything in response; after sending a viewing-area message, the client might get many messages, or maybe none at all, depending on whether there are any users in that area; and after the server sends position updates, it expects nothing in return from the client.

113 | 114 |
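To make the protocol concrete, here is a rough sketch of the three event types as they travel over the WebSocket, built with Play's JSON API. The field names ("event", "position", "area", "positions") come from the formats in ClientConnection.scala and the event built in map.coffee; the coordinate values are made up purely for illustration.

import play.api.libs.json.Json

// Client -> server, fire and forget: sent every 2-10 seconds as the user moves.
val userMoved = Json.obj(
  "event" -> "user-moved",
  "position" -> Json.obj("type" -> "Point", "coordinates" -> Json.arr(-82.55, 35.6)))

// Client -> server, fire and forget: sent when the user zooms or pans the map.
val viewingArea = Json.obj(
  "event" -> "viewing-area",
  "area" -> Json.obj("type" -> "Polygon", "coordinates" -> Json.arr(/* ring of corner coordinates */)))

// Server -> client: pushed whenever there are updated positions in the viewed area.
val userPositions = Json.obj(
  "event" -> "user-positions",
  "positions" -> Json.obj("type" -> "FeatureCollection", "features" -> Json.arr()))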

This differs from many traditional client-server interactions, where clients make a request and expect a response. In a reactive application, much of the communication will not be request/response based, because reactive applications are designed so that data flows to consumers as it becomes available, and consumers of the data react to it rather than asking for it.

120 | 121 |

122 | For this reason, WebSockets makes for a perfect transport for client server communication in a reactive 123 | application, since it allows events to be passed with low overhead, not needing to wait for a response, and 124 | facilitates reacting to events from the server. 125 |

126 | 127 |
128 |
129 | 130 |

System Overview - Backend

131 | 132 |

133 | Before explaining the backend interface, we need to have a short lesson in geo based systems. A naive way to 134 | create the reactive maps application would be to send all data from all users to every connected user. This 135 | might work if there are only 10 users connected, or maybe even 100. At 1000, each user is going to be 136 | downloading megabytes of updates per second - it is not going to scale. 137 |

138 | 139 |

To manage this, we break the earth up into regions. There are many different ways to do this, but in our app we're using the simplest to understand: we flatten the map out into a rectangle, and then divide it into many smaller rectangles. How many rectangles is configurable, but we have defaulted this to 16 million. Because the earth is not a rectangle but a sphere, these rectangles don't all cover the same area; at the equator each one is a few kilometres wide, while at the poles each rectangle is only a few metres wide. But each rectangle is a constant number of degrees of longitude wide and degrees of latitude high, so transforming latitude and longitude coordinates to regions is a straightforward equation.

148 | 149 |
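As a sketch of that equation, mirroring regionForPoint in GeoFunctions.scala: the zoom depth and sample point below are illustration values only (a zoom depth of 12 gives 4096 x 4096 rectangles, roughly the 16 million regions mentioned above).

val zoomDepth = 12                       // illustration only: 2^12 steps per axis
val axisSteps = 1L << zoomDepth
val xStep = 360d / axisSteps             // degrees of longitude covered by one region
val yStep = 180d / axisSteps             // degrees of latitude covered by one region

val (lat, lng) = (35.6, -82.55)          // an arbitrary point, roughly in North Carolina
val x = math.floor((lng + 180) / xStep).toInt
val y = math.floor((lat + 90) / yStep).toInt
// RegionId(zoomDepth, x, y) identifies the rectangle containing the point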

150 | The web front end talks to the backend using Akka clustering: 151 |

152 | 153 | 154 | 155 |

156 | Actors in an Akka cluster may talk to each other without knowing whether they are on the same node or different 157 | nodes. In the above diagram, when a frontend node receives a position update from the client, the region 158 | responsible for that position may be on the same node, or may be on a different node. The web frontend doesn't 159 | need to worry, all it needs to know is which region to send to, and Akka will work out how to get the message 160 | to the right node. 161 |

162 | 163 |

Akka distributed PubSub messaging is used to publish location updates to the frontend. When the web frontend gets a new viewing area from the client, it works out which regions cover that viewing area, and then subscribes to updates from each of those regions. Whether those regions are on the same node or on different nodes is transparent: Akka ensures that the right messages get to the right subscribers on the right nodes. When a region has a new update to publish, it pushes it to the PubSub manager, which then pushes the messages to the right subscribers on the right nodes.

171 | 172 |

Note that Akka's distributed PubSub messaging makes no guarantees about delivering a message. However, for this use case, if the occasional publication of a location update is missed then we need not be concerned, as the next update will bring the client up to date. For situations where delivery guarantees become more important, look into Akka's distributed data library.

179 | 180 |

181 | Finally, regions get summarised into summary regions, and these summary regions are used so that clients that 182 | are viewing large areas at once aren't consuming too much data. Lowest level regions and higher level summary 183 | regions send updates to their higher level summary region, which aggregates and publishes the information. 184 | When the client requests a viewing area that contains too many regions, it subscribes instead to updates from 185 | summary regions. 186 |

187 | 188 |
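A small worked example of that hierarchy, using summaryRegionForRegion from GeoFunctions.scala (the concrete region coordinates are made up, and `settings` is assumed to be an in-scope Settings instance):

val geo = new GeoFunctions(settings)              // settings: the app's Settings instance, assumed in scope

geo.summaryRegionForRegion(RegionId(8, 137, 42))  // => Some(RegionId(7, 68, 21)): x and y halved, one zoom level up
geo.summaryRegionForRegion(RegionId(0, 0, 0))     // => None: the top-level region has nothing above it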
189 |
190 | 191 |

The code - Client side

192 | 193 |

194 | Now that we've got a broad overview of the system architecture, let's start looking at the code. We'll start 195 | off with tracing the code through from what happens when a user's GPS enabled device sends an update. 196 |

197 | 198 |

The entry point to this event flow is in gps.coffee. This file contains a class for handling the GPS integration of the app. It uses the HTML5 Geolocation API to watch for location updates from the browser.

205 | 206 |

207 | The first thing you'll find in this, and most other CoffeeScript files in this app, is a call to 208 | define. This is a RequireJS call, used to 209 | define a module. RequireJS allows JavaScript to be developed in a modular way, which is important for rich 210 | client side apps that heavily use JavaScript like this one. At the bottom of the file you can see a return 211 | statement returning the Gps class that we've declared, this means anything that imports our module 212 | will get that class back. 213 |

214 | 215 |

The bulk of the code in this file is actually dealing with ensuring that neither too few nor too many location updates are sent to the server. It ensures that a location update is sent at least every 10 seconds, but no more frequently than every 2 seconds. The most pertinent code that we're interested in now, though, is the navigator.geolocation.watchPosition(...) call, which is the HTML5 Geolocation API call to watch for GPS updates, and the @ws.send(...) call, which sends a user-moved event as JSON through the WebSocket, with the user's current position.

223 | 224 |

225 | The position field of this event is formatted using the GeoJSON standard, 226 | which you'll soon see is used throughout the application. 227 |

228 | 229 |

230 | So we can now see how location updates are obtained from the browser. But where does the WebSocket come from 231 | that it's getting sent down? You'll see that the constructor of the Gps class accepts the WebSocket as a 232 | parameter. This constructor is called from the 233 | mainPage.coffee module. In 234 | this module, you can see that in the define call it declares a few dependencies, one being the 235 | ./gps module that we just looked at. 236 |

237 | 238 |

239 | Scroll down to the connect method, and you'll see the following code: 240 |

241 | 242 |
@ws = new WebSocket(jsRoutes.controllers.Application.stream(email).webSocketURL())
243 | 244 |

245 | This is the code that creates the WebSocket, and a few lines below that, in the onopen callback, 246 | you can see where we are passing the WebSocket to the Gps constructor. The URL for the WebSocket 247 | is generated from a JavaScript reverse route which is defined in 248 | main.scala.html. 249 |

250 | 251 |

252 | Open main.scala.html. This is the template 253 | where that reverse route is defined. Play has a configuration 254 | file called routes, this file contains all the configuration 255 | for how incoming requests are routed to their corresponding actions in Play. In addition to providing this 256 | forward routing, Play also generates a reverse router, that code such as this template can call, and it will 257 | return the URL that can be used to reach that route. This means that your path information is kept in one 258 | place - in your routes file, and everything else in your application can depend on it. 259 | Play also provides the same mechanism for client-side code so that you can assemble url and request methods 260 | by applying a function and giving it input parameters which will be used to populate placeholder values, 261 | such as path variables and query parameters. 262 |

263 | 264 |

265 | In the routes file, you can see that the /stream/:email path is routed to 266 | controllers.Application.stream, so the JavaScript reverse router call 267 | jsRoutes.controllers.Application.stream(email).webSocketURL will return us that path. 268 |

269 | 270 |
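For reference, the corresponding entry in the routes file takes the usual Play form, something like GET /stream/:email controllers.Application.stream(email); the exact line in conf/routes may differ slightly, for example in spacing or in whether the parameter type is declared.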

271 | You can read more about routing in Play Framework 272 | here. 273 | You can also read more about JavaScript routing 274 | here. 275 |

276 | 277 |
278 |
279 | 280 |

The code - Web Frontend

281 | 282 |

283 | In the routes file, we saw how the WebSocket route was 284 | defined, and how it gets routed to the controllers.Application.stream method. Let's open that 285 | class now, Application.scala. 286 |

287 | 288 |

Looking at the stream method, the first thing to notice is that it is declared to be a WebSocket action that works with ClientEvent messages. These messages are defined in ClientConnection.scala; we can see our three types of messages there: UserMoved, ViewingArea and UserPositions.

294 | 295 |

Below the declaration of the message types, we can see formats for serialising these events to and from JSON, and for formatting the WebSocket frames. We won't go into too much detail here; you can read more about Play's JSON support here.

300 | 301 |

You can see back in Application.scala that we have told Play to use an actor to handle the WebSocket. This means our deserialized ClientEvent messages are sent to this actor, and when this actor sends ClientEvent messages to the passed-in upstream actor, those messages will be serialized and sent over the WebSocket to the client.

308 | 309 |
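As a rough outline of what that looks like (Play 2.4-style WebSocket actor support; the real Application.scala presumably builds the Props via the injected ClientConnection.Factory rather than constructing the actor directly, so treat this as a sketch only):

import akka.actor.{ActorRef, Props}
import play.api.Play.current
import play.api.mvc._
import actors.ClientConnection
import actors.ClientConnection.ClientEvent

class Application(regionManagerClient: ActorRef) extends Controller {

  // Each WebSocket connection gets its own ClientConnection actor. Messages from the
  // browser arrive as deserialized ClientEvents; anything sent to `upstream` is
  // serialized and pushed back to the browser.
  def stream(email: String) = WebSocket.acceptWithActor[ClientEvent, ClientEvent] { request => upstream =>
    Props(new ClientConnection(regionManagerClient, email, upstream))
  }
}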

310 | Back in ClientConnection.scala, beneath 311 | the event message types, you will find the actual actor that handles the client connection. The receive method 312 | shows the handling of the different message types that this actor will receive. We'll focus on just one of these 313 | message types. 314 |

315 | 316 |

Each time a UserMoved event is received, it's translated to a UserPosition object, and sent to the RegionManagerClient. This class is responsible for sending user position updates to the right node for the region that that position lives in. You can see in that class that the first thing it does is look up the regionId, and then it creates an UpdateUserPosition message and sends that message to a router.

323 | 324 |

325 | But how does that router get it to the right node? The configuration for that router can be found in 326 | application.conf. Scrolling down to the 327 | configuration in the akka section, you'll see this: 328 |

329 | 330 |
/regionManagerClient/router {
 331 |   router = consistent-hashing
 332 |   nr-of-instances = 1000
 333 |   cluster {
 334 |     enabled = on
 335 |     routees-path = "/user/regionManager"
 336 |     allow-local-routees = on
 337 |     use-role = "backend-region"
 338 |   }
 339 | }
340 | 341 |

342 | The routing to the node responsible for a region is done with a cluster aware 343 | consistent hashing 344 | router. The region identifier is used as key for the consistent hashing. This means that updates 345 | for a region are routed to the backend node responsible for that region. When the number of nodes in 346 | the cluster changes the responsibility for a region may change. In this application the states of the 347 | regions don't have to be migrated when this happens. Updates for some regions are routed to a new 348 | backend node and old data will expire. For a short period the region points (counts of users) might 349 | be slightly inaccurate, but that is acceptable for this application. 350 |

351 | 352 |

353 | The hash code used to route messages is specified by the ConsistentHashable interface, you can see 354 | that the UpdateUserPosition message 355 | implements this interface, and defines the hash key to be the region ID that the update is for. 356 |

357 | 358 |
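A sketch of that idea follows; the real message is defined in the backend sources and its exact fields may differ, but the point is that the message itself supplies the hash key.

import akka.routing.ConsistentHashingRouter.ConsistentHashable
import models.backend.{RegionId, UserPosition}

// Messages with the same consistentHashKey are always routed to the same routee,
// so all updates for one region end up on the node responsible for that region.
case class UpdateUserPosition(regionId: RegionId, position: UserPosition) extends ConsistentHashable {
  override def consistentHashKey: Any = regionId
}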

359 | If you're interested in learning the full details of Akka routing and how to configure it, you can read about 360 | Routing and 361 | 362 | Cluster Aware Routers in the Akka documentation. 363 |

364 | 365 | 366 | 367 |
368 |
369 | 370 |

The code - Backend

371 | 372 |

We've seen how the web frontend receives GPS user position events and then routes them to the right backend node in the Akka cluster. Now let's find out what happens with the events when they reach the backend node.

376 | 377 |

378 | In the configuration for the router that we saw before, we could see this config item defined: 379 |

380 | 381 |
routees-path = "/user/regionManager"
382 | 383 |

384 | /user is the namespace for all user defined actors (as opposed to actors defined by the system 385 | itself), so this says that the messages get sent to a user defined actor called regionManager, 386 | which is implemented by the RegionManager 387 | class. 388 |

389 | 390 |

391 | The region manager is responsible for managing all the regions that belong on that node. If it gets some data 392 | for a region, and an actor for that region doesn't exist yet, it creates it. Once it has ensured that an actor 393 | exists for that region, then it sends the user position to that region. 394 |

395 | 396 |
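A sketch of both points (the names below are illustrative, not the repo's exact code): the region manager is created as a top-level actor, so it lives at /user/regionManager, and it creates Region children on demand before forwarding updates to them.

// Creating the top-level actor gives it the path /user/regionManager, which is what
// routees-path refers to (regionManagerProps stands for whatever Props the app builds):
val regionManager = system.actorOf(regionManagerProps, "regionManager")

// Inside the region manager, a child Region actor is created on demand:
def receive = {
  case update @ UpdateUserPosition(regionId, _) =>
    val name = s"region-${regionId.zoomLevel}-${regionId.x}-${regionId.y}"   // illustrative naming scheme
    // regionProps is a hypothetical helper producing the Region actor's Props
    val region = context.child(name).getOrElse(context.actorOf(regionProps(regionId), name))
    region forward update
}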

The actor class that represents a region is called Region. This class has a map called activeUsers, and when it receives the user position, it adds that user's position to the map.

401 | 402 |
403 | 404 |
405 | 406 |

The code - Subscriptions between frontend and backend

407 | 408 |

409 | A client displays a section of the map, which is decorated with live markers of other users in that area. 410 | How are those user positions published to the client? 411 |

412 | 413 |

414 | When the user zooms or changes map position the client sends a 415 | ViewingArea event to the server, which 416 | ends up in PositionSubscriber via 417 | the controller. 418 | The PositionSubscriber works out which regions cover that viewing area, and then subscribes to updates from 419 | each of those regions. 420 |

421 | 422 |

423 | The published updates of user positions come from the backend Region 424 | actors. The thing that ties the publisher and subscriber together is the named topic, which in this case is 425 | the region id. 426 |

427 | 428 |

In a similar way the PositionSubscriber may decide to subscribe to summary regions, and then the published region points come from the SummaryRegion actors.

433 | 434 |

435 | The publish/subscribe 436 | mechanism in Akka is a registry of subscribers that is replicated to members in the cluster. 437 | There is no central hub or broker. When publishing a message to a named topic it sends the message to nodes with 438 | subscribers of that topic, and then delivers the message to all subscribing actors on that node. The message is sent 439 | over the wire only once per node that has at least one subscriber of the topic. The decoupling of publisher and 440 | subscriber makes it easy to add and remove nodes in the cluster as needed. 441 |

442 | 443 |
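A minimal sketch of that mechanism is below. The package moved between Akka versions; the Akka 2.4 form (akka.cluster.pubsub) is shown here, and the topic name is an illustration only: in this app the topic is the region id.

import akka.actor.{Actor, ActorLogging}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{Publish, Subscribe, SubscribeAck}

class RegionUpdates extends Actor with ActorLogging {
  private val mediator = DistributedPubSub(context.system).mediator

  // Register interest in one topic; the registration is gossiped to the other nodes.
  mediator ! Subscribe("region-6-33-21", self)

  def receive = {
    case _: SubscribeAck => // subscription confirmed
    case points          => log.info("points published for a subscribed region: {}", points)
  }
}

// Publishing side, e.g. from a Region actor on any node in the cluster:
//   mediator ! Publish("region-6-33-21", regionPoints)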

444 | Changes of subscribers are disseminated in a scalable way to other nodes with a gossip protocol. The registry is 445 | eventually consistent, i.e. changes are not immediately visible at other nodes, but typically they will be fully 446 | replicated to all other nodes after a few seconds. 447 |

448 | 449 |
450 | 451 |
452 | 453 |

The code - Summary

454 | 455 |

456 | At this stage of the tutorial, we've seen: 457 |

458 | 459 |
    460 |
• How the browser gets the user's position from their GPS-enabled device
  • 461 |
• How the browser sends the user's position to the web front end via WebSockets
  • 462 |
  • How the WebSocket is routed to the corresponding action
  • 463 |
• How the WebSocket action sends the user's position to an Akka router
  • 464 |
• How the Akka router routes the user's position to the correct node for the region in the Akka cluster
  • 465 |
• How the node receives the user's position and sends it to the right actor that manages that position
  • 466 |
• How the backend nodes publish updates to frontend subscribers
  • 467 |
468 | 469 |

470 | And now for something completely different. 471 |

472 | 473 |
474 | 475 |
476 |

Add more nodes

477 | 478 |

479 | So far you are running the application in one single JVM, hosting both frontend 480 | and backend. Let's try to add more backend and frontend nodes to the cluster. 481 |

482 | 483 |

484 | Open a terminal and change directory to the root directory of the reactive-maps application. Start a backend node 485 | with the following command (on one line): 486 |

487 |

 488 |     <path to activator dir>/activator 
 489 |       -Dakka.remote.netty.tcp.port=0 
 490 |       -Dakka.cluster.seed-nodes.1="akka.tcp://application@127.0.0.1:2552"
 491 |       -Dakka.cluster.roles.1=backend-region
 492 |       "run-main backend.Main"
 493 |     
494 | 495 |

496 | This runs the backend.Main class and 497 | overrides the configuration to bind Akka remoting to a random available port and use the "backend-region" 498 | cluster role for this node. It also declares the node that is running as a seed node so that this 499 | node can find the already established cluster. 500 |

501 | 502 |

503 | If you take a look at the log in Run you can see that the new node 504 | joined the cluster. The new node knows how to join the cluster because the first node running on port 2552 505 | is configured as initial contact point in the 'seed-nodes' property in the 506 | application.conf. 507 | You can read more about Akka Clustering in the 508 | documentation. 509 |

510 | 511 |

512 | You can repeat the command in new terminal windows to add more backend nodes. 513 |

514 | 515 |

516 | You can also add more simulated users with the following command (on one line): 517 |

518 |

 519 |     <path to activator dir>/activator 
 520 |       -Dakka.remote.netty.tcp.port=0
 521 |       -Dakka.cluster.seed-nodes.1="akka.tcp://application@127.0.0.1:2552"
 522 |       -Dakka.cluster.roles.1=frontend 
 523 |       -DreactiveMaps.bots.totalNumberOfBots=500 
 524 |       "run-main backend.Main"		
 525 | 	
526 | 527 |

528 | The following command (on one line) will start another frontend node listening on HTTP port 9001: 529 |

530 |

 531 |     <path to activator dir>/activator 
 532 |       -Dhttp.port=9001
 533 |       -Dakka.remote.netty.tcp.port=0
 534 |       -Dakka.cluster.seed-nodes.1="akka.tcp://application@127.0.0.1:2552"
 535 |       -Dakka.cluster.roles.1=frontend
 536 |       run
 537 |     
538 |

539 | Try the added frontend in a new browser window: http://localhost:9001 540 |

541 | 542 |
543 | 544 |
545 |

Adding a new feature

546 | 547 |

548 | Now that we've had a detailed look at some of the system, let's try and add a new feature. Until now, our view 549 | of the data has been region based - all data is associated with and stored in an actor for a region. This 550 | allows us to shard regions over multiple nodes, allowing efficient access to the data by node. 551 |

552 | 553 |

554 | We're going to add functionality that is user based. We'll use exactly the same methods for scaling as for the 555 | region based data, so we can see how to build such a system from scratch. 556 |

557 | 558 |

559 | We'll start off with a simple implementation that only works when there is one node, implementing first the 560 | backend, then the client side. Then we'll demonstrate how this implementation can be scaled out to shard the 561 | data across many nodes. Finally we'll show some techniques for ensuring data consistency when nodes are 562 | introduced into or removed from the cluster. 563 |

564 | 565 |

566 | The new feature that we'll add is tracking the distance that a user has travelled. We'll make the client 567 | fetch this data when a user clicks on a marker. 568 |

569 | 570 |
571 | 572 |
573 |

Handling the maths

574 | 575 |

576 | The first thing we need to do is add a function for calculating the distance between two points. There are 577 | many such formulas that can be used to do this, but a simple general purpose one which will suit our purposes 578 | is called the haversine 579 | formula. 580 |

581 | 582 |

Let's create a new method called distanceBetweenPoints in GeoFunctions:

586 | 587 | <>= 588 | 186a 589 | def distanceBetweenPoints(pointA: LatLng, pointB: LatLng): Double = { 590 | import Math._ 591 | // Setup the inputs to the formula 592 | val R = 6371009d // average radius of the earth in metres 593 | val dLat = toRadians(pointB.lat - pointA.lat) 594 | val dLng = toRadians(pointB.lng - pointA.lng) 595 | val latA = toRadians(pointA.lat) 596 | val latB = toRadians(pointB.lat) 597 | 598 | // The actual haversine formula. a and c are well known value names in the formula. 599 | val a = sin(dLat / 2) * sin(dLat / 2) + 600 | sin(dLng / 2) * sin(dLng / 2) * cos(latA) * cos(latB) 601 | val c = 2 * atan2(sqrt(a), sqrt(1 - a)) 602 | val distance = R * c 603 | 604 | distance 605 | } 606 | . 607 | @ 608 | 609 | @@ sbt compile 610 | 611 |
612 | 613 |
614 |

User meta data register

615 | 616 |

The first thing we need to implement is a user meta data register. As a first implementation, we'll write one actor that will store all user meta data; we'll call it UserMetaData. Create a new file called app/backend/UserMetaData.scala now, and start off by adding the following:

621 | 622 | <>= 623 | 0a 624 | package backend 625 | 626 | import akka.actor.{Props, Actor} 627 | import play.extras.geojson.LatLng 628 | 629 | object UserMetaData { 630 | case class GetUser(id: String) 631 | case class User(id: String, distance: Double) 632 | case class UpdateUserPosition(id: String, position: LatLng) 633 | 634 | val props = Props[UserMetaData] 635 | } 636 | . 637 | @ 638 | 639 |

640 | Some of the imports are unused for now, but the important thing to see here is the message types we've defined. 641 | This actor will receive UpdateUserPosition messages to update the user position, and will receive 642 | GetUser messages, and send back User messages to the sender. 643 |

644 | 645 |

646 | Now implement the actor itself: 647 |

648 | 649 | <>= 650 | 12a 651 | class UserMetaData extends Actor { 652 | 653 | import UserMetaData._ 654 | 655 | val settings = Settings(context.system) 656 | 657 | var users = Map.empty[String, (LatLng, Double)] 658 | 659 | def receive = { 660 | } 661 | } 662 | . 663 | @ 664 | 665 |

666 | Our actor depends on the settings (which provides the GeoFunctions class we already edited), and 667 | has a map that maps the userId to a tuple of the last position the user was seen at, and the distance they've 668 | travelled. We'll now implement handling the GetUser message in the receive method: 669 |

670 | 671 | <>= 672 | 21a 673 | case GetUser(id) => 674 | users.get(id) match { 675 | case Some((_, distance)) => sender ! User(id, distance) 676 | case None => sender ! User(id, 0) 677 | } 678 | . 679 | @ 680 | 681 |

682 | You can see that if the user wasn't found, we just return 0. Now implement handling the 683 | UpdateUserPosition message: 684 |

685 | 686 | <>= 687 | 27i 688 | case UpdateUserPosition(id, position) => 689 | val distance = users.get(id) match { 690 | case Some((lastPosition, lastDistance)) => 691 | lastDistance + settings.GeoFunctions.distanceBetweenPoints(lastPosition, position) 692 | case None => 0 693 | } 694 | 695 | users += (id -> (position, distance)) 696 | . 697 | @ 698 | 699 | @@ sbt compile 700 | 701 |

702 | You can see here we're using the distanceBetweenPoints method we implemented earlier, updating the 703 | distance if we have a last position to compare it to, and updating the map with the new user data. 704 |

705 | 706 |
707 | 708 |
709 |

Send user position updates

710 | 711 |

Now we need to implement the code that sends the user position updates to this actor. This is a little tedious because there are two things that will do this: the web client and the bots. We'll start with the web client.

715 | 716 |

717 | The actors for the web client are initialized by a Play module called Actors. It's using some 718 | Play helpers for creating and injecting actors, described 719 | here. We want to define a new actor binding for the userMetaData: 720 |

721 | 722 | <>= 723 | 22a 724 | bindActor[UserMetaData]("userMetaData") 725 | . 726 | @ 727 | 728 |

729 | The class that will ultimately use this actor is the 730 | ClientConnection.scala actor. Open it, 731 | and modify the constructor of ClientConnection to accept it as a parameter: 732 |

733 | 734 | <>= 735 | 110,111c 736 | class ClientConnection @Inject() (@Named("regionManagerClient") regionManagerClient: ActorRef, 737 | @Named("userMetaData") userMetaData: ActorRef, 738 | @Assisted email: String, @Assisted upstream: ActorRef) extends Actor { 739 | . 740 | @ 741 | 742 |

743 | Now when the client sends a UserMoved message through the web socket, in addition to sending a 744 | message to the regionManagerClient, we also want to update the user meta data: 745 |

746 | 747 | <>= 748 | 12a 749 | import backend.UserMetaData.UpdateUserPosition 750 | . 751 | 122a 752 | userMetaData ! UpdateUserPosition(email, point.coordinates) 753 | . 754 | @ 755 | 756 | @@ sbt compile 757 | 758 |

759 | The web front end is set to go, but the bots also need to be updated. As with ClientConnection, 760 | add a constructor parameter for the user meta data to GeoJsonBot.scala: 761 |

762 | 763 | <>= 764 | 24,25c 765 | class GeoJsonBot(trail: LineString[LatLng], offset: (Double, Double), userId: String, 766 | regionManagerClient: ActorRef, userMetaData: ActorRef) extends Actor { 767 | . 768 | @ 769 | 770 |

771 | And immediately after the bot sends a position update to the regionManagerClient, make it also 772 | send one to userMetaData: 773 |

774 | 775 | <>= 776 | 10a 777 | import backend.UserMetaData.UpdateUserPosition 778 | . 779 | 51a 780 | userMetaData ! UpdateUserPosition(userId, userPos.position) 781 | . 782 | @ 783 | 784 |

785 | Update the props method: 786 |

787 | 788 | <>= 789 | 15,16c 790 | def props(trail: LineString[LatLng], offset: (Double, Double), userId: String, regionManagerClient: ActorRef, 791 | userMetaData: ActorRef): Props = 792 | Props(classOf[GeoJsonBot], trail, offset, userId, regionManagerClient, userMetaData) 793 | . 794 | @ 795 | 796 |

Now this is called by BotManager, so modify the props method, the constructor parameter and the call that creates the bot:

800 | 801 | <>= 802 | 17,18c 803 | def props(regionManagerClient: ActorRef, userMetaData: ActorRef, data: Seq[URL]): Props = 804 | Props(classOf[BotManager], regionManagerClient, userMetaData, data) 805 | . 806 | 26c 807 | class BotManager(regionManagerClient: ActorRef, userMetaData: ActorRef, data: Seq[URL]) extends Actor { 808 | . 809 | 60c 810 | context.actorOf(GeoJsonBot.props(route, offset, userId, regionManagerClient, userMetaData)) 811 | . 812 | @ 813 | 814 |

815 | BotManager is initialised in two places, once in the web front end by Play in the 816 | BackendActors class, so update that to pass the userMetaData: 817 |

818 | 819 | <>= 820 | 34,35c 821 | class BackendActors @Inject() (system: ActorSystem, configuration: Configuration, environment: Environment, 822 | @Named("regionManagerClient") regionManagerClient: ActorRef, 823 | @Named("userMetaData") userMetaData: ActorRef) { 824 | . 825 | 46c 826 | system.actorOf(BotManager.props(regionManagerClient, userMetaData, findUrls(1))) 827 | . 828 | @ 829 | 830 |

And finally, we need to update the Main class, which is used when running a non-Play node in the cluster. Initialise the UserMetaData actor after creating the RegionManagerClient:

835 | 836 | <>= 837 | 36a 838 | val userMetaData = system.actorOf(UserMetaData.props, "userMetaData") 839 | . 840 | @ 841 | 842 |

Now pass the actor to the BotManager props method:

845 | 846 | <>= 847 | 44c 848 | system.actorOf(BotManager.props(regionManagerClient, userMetaData, findUrls(1))) 849 | . 850 | @ 851 | 852 | @@ sbt compile 853 | 854 |
855 | 856 |
857 |

Exposing user data to the web

858 | 859 |

The user's distance is going to be requested on an as-needed basis. Since the action is triggered by the user and the user expects a response, it makes sense in this case to use a simple HTTP request to get the data. So we're going to write a Play action to get the user meta data.

864 | 865 |

866 | Create a new Scala class called controllers.UserController: 867 |

868 | 869 | <>= 870 | 0a 871 | package controllers 872 | 873 | import akka.actor.ActorRef 874 | import akka.pattern.{AskTimeoutException, ask} 875 | import akka.util.Timeout 876 | 877 | import play.api.mvc._ 878 | import play.api.libs.json.Json 879 | 880 | import scala.concurrent.ExecutionContext 881 | import scala.concurrent.duration._ 882 | import javax.inject.{Inject, Named} 883 | 884 | import backend.UserMetaData._ 885 | 886 | class UserController @Inject() ( 887 | @Named("userMetaData") userMetaData: ActorRef 888 | )(implicit ec: ExecutionContext) extends Controller { 889 | 890 | } 891 | . 892 | @ 893 | 894 |

895 | There are quite a number of imports here, but don't be put off! The first thing we will do is define the 896 | return format for the user meta data. We're going to return JSON, so we need something to convert instances of 897 | User into JSON. We can do this by implementing a JSON Writes for User. 898 | Play's JSON API provides a handy macro that does this for you at compile time, so you can have type safe JSON 899 | converters with a minimum of code. Create this writes instance in the UserController object: 900 |

901 | 902 | <>= 903 | 19a 904 | implicit val userWrites = Json.writes[User] 905 | . 906 | @ 907 | 908 |

909 | Since it is declared to be implicit, it will be used whenever we call a method that requires an implicit writes 910 | for User, which the Json.toJson method does in the next block of code that we'll 911 | write: 912 |

913 | 914 | <>= 915 | 20a 916 | def get(id: String) = Action.async { 917 | implicit val timeout = Timeout(2.seconds) 918 | 919 | (userMetaData ? GetUser(id)) 920 | .mapTo[User] 921 | .map { user => 922 | Ok(Json.toJson(user)) 923 | } recover { 924 | case _: AskTimeoutException => NotFound 925 | } 926 | } 927 | . 928 | @ 929 | 930 |

931 | Here we have declared the action itself. It's an asynchronous action, meaning it returns a future. The action 932 | also takes a parameter, the id of the user. We'll see later how that parameter is passed to the action. 933 |

934 | 935 |

As a first step we have defined a timeout. In the code below that, we use the ask pattern, represented by the ? operator, to ask the user meta data actor for the user. Akka will not let you ask for something without specifying a timeout; the ? operator takes an implicit timeout, so we've defined it to be two seconds.

941 | 942 |

Having asked the actor for a user, we get back a future, and the first thing we do is map it to the type User. Then we map that to something that will generate our response: we return an Ok response, with the body being the user serialised to JSON.

947 | 948 |

949 | Finally, we also want to recover from a timeout. We assume that if it timed out, it means the 950 | user could not be found. 951 |

952 | 953 |

954 | The last thing we need to do on the server side is declare how requests will be routed to this action. We do 955 | this in the routes file: 956 |

957 | 958 | <>= 959 | 9a 960 | GET /user/:email controllers.UserController.get(email) 961 | . 962 | @ 963 | 964 | @@ sbt compile 965 | 966 |

967 | You can see that we are defining a dynamic route with an email parameter, signalled by the colon 968 | before the parameter name. Then we invoke the action we just created, passing that email 969 | parameter as the id of the user. 970 |

971 |
972 | 973 |
974 |

Consuming user data on the client side

975 | 976 |

977 | Now that we've got the server side ready, we can write the client side code to consume the new action we just 978 | created. Let's start by creating a new module, 979 | app/assets/javascripts/services/userInfo.coffee, that will 980 | make the AJAX call. Although this module will be very simple, it's best practice to separate AJAX calls 981 | from the business logic of your code, so you can easily mock and test them. 982 |

983 | 984 | <>= 985 | 0a 986 | define ["jquery"], -> 987 | { 988 | get: (email) -> 989 | $.getJSON("/user/" + email) 990 | } 991 | . 992 | @ 993 | 994 |

995 | Our userInfo module depends on jquery, and simply provides one method, 996 | get, which calls the action. jQuery's getJSON method returns a promise of the JSON, 997 | so we can consume that by attaching then or done callbacks to the returned promise. 998 |

999 | 1000 |

1001 | Now in marker.coffee, we want to 1002 | use this service to look up the user info, so we will add the userInfo module as a dependency. If 1003 | you're familiar with RequireJS, you might notice that we're not using a relative path name here. This is because 1004 | we are using path aliases that we configure elsewhere, which also makes mocking dependencies simpler, as it 1005 | decouples modules from their implementation. 1006 |

1007 | 1008 | <>= 1009 | 4c 1010 | define ["leaflet", "markerRenderer", "userInfo"], (Leaflet, renderer, userInfo) -> 1011 | . 1012 | @ 1013 | 1014 |

1015 | In the constructor of the Marker class, after attaching the popup to the marker, we want to bind 1016 | to the click event of the marker so that we can update the popup with the user's distance each time the user 1017 | clicks on it: 1018 |

1019 | 1020 | <>= 1021 | 24c 1022 | @marker.bindPopup(renderer.renderPopup(userId)) 1023 | 1024 | @marker.on "click", => 1025 | userInfo.get(userId).done (user) => 1026 | @marker.getPopup() 1027 | .setContent(renderer.renderPopup(userId, user.distance)).update() 1028 | . 1029 | @ 1030 | 1031 |

1032 | And now we want to handle that additional distance parameter that we've passed to renderPopup, 1033 | in markerRenderer.coffee: 1034 |

1035 | 1036 | <>= 1037 | 15,17c 1038 | renderPopup: (userId, distance) -> 1039 | popup = "<p>" + escapeHtml(userId) + "</p>" 1040 | 1041 | if (distance) 1042 | popup + "<p>Travelled: " + Math.floor(distance) + "m</p>" 1043 | else 1044 | popup 1045 | . 1046 | @ 1047 | 1048 |

1049 | Finally, since we've defined the new userInfo module, we need to declare what the path for that 1050 | module is. This can be done in main.coffee: 1051 |

1052 | 1053 | <>= 1054 | 13a 1055 | userInfo: "./services/userInfo" 1056 | . 1057 | @ 1058 | 1059 | @@ sbt assets 1060 | 1061 |

1062 | And now we should be good to go. Refresh your browser and try clicking on a marker to see if the distance is 1063 | rendered. If you're looking at the bots walking around North Carolina, you might see that they are moving at 1064 | hundreds of metres per second - these bots are designed to provide interesting data, not necessarily realistic 1065 | data. 1066 |

1067 | 1068 |
1069 | 1070 |
1071 |

Client side testing

1072 | 1073 |

1074 | After making the previous changes to the client side logic, we've left some of the client side tests in a failing 1075 | state. Let's fix them. Start by going to the 1076 | test tab and running the tests; you should see that some of them fail. 1077 |

1078 | 1079 |

1080 | We're using Mocha to write tests, in combination with Squire.js to mock out RequireJS dependencies. We've 1081 | also been very careful to design our client side code in such a way that the DOM manipulation code, and any code 1082 | doing AJAX, WebSockets or any other browser-based APIs, is separated from the business logic. This 1083 | allows us to comprehensively test the important code. 1084 |

1085 | 1086 |

1087 | Open MarkerSpec.coffee. This is where our 1088 | failing tests are. Since we've added a new user info service that 1089 | marker.coffee depends on, we need to create a 1090 | mocked version of it: 1091 |

1092 | 1093 | <>= 1094 | 20a 1095 | class MockUserInfo 1096 | users: {} 1097 | get: (userId) -> 1098 | new MockPromise({ 1099 | distance: @users[userId] 1100 | }) 1101 | . 1102 | @ 1103 | 1104 |

1105 | Now, to test that the marker correctly passes the looked-up user distance to the marker renderer, we'll modify the 1106 | MockMarkerRenderer.renderPopup method to "render" it: 1107 |

1108 | 1109 | <>= 1110 | 47,48c 1111 | renderPopup: (userId, distance) -> 1112 | if distance 1113 | userId + ":" + distance 1114 | else 1115 | "Popup " + userId 1116 | . 1117 | @ 1118 | 1119 |

1120 | Now let's modify the test setup code to instantiate the mock user info service that we created and register it as a mock: 1121 |

1122 | 1123 | <>= 1124 | 66,79c 1125 | # Create mocks 1126 | leaflet = new MockLeaflet() 1127 | renderer = new MockMarkerRenderer() 1128 | userInfo = new MockUserInfo() 1129 | 1130 | # Mockout require js environment 1131 | new Squire() 1132 | .mock("markerRenderer", renderer) 1133 | .mock("leaflet", leaflet) 1134 | .mock("userInfo", userInfo) 1135 | .require ["javascripts/map/marker"], (Marker) -> 1136 | test({ 1137 | leaflet: leaflet, 1138 | userInfo: userInfo, 1139 | renderer: renderer, 1140 | }, Marker) 1141 | done() 1142 | . 1143 | @ 1144 | 1145 |

1146 | Now try running the tests again. They should pass. Finally, let's add a new test that verifies that the popup 1147 | is updated with the distance when the marker is clicked: 1148 |

1149 | 1150 | <>= 1151 | 136a 1152 | it "should update the popup with the current distance when clicked", testMarker (deps, Marker) -> 1153 | marker = new Marker(new MockMap(), single, new LatLng(10, 20)) 1154 | deps.userInfo.users["userid"] = 50 1155 | marker.marker.onClick() 1156 | assert.equal("userid:50", marker.marker.popup.content) 1157 | . 1158 | @ 1159 | 1160 | @@ sbt mocha 1161 | 1162 |

1163 | Run the tests to ensure the new test also passes. 1164 |

1165 | 1166 |
1167 | 1168 |
1169 |

Scaling out

1170 | 1171 |

1172 | So now we've implemented something that works on a single node. However, this application has been designed to 1173 | scale to millions of users on hundreds of nodes - a feature that only works on a single node will not suffice. 1174 |

1175 | 1176 |

1177 | The system should also be resilient to system crashes and restarts. To address these issues we will make the 1178 | user meta data persistent and distribute the actors over the backend nodes in the cluster. 1179 |

1180 | 1181 |
1182 |
1183 |

Make UserMetaData event sourced

1184 | 1185 |

1186 | The UserMetaData actor that we developed 1187 | previously holds the data for all users, which is obviously not scalable. Instead, let's rewrite the actor 1188 | to represent the data for a single user. We will use 1189 | Akka Persistence 1190 | to make it durable. 1191 |

1192 | 1193 | <>= 1194 | 0,60c 1195 | package backend 1196 | 1197 | import java.net.URLDecoder 1198 | import scala.concurrent.duration._ 1199 | import akka.actor.PoisonPill 1200 | import akka.actor.Props 1201 | import akka.actor.ReceiveTimeout 1202 | import akka.contrib.pattern.ShardRegion 1203 | import akka.persistence.EventsourcedProcessor 1204 | import play.extras.geojson.LatLng 1205 | 1206 | object UserMetaData { 1207 | case class GetUser(id: String) 1208 | case class User(id: String, distance: Double) 1209 | case class UpdateUserPosition(id: String, position: LatLng) 1210 | 1211 | val props = Props[UserMetaData] 1212 | 1213 | sealed trait Event 1214 | case class FirstObservation(position: LatLng) extends Event 1215 | case class Moved(to: LatLng, distance: Double) extends Event 1216 | 1217 | private case class State(position: Option[LatLng], distance: Double) { 1218 | def updated(evt: Event): State = evt match { 1219 | case Moved(to, d) => copy(position = Some(to), distance = distance + d) 1220 | case FirstObservation(pos) => copy(position = Some(pos)) 1221 | } 1222 | } 1223 | } 1224 | 1225 | class UserMetaData extends EventsourcedProcessor { 1226 | 1227 | import UserMetaData._ 1228 | 1229 | val settings = Settings(context.system) 1230 | val userId = URLDecoder.decode(self.path.name, "utf-8") 1231 | private var state = State(None, 0.0) 1232 | 1233 | // passivate the entity when no activity 1234 | context.setReceiveTimeout(30.seconds) 1235 | 1236 | override def receiveRecover: Receive = { 1237 | case evt: Event => state = state.updated(evt) 1238 | } 1239 | 1240 | override def receiveCommand: Receive = { 1241 | case _: GetUser => 1242 | sender() ! User(userId, state.distance) 1243 | 1244 | case UpdateUserPosition(_, position) => 1245 | state match { 1246 | case State(Some(lastPosition), _) => 1247 | val d = settings.GeoFunctions.distanceBetweenPoints(lastPosition, position) 1248 | persist(Moved(position, d)) { evt => 1249 | state = state.updated(evt) 1250 | } 1251 | case State(None, _) => 1252 | persist(FirstObservation(position)) { evt => 1253 | state = state.updated(evt) 1254 | } 1255 | } 1256 | 1257 | case ReceiveTimeout => 1258 | context.parent ! ShardRegion.Passivate(stopMessage = PoisonPill) 1259 | } 1260 | } 1261 | . 1262 | @ 1263 | 1264 | @@ sbt compile 1265 | 1266 |

1267 | Akka Persistence takes an event sourced approach and stores the changes that build up an actor's current state. 1268 | In this case the current position and the total distance are captured by the events 1269 | FirstObservation and Moved. 1270 |

1271 | 1272 |

1273 | It is recommended to encapsulate the state in an immutable class, as illustrated 1274 | by the UserMetaData.State class. It knows how to create a new State 1275 | instance when applying the changes represented by domain events. It is important that the 1276 | state updates are free from side effects, because they are re-applied when the actor 1277 | is recovered from the persisted events. See receiveRecover. 1278 |
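To see why side-effect-free updates matter, here is a minimal conceptual sketch with made-up values (State is private to the companion object, so this is illustration only): recovery is effectively a left fold of the persisted events over the state.

    val events = Seq(FirstObservation(LatLng(0, 0)), Moved(LatLng(0, 1), 111000.0))
    // replaying the journal produces the same state every time, with no side effects
    val recovered = events.foldLeft(State(None, 0.0))((state, event) => state.updated(event))
    // recovered == State(Some(LatLng(0, 1)), 111000.0)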

1279 |
1280 |
1281 |

Add sharding

1282 | 1283 |

1284 | Akka Cluster Sharding 1285 | is useful when you need to distribute actors across several nodes in the cluster and want to 1286 | be able to interact with them using their logical identifier, but without having to care about 1287 | their physical location in the cluster, which might also change over time. 1288 |

1289 | 1290 |

1291 | To use the UserMetaData actor with cluster sharding we must be able to 1292 | extract the identifier from the messages and define a hash function for the identifier. 1293 | The hash function is used to group actors into shards, potentially running on different 1294 | nodes. 1295 | Those functions can be defined in the companion object like this: 1296 |

1297 | 1298 | <>= 1299 | 13,15c 1300 | sealed trait Command { 1301 | def id: String 1302 | } 1303 | case class GetUser(id: String) extends Command 1304 | case class User(id: String, distance: Double) extends Command 1305 | case class UpdateUserPosition(id: String, position: LatLng) extends Command 1306 | 1307 | val idExtractor: ShardRegion.IdExtractor = { 1308 | case cmd: Command => (cmd.id, cmd) 1309 | } 1310 | 1311 | val shardResolver: ShardRegion.ShardResolver = msg => msg match { 1312 | case cmd: Command => (math.abs(cmd.id.hashCode) % 100).toString 1313 | } 1314 | 1315 | val shardName: String = "UserMetaData" 1316 | . 1317 | @ 1318 | 1319 | @@ sbt compile 1320 | 1321 |
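Conceptually, these two functions are all the sharding extension needs to route a message: the id extractor picks the entry identifier and the payload out of a message, and the shard resolver maps the message to one of 100 shard identifiers. A small sketch with made-up values:

    val msg = GetUser("bob@example.com")
    val (entryId, payload) = UserMetaData.idExtractor(msg)  // ("bob@example.com", GetUser("bob@example.com"))
    val shardId = UserMetaData.shardResolver(msg)           // e.g. "57"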

1322 | To make the UserMetaData actors sharded in the cluster we need to register them with the 1323 | ClusterSharding extension. This must be done on all nodes in the cluster, 1324 | but we do it slightly differently on the nodes with the frontend role compared to 1325 | the backend role, because the actors must only be created on the backend nodes, and only 1326 | proxied from frontend nodes. Add the following in Actors.scala: 1327 |

1328 | 1329 | <>= 1330 | 7a 1331 | import akka.contrib.pattern.ClusterSharding 1332 | . 1333 | @ 1334 | 1335 | <>= 1336 | 38,40c 1337 | if (Cluster(system).selfRoles.exists(r => r.startsWith("backend"))) { 1338 | system.actorOf(RegionManager.props(), "regionManager") 1339 | 1340 | ClusterSharding(system).start( 1341 | typeName = UserMetaData.shardName, 1342 | entryProps = Some(UserMetaData.props), 1343 | idExtractor = UserMetaData.idExtractor, 1344 | shardResolver = UserMetaData.shardResolver) 1345 | } else { 1346 | ClusterSharding(system).start( 1347 | typeName = UserMetaData.shardName, 1348 | entryProps = None, 1349 | idExtractor = UserMetaData.idExtractor, 1350 | shardResolver = UserMetaData.shardResolver) 1351 | } 1352 | . 1353 | @ 1354 | 1355 |
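Once the shard region is started (or started in proxy-only mode), all interaction with the sharded actors goes through the region ActorRef, which routes each message to the right node and entry using the functions defined above. A hedged usage sketch, with made-up values:

    val userMetaDataRegion = ClusterSharding(system).shardRegion(UserMetaData.shardName)
    userMetaDataRegion ! UpdateUserPosition("bob@example.com", LatLng(35.9, -78.9))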

1356 | Our setup for the user meta data actor is now a little more complex than the Play bindings helper will allow, so 1357 | we need to create a new provider for it, and bind it to be an eager singleton. This can be done using the 1358 | javax.inject.Provider interface: 1359 |

1360 | 1361 | <>= 1362 | 6a 1363 | import com.google.inject.name.Names 1364 | . 1365 | 25c 1366 | bind(classOf[ActorRef]).annotatedWith(Names.named("userMetaData")) 1367 | .toProvider(classOf[UserMetaDataProvider]).asEagerSingleton() 1368 | . 1369 | 33a 1370 | class UserMetaDataProvider @Inject() (system: ActorSystem) extends Provider[ActorRef] { 1371 | lazy val get = ClusterSharding(system).shardRegion(UserMetaData.shardName) 1372 | } 1373 | @ 1374 | 1375 | @@ sbt compile 1376 | 1377 |

1378 | And correspondingly in Main.scala: 1379 |

1380 | 1381 | <>= 1382 | 7a 1383 | import akka.contrib.pattern.ClusterSharding 1384 | . 1385 | @ 1386 | 1387 | <>= 1388 | 33a 1389 | 1390 | ClusterSharding(system).start( 1391 | typeName = UserMetaData.shardName, 1392 | entryProps = Some(UserMetaData.props), 1393 | idExtractor = UserMetaData.idExtractor, 1394 | shardResolver = UserMetaData.shardResolver) 1395 | . 1396 | @ 1397 | 1398 | <>= 1399 | 44c 1400 | 1401 | ClusterSharding(system).start( 1402 | typeName = UserMetaData.shardName, 1403 | entryProps = None, 1404 | idExtractor = UserMetaData.idExtractor, 1405 | shardResolver = UserMetaData.shardResolver) 1406 | val userMetaData = ClusterSharding(system).shardRegion(UserMetaData.shardName) 1407 | . 1408 | @ 1409 | 1410 |
1411 |
1412 |

Set up the journal

1413 | 1414 | @@ sbt compile 1415 | 1416 |

1417 | When using Akka Persistence in a cluster we need a journal that is replicated or accessible from all nodes. 1418 | In this sample we will use a 1419 | shared LevelDB journal 1420 | running on the node with port 2552. This is a single point of failure, and should not be used in production. 1421 | A real system would use a distributed journal. 1422 |

1423 | 1424 |

1425 | Add the configuration for the journal: 1426 |

1427 | 1428 | <>= 1429 | 83a 1430 | 1431 | persistence { 1432 | journal.plugin = "akka.persistence.journal.leveldb-shared" 1433 | journal.leveldb-shared.store { 1434 | # DO NOT USE 'native = off' IN PRODUCTION !!! 1435 | native = off 1436 | dir = "target/shared-journal" 1437 | } 1438 | snapshot-store.local.dir = "target/snapshots" 1439 | } 1440 | . 1441 | @ 1442 | 1443 |

1444 | Add the file app/backend/SharedJournalHelper.scala 1445 | with the following content: 1446 | 1447 | <>= 1448 | 0a 1449 | package backend 1450 | 1451 | import scala.concurrent.duration._ 1452 | import akka.actor.ActorIdentity 1453 | import akka.actor.ActorPath 1454 | import akka.actor.ActorSystem 1455 | import akka.actor.Identify 1456 | import akka.actor.Props 1457 | import akka.pattern.ask 1458 | import akka.persistence.journal.leveldb.SharedLeveldbJournal 1459 | import akka.persistence.journal.leveldb.SharedLeveldbStore 1460 | import akka.util.Timeout 1461 | import akka.cluster.Cluster 1462 | 1463 | object SharedJournalHelper { 1464 | 1465 | def startupSharedJournal(system: ActorSystem): Unit = { 1466 | // Start the shared journal on one node (don't crash this SPOF) 1467 | // This will not be needed with a distributed journal 1468 | val storePort = 2552 1469 | if (Cluster(system).selfAddress.port.get == storePort) 1470 | system.actorOf(Props[SharedLeveldbStore], "store") 1471 | // register the shared journal 1472 | import system.dispatcher 1473 | implicit val timeout = Timeout(10.seconds) 1474 | val storePath = ActorPath.fromString(s"akka.tcp://${system.name}@127.0.0.1:$storePort/user/store") 1475 | val f = (system.actorSelection(storePath) ? Identify(None)) 1476 | f.onSuccess { 1477 | case ActorIdentity(_, Some(ref)) => SharedLeveldbJournal.setStore(ref, system) 1478 | case _ => 1479 | system.log.error("Shared journal not started at {}", storePath) 1480 | system.shutdown() 1481 | } 1482 | f.onFailure { 1483 | case _ => 1484 | system.log.error("Lookup of shared journal at {} timed out", storePath) 1485 | system.shutdown() 1486 | } 1487 | } 1488 | 1489 | } 1490 | . 1491 | @ 1492 |

1494 | Initialize the shared journal in Actors.scala: 1495 |

1496 | 1497 | <>= 1498 | 44a 1499 | 1500 | // This will not be needed with a distributed journal 1501 | SharedJournalHelper.startupSharedJournal(system) 1502 | 1503 | . 1504 | @ 1505 | 1506 |

1507 | And correspondingly in Main.scala: 1508 |

1509 | 1510 | <>= 1511 | 31a 1512 | // This will not be needed with a distributed journal 1513 | SharedJournalHelper.startupSharedJournal(system) 1514 | 1515 | . 1516 | @ 1517 | 1518 | @@ sbt compile 1519 | 1520 |
1521 |
1522 |

Run it again

1523 | 1524 |

1525 | We have now added scalability and resilience to the new user meta data feature. Try it again by refreshing your browser 1526 | at http://localhost:9000. 1527 |

1528 | 1529 |

1530 | The files of the shared journal are saved in the target directory and when you restart 1531 | the application the state is recovered. You can clean the state with: 1532 |

1533 | 1534 |

1535 | <path to activator dir>/activator clean
1536 | 
1537 | 1538 |
1539 | 1540 |
1541 |

Deploying Reactive Applications

1542 | 1543 |

1544 | We have developed ConductR, 1545 | a solution designed to ease the deployment and 1546 | management of reactive applications and services. ConductR handles failures gracefully, scales elastically and 1547 | embraces change in your Reactive system. Enough of the sales talk, though; let's ship this app! 1548 |

1549 | 1550 |

1551 | For any application that will target ConductR you first need to add the sbt plugin 1552 | sbt-conductr to your 1553 | project/plugins.sbt. You will also need to have installed 1554 | Docker and the 1555 | conductr-cli in order to run a local ConductR cluster within the 1556 | sandbox. 1557 |

1558 | 1559 |

1560 | sbt-conductr actually brings in another sbt auto plugin named BundlePlugin, and between them you declare the 1561 | settings required for deploying your application. 1562 | You then load and run a "bundle", which is the 1563 | unit of deployment for ConductR. A bundle is a zip file with the hash of its contents encoded in its name. 1564 |

1565 | 1566 |

1567 | The sandbox allows you to deploy your application on a local ConductR cluster for the 1568 | purposes of testing. The sandbox provides a visualization of the loaded and running bundles, application log 1569 | reporting within the comfort of sbt and proxying of your services. You can therefore get your application 1570 | perfectly prepared for deploying into more production-like environments that run ConductR (such as EC2). 1571 |

1572 | 1573 |

1574 | In terms of modifying ReactiveMaps very little is required. The essential requirement is that your application 1575 | must signal ConductR that it is ready upon having 1576 | started up. 1577 | Note the lines that obtain a BUNDLE_SYSTEM and BUNDLE_SYSTEM_VERSION. These environment variables are set in the context of ConductR. 1578 | In our case we use this information to specify the actor system name. The latter lines signal to ConductR that the app has started up. 1579 | Also note that this code is only required when running pure Akka applications such as the backend of ReactiveMaps. 1580 | For Play, no configuration or code change is necessary. Both the Akka and Play approaches utilize a library named 1581 | 1582 | conductr-bundle-lib. That's it! 1583 |
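As a hedged sketch of the environment variable part only (the fallback values and the way the name and version are combined are assumptions, and the actual start-up signalling is handled by conductr-bundle-lib), the backend could derive its actor system name like this:

    // Read ConductR-provided environment variables; fall back to defaults when
    // running outside ConductR. The exact naming scheme here is an assumption.
    val systemName = sys.env.getOrElse("BUNDLE_SYSTEM", "application")
    val systemVersion = sys.env.getOrElse("BUNDLE_SYSTEM_VERSION", "1")
    val system = ActorSystem(s"$systemName-$systemVersion")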

1584 | 1585 |

1586 | The following describes something that can only be done from the activator command line. You'll need to jump 1587 | into a separate terminal window and just type "activator" to fire up the activator console. 1588 |

1589 | 1590 |

1591 | To prepare the ConductR bundles, type the following in activator: 1592 |

1593 | 1594 |
bundle:dist
1595 |  backend-region:dist
1596 |  backend-summary:dist
1597 | 1598 |

1599 | Three separate bundles will then be generated. Note the files produced. 1600 |

1601 | 1602 |

1603 | We then start up the sandbox so that we can load and then run the bundles we've produced: 1604 |

1605 | 1606 |
sandbox run
1607 | 1608 |

1609 | The sandbox will report the address that bundles will be found on. It will be something like 192.168.59.103. 1610 | Take note of this address. 1611 |

1612 | 1613 |

1614 | You can then load a bundle and run it using the following commands (substitute your paths and hashes as appropriate): 1615 |

1616 | 1617 |
conduct load /pathtotheproject/target/bundle/reactive-maps-v1-76d4af00c95dbed5eddc4a7a93a753bf7a85c14771745eea79239b52f61b90a0
1618 |  conduct run reactive-maps-frontend
1619 | 1620 |

1621 | Do the above for each bundle that you produced. 1622 |

1623 | 1624 |

1625 | Once your bundles are loaded and running, you can type "conduct info" in order to see the state of the world. 1626 | You should see something like this: 1627 |

1628 | 1629 |
ID               NAME                           #REP  #STR  #RUN
1630 |  76d4af0-03202dc  reactive-maps-frontend            1     0     1
1631 |  76d4af0-088d9a0  reactive-maps-backend-region      1     0     1
1632 |  76d4af0-52e1a1d  reactive-maps-backend-summary     1     0     1
1633 | 1634 |

1635 | You can then visit your application by navigating to the address that the sandbox provided, and the port 1636 | that is configured for ReactiveMaps, e.g. http://192.168.59.103:9000. The IP address is that of your Docker host. 1637 |

1638 | 1639 |

1640 | With a production cluster (not the sandbox, which is only single-noded!), you'd now be able to scale up instances 1641 | of each of these bundles by simply typing 1642 | "conduct run reactive-maps-frontend --scale 3" to get 3 instances of the frontend. Note that there is no need 1643 | to specify the seed nodes manually; ConductR takes care of that for you. ConductR will also 1644 | keep ReactiveMaps running should a node, or ConductR itself, fail. Resiliency is at the core of ConductR. 1645 |

1646 |
1647 | 1648 |
1649 |

Learn more about reactive design

1650 | 1651 |

1652 | Essential characteristics of a reactive application: 1653 |

1654 |
1655 |   • react to events - the event-driven nature enables the following qualities
1656 |   • react to load - focus on scalability by avoiding contention on shared resources
1657 |   • react to failure - build resilient systems with the ability to recover at all levels
1658 |   • react to users - honor response time guarantees regardless of load
1659 |
1660 | 1661 |

1662 | Read more about how to build reactive applications in the 1663 | Reactive Manifesto. 1664 |

1665 | 1666 |

1667 | Find out more about the Typesafe Reactive Platform. 1668 |

1669 | 1670 |
1671 | 1672 | 1673 | 1674 | --------------------------------------------------------------------------------