├── project ├── build.properties ├── project │ └── typesafe.sbt └── plugins.sbt ├── public ├── images │ └── favicon.png └── javascripts │ └── md5.min.js ├── README.md ├── tutorial ├── frontend-region.jssequence ├── client-server.jssequence └── index.html.script ├── .gitignore ├── test └── assets │ ├── SetupMocha.js │ └── javascripts │ └── map │ ├── MapSpec.coffee │ └── MarkerSpec.coffee ├── conf ├── routes └── application.conf ├── app ├── assets │ ├── javascripts │ │ ├── services │ │ │ ├── storage.coffee │ │ │ ├── mockGps.coffee │ │ │ └── gps.coffee │ │ ├── main.coffee │ │ ├── map │ │ │ ├── markerRenderer.coffee │ │ │ ├── marker.coffee │ │ │ └── map.coffee │ │ └── models │ │ │ └── mainPage.coffee │ └── stylesheets │ │ └── main.less ├── controllers │ └── Application.scala ├── actors │ ├── RegionManagerClient.scala │ ├── Actors.scala │ ├── GeoJsonBot.scala │ ├── PositionSubscriber.scala │ └── ClientConnection.scala ├── views │ ├── main.scala.html │ └── index.scala.html ├── models │ └── backend │ │ └── Backend.scala └── backend │ ├── Main.scala │ ├── Region.scala │ ├── BotManager.scala │ ├── SummaryRegion.scala │ ├── Settings.scala │ ├── RegionManager.scala │ └── GeoFunctions.scala ├── LICENSE ├── .travis.yml └── activator.properties /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.9 2 | -------------------------------------------------------------------------------- /public/images/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/typesafehub/ReactiveMaps/HEAD/public/images/favicon.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Example archived 2 | 3 | This example project is outdated and no longer useful. 
4 | 5 | Find example projects for Lightbend technologies at 6 | [Lightbend Tech Hub](https://developer.lightbend.com/start/) 7 | -------------------------------------------------------------------------------- /tutorial/frontend-region.jssequence: -------------------------------------------------------------------------------- 1 | participant Frontend 2 | participant PubSub 3 | participant Region 4 | 5 | Frontend->Region: UpdateUserPosition 6 | Frontend->PubSub: Subscribe 7 | Frontend->PubSub: Unsubscribe 8 | Region->PubSub: RegionPoints 9 | PubSub->Frontend: RegionPoints -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | logs 2 | project/typesafe.properties 3 | project/activator-tutorial-generator.sbt 4 | project/target 5 | target 6 | tmp 7 | .history 8 | dist 9 | /.idea 10 | /*.iml 11 | /out 12 | /.idea_modules 13 | /.classpath 14 | /.project 15 | /RUNNING_PID 16 | /.settings 17 | .target 18 | .cache 19 | bin 20 | .DS_Store 21 | activator-sbt-*-shim.sbt -------------------------------------------------------------------------------- /test/assets/SetupMocha.js: -------------------------------------------------------------------------------- 1 | // Setup requirejs to have the right baseUrl 2 | global.requirejs = require("requirejs"); 3 | 4 | requirejs.config({ 5 | nodeRequire: require, 6 | baseUrl: __dirname 7 | }); 8 | 9 | // A few modules that all tests will use 10 | global.Squire = requirejs("lib/squirejs/Squire"); 11 | global.assert = require("assert"); 12 | -------------------------------------------------------------------------------- /tutorial/client-server.jssequence: -------------------------------------------------------------------------------- 1 | Client->Server: UserMoved 2 | Note right of Server: Sent every 2-10 seconds,\nwhen the user moves\ntheir physical position. 3 | Client->Server: ViewingArea 4 | Note right of Server: Sent when the user zooms\nor changes map position. 5 | Server->Client: UserPositions 6 | Note right of Server: Sent when the server has\nupdated positions for\nusers in the currently\nviewed area. 
-------------------------------------------------------------------------------- /conf/routes: -------------------------------------------------------------------------------- 1 | # Routes 2 | # This file defines all application routes (Higher priority routes first) 3 | # ~~~~ 4 | 5 | # Home page 6 | GET / controllers.Application.index 7 | 8 | # The websocket 9 | GET /stream/:email controllers.Application.stream(email) 10 | 11 | # Static assets 12 | GET /assets/*file controllers.Assets.versioned(path="/public", file: Asset) 13 | -------------------------------------------------------------------------------- /app/assets/javascripts/services/storage.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # Reactive maps client side storage 3 | # 4 | define () -> 5 | return { 6 | 7 | # Get the last viewed area 8 | lastArea: -> 9 | if (localStorage.lastArea) 10 | try 11 | lastArea = JSON.parse localStorage.lastArea 12 | return lastArea 13 | catch e 14 | localStorage.removeItem("lastArea") 15 | 16 | # Set the last viewed area 17 | setLastArea: (lastArea) -> 18 | localStorage.lastArea = JSON.stringify lastArea 19 | 20 | } -------------------------------------------------------------------------------- /project/project/typesafe.sbt: -------------------------------------------------------------------------------- 1 | // Update this when a new patch of Reactive Platform is available 2 | val rpVersion = "15v09p04" 3 | 4 | // Update this when a major version of Reactive Platform is available 5 | val rpUrl = "https://repo.typesafe.com/typesafe/for-subscribers-only/AEE4D829FC38A3247F251ED25BA45ADD675D48EB" 6 | 7 | addSbtPlugin("com.typesafe.rp" % "sbt-typesafe-rp" % rpVersion) 8 | 9 | // The resolver name must start with typesafe-rp 10 | resolvers += "typesafe-rp-mvn" at rpUrl 11 | 12 | // The resolver name must start with typesafe-rp 13 | resolvers += Resolver.url("typesafe-rp-ivy", url(rpUrl))(Resolver.ivyStylePatterns) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This software is licensed under the Apache 2 license, quoted below. 2 | 3 | Copyright 2009-2013 Typesafe Inc. [http://www.typesafe.com] 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); you may not 6 | use this file except in compliance with the License. You may obtain a copy of 7 | the License at 8 | 9 | [http://www.apache.org/licenses/LICENSE-2.0] 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | License for the specific language governing permissions and limitations under 15 | the License. 16 | -------------------------------------------------------------------------------- /app/controllers/Application.scala: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import javax.inject.Inject 4 | 5 | import akka.actor.Props 6 | import play.api.mvc._ 7 | import actors.ClientConnection 8 | import play.api.Play.current 9 | import actors.ClientConnection.ClientEvent 10 | 11 | class Application @Inject() ( 12 | clientConnectionFactory: ClientConnection.Factory 13 | ) extends Controller { 14 | 15 | /** 16 | * The index page. 
17 | */ 18 | def index = Action { implicit req => 19 | Ok(views.html.index()) 20 | } 21 | 22 | /** 23 | * The WebSocket 24 | */ 25 | def stream(email: String) = WebSocket.acceptWithActor[ClientEvent, ClientEvent] { _ => upstream => 26 | Props(clientConnectionFactory(email, upstream)) 27 | } 28 | } -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: trusty 2 | sudo: required 3 | 4 | language: scala 5 | 6 | jdk: oraclejdk8 7 | 8 | script: 9 | - sbt test activatorRunTutorial activatorGenerateTutorial 10 | - git diff --quiet tutorial/index.html || (echo "index.html has been updated directly, do not do this, edit tutorial/index.html, then use https://github.com/typesafehub/activator-tutorial-generator to generate the new index.html" && false) 11 | 12 | before_script: 13 | - printf "resolvers += \"Typesafe repository\" at \"http://repo.typesafe.com/typesafe/releases/\"\n\naddSbtPlugin(\"com.typesafe.sbt\" %%%% \"sbt-activator-tutorial-generator\" %% \"1.0.3\")" > project/activator-tutorial-generator.sbt 14 | - echo "typesafe.subscription=reactive-maps-ci" > project/typesafe.properties 15 | -------------------------------------------------------------------------------- /app/assets/javascripts/main.coffee: -------------------------------------------------------------------------------- 1 | # 2 | # The main entry point into the client side. Creates a new main page model and binds it to the page. 3 | # 4 | require.config { 5 | paths: { 6 | mainPage: "./models/mainPage" 7 | map: "./map/map" 8 | marker: "./map/marker" 9 | markerRenderer: "./map/markerRenderer" 10 | gps: "./services/gps" 11 | mockGps: "./services/mockGps" 12 | storage: "./services/storage" 13 | md5: "./md5.min" 14 | bootstrap: "../lib/bootstrap/js/bootstrap" 15 | jquery: "../lib/jquery/jquery" 16 | knockout: "../lib/knockout/knockout" 17 | leaflet: "../lib/leaflet/leaflet" 18 | } 19 | shim: { 20 | bootstrap: { 21 | deps: ["jquery"], 22 | exports: "$" 23 | } 24 | jquery: { 25 | exports: "$" 26 | } 27 | knockout: { 28 | exports: "ko" 29 | } 30 | } 31 | } 32 | 33 | require ["knockout", "mainPage", "bootstrap"], (ko, MainPageModel) -> 34 | 35 | model = new MainPageModel 36 | ko.applyBindings(model) 37 | 38 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | import sbt.Defaults.sbtPluginExtra 2 | 3 | // Comment to get more information during initialization 4 | logLevel := Level.Warn 5 | 6 | // The Typesafe repository 7 | resolvers += "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/" 8 | 9 | // Use the Play sbt plugin for Play projects 10 | libraryDependencies += sbtPluginExtra( 11 | TypesafeLibrary.playSbtPlugin.value, 12 | (sbtBinaryVersion in update).value, 13 | (scalaBinaryVersion in update).value 14 | ) 15 | 16 | addSbtPlugin("com.typesafe.sbt" % "sbt-less" % "1.0.6") 17 | addSbtPlugin("com.typesafe.sbt" % "sbt-coffeescript" % "1.0.0") 18 | addSbtPlugin("com.typesafe.sbt" % "sbt-rjs" % "1.0.7") 19 | addSbtPlugin("com.typesafe.sbt" % "sbt-digest" % "1.1.0") 20 | addSbtPlugin("com.typesafe.sbt" % "sbt-gzip" % "1.0.0") 21 | addSbtPlugin("com.typesafe.sbt" % "sbt-mocha" % "1.1.0") 22 | 23 | addSbtPlugin("com.typesafe.sbt" % "sbt-bintray-bundle" % "1.2.0") 24 | addSbtPlugin("com.lightbend.conductr" % "sbt-conductr" % "2.2.9") 25 | 
-------------------------------------------------------------------------------- /app/actors/RegionManagerClient.scala: -------------------------------------------------------------------------------- 1 | package actors 2 | 3 | import akka.actor.Actor 4 | import backend._ 5 | import akka.actor.Props 6 | import backend.RegionManager.UpdateUserPosition 7 | import akka.routing.FromConfig 8 | import models.backend.UserPosition 9 | 10 | object RegionManagerClient { 11 | def props(): Props = Props(new RegionManagerClient) 12 | } 13 | 14 | /** 15 | * A client for the region manager, handles routing of position updates to the 16 | * regionManager on the right backend node. 17 | */ 18 | class RegionManagerClient extends Actor { 19 | 20 | val regionManagerRouter = context.actorOf(Props.empty.withRouter(FromConfig), "router") 21 | 22 | val settings = Settings(context.system) 23 | 24 | def receive = { 25 | case p: UserPosition => 26 | // Calculate the regionId for the users position 27 | val regionId = settings.GeoFunctions.regionForPoint(p.position) 28 | // And send the update to the that region 29 | regionManagerRouter ! UpdateUserPosition(regionId, p) 30 | } 31 | } -------------------------------------------------------------------------------- /app/views/main.scala.html: -------------------------------------------------------------------------------- 1 | @(content: Html)(implicit req: RequestHeader) 2 | 3 | 4 | 5 | 6 |
7 |" + escapeHtml(userId) + "
" 18 | 19 | # Create the cluster marker icon 20 | createClusterMarkerIcon: (count) -> 21 | # Style according to the number of users in the cluster 22 | className = if count < 10 23 | "cluster-marker-small" 24 | else if count < 100 25 | "cluster-marker-medium" 26 | else 27 | "cluster-marker-large" 28 | return new Leaflet.DivIcon( 29 | html: "11 | Not long ago, response times in the seconds were considered appropriate. Browser refreshes were the norm in web 12 | applications. Systems would go down for hours of maintenance, or even be rebooted nightly, and this was ok 13 | because people only expected the systems to be up during business hours. Applications didn't have to scale 14 | because they didn't have big user bases. And the complexity requirements put on web applications meant that 15 | typical requests could easily be handled by a thread per request model. 16 |
17 | 18 |19 | Things are changing though. People expect web applications to react instantly. They expect them to be up all 20 | the time, while the applications are moving into the cloud, where failures are not exceptional, but rather are 21 | the norm, and so applications need to react to failure. Load on a web application can peak unpredictably, to 22 | be many orders of magnitude greater than normal, and so applications need to react to load and scale out. The 23 | complexity of business requirements means that in order to respond quickly to requests, things must 24 | be processed in parallel, reacting to events rather than waiting so as to utilise resources as efficiently as 25 | possible. 26 |
27 | 28 |29 | This application is an example of how to implement the tenets of the 30 | Reactive Manifesto using the 31 | Typesafe Reactive Platform. 32 |
33 | 34 |35 | It uses Play, combined with the latest in client side technologies to implement a reactive user interface. It 36 | uses Akka to provide horizontally scalable and resilient message passing and data management. 37 |
38 | 39 |40 | The tutorial starts by becoming familiar with the application, walking through its code and then 41 | enhancing the code. Toward the end there is a discussion on how reactive applications can be managed in production, using our 42 | ConductR product 43 | to demonstrate this in practice. 44 |
45 | 46 |52 | Before jumping into the code, let's see the app in action. Go to the Run 53 | tab, and start the application if it's not already started. Then visit it at: 54 | http://localhost:9000. 55 |
56 | 57 |58 | You will be presented with a screen asking for your email address. After entering it and submitting, you should 59 | see a map, and you should be able to find yourself on that map (this may take a short amount of time due to the way 60 | data flows through summary regions in the system; the further you zoom out, the less real-time the app gets). 61 |
62 | 63 |64 | If you zoom in on North Carolina, you should see some bots walking around. These bots are simulating other 65 | users; the data used to generate their paths is taken from hiking trail data obtained from 66 | HikeWNC. 67 |
68 | 69 |75 | The system can be broadly divided into three parts. The first part is the client side app. This is written 76 | in CoffeeScript, and runs in the browser. The second 77 | part is the web front end, a Play application that serves incoming web requests. The third part is 78 | the Akka backend, which manages the distribution of data across backend nodes, and the publishing and 79 | subscribing of events. 80 |
81 | 82 |83 | In the demo you're seeing now, the Play web front end and the Akka backend are running as one application, but 84 | in a production scenario, they would be run separately, allowing fine grained control of resources between the 85 | front and backend. 86 |
87 | 88 |89 | We also recommend that you factor out the two types of application into separate sbt modules, or even 90 | their own projects. They are presented here under the one project for the convenience of this tutorial. 91 | Factoring them out into their own modules/projects then allows them to be released/deployed independently of 92 | each other. 93 |
94 | 95 |101 | The client talks to the web front end using 102 | WebSockets: 103 |
104 | 105 |108 | All the communication above is fire and forget: after sending a user moved event, the client doesn't need 109 | anything in response; after sending a viewing area message, the client might get many messages, 110 | or maybe none at all, depending on whether there are any users in that area; and after the server sends position 111 | updates, it expects nothing in return from the client. 112 |
113 | 114 |115 | This differs from many traditional client-server applications, where clients make a request and expect a response. In a 116 | reactive application, much of the communication will not be request/response based, because reactive 117 | applications are designed so that data flows to consumers as it becomes available, and consumers of the data 118 | react to it; they don't ask for it. 119 |
120 | 121 |122 | For this reason, a WebSocket makes for a perfect transport for client-server communication in a reactive 123 | application, since it allows events to be passed with low overhead, without needing to wait for a response, and 124 | facilitates reacting to events from the server. 125 |
126 | 127 |133 | Before explaining the backend interface, we need to have a short lesson in geo based systems. A naive way to 134 | create the reactive maps application would be to send all data from all users to every connected user. This 135 | might work if there are only 10 users connected, or maybe even 100. At 1000, each user is going to be 136 | downloading megabytes of updates per second - it is not going to scale. 137 |
138 | 139 |140 | To manage this, we break the earth up into regions. There are many different ways to do this, but in our app 141 | we're using the simplest to understand: we flatten the map out into a rectangle, and then divide it into many 142 | smaller rectangles. How many rectangles is configurable, but we have defaulted this to 16 million. 143 | Because the earth is not a rectangle but a sphere, these rectangles don't all cover the same 144 | area: at the equator each one is a few kilometres wide, while at the poles each rectangle is only a few metres wide. But 145 | each rectangle is a constant number of degrees in longitude wide and degrees in latitude high, so transforming 146 | latitude and longitude coordinates to regions is a straightforward equation. 147 |
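To make that concrete, here is a rough, illustrative sketch of how a latitude and longitude could be mapped onto such a grid. This is not the project's actual GeoFunctions code (which also deals with summary region levels); the grid size and id format are assumptions for illustration only.

object RegionGridSketch {
  // 4000 x 4000 rectangles gives the 16 million regions mentioned above
  val lonRegions = 4000
  val latRegions = 4000

  // Map a latitude in [-90, 90] and a longitude in [-180, 180] to grid indexes
  def regionIndexes(lat: Double, lon: Double): (Int, Int) = {
    val latIndex = math.min(latRegions - 1, (((lat + 90) / 180) * latRegions).toInt)
    val lonIndex = math.min(lonRegions - 1, (((lon + 180) / 360) * lonRegions).toInt)
    (latIndex, lonIndex)
  }

  // A region id is then just the pair of indexes encoded in a string
  def regionId(lat: Double, lon: Double): String = {
    val (latIndex, lonIndex) = regionIndexes(lat, lon)
    s"region-$latIndex-$lonIndex"
  }
}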
148 | 149 |150 | The web front end talks to the backend using Akka clustering: 151 |
152 | 153 |156 | Actors in an Akka cluster may talk to each other without knowing whether they are on the same node or different 157 | nodes. In the above diagram, when a frontend node receives a position update from the client, the region 158 | responsible for that position may be on the same node, or may be on a different node. The web frontend doesn't 159 | need to worry: all it needs to know is which region to send to, and Akka will work out how to get the message 160 | to the right node. 161 |
162 | 163 |164 | Akka distributed PubSub messaging is used to publish location updates to the frontend. When the web frontend 165 | gets a new viewing area from the client, it works out which regions cover that viewing area, and then subscribes 166 | to updates from each of those regions. Whether those regions are on the same node or on different nodes is 167 | transparent; Akka ensures that the right messages get to the right subscribers on the right nodes. When a 168 | region has a new update to publish, it pushes it to the PubSub manager, which then pushes the messages to the 169 | right subscribers on the right nodes. 170 |
171 | 172 |173 | Note that Akka's distributed PubSub messaging makes no guarantees in terms of delivering a message. However, 174 | for this use-case, if the occasional publication of a location update is missed then we need not be concerned, 175 | as the next update will bring the client up to date. For situations where delivery guarantees become 176 | more important, look into 177 | Akka's distributed data library. 178 |
179 | 180 |181 | Finally, regions get summarised into summary regions, and these summary regions are used so that clients that 182 | are viewing large areas at once aren't consuming too much data. Lowest level regions and higher level summary 183 | regions send updates to their higher level summary region, which aggregates and publishes the information. 184 | When the client requests a viewing area that contains too many regions, it subscribes instead to updates from 185 | summary regions. 186 |
187 | 188 |194 | Now that we've got a broad overview of the system architecture, let's start looking at the code. We'll start 195 | off with tracing the code through from what happens when a user's GPS enabled device sends an update. 196 |
197 | 198 |199 | The entry point to this event flow is in 200 | gps.coffee. This file contains 201 | a class for handling the GPS integration of the app. It uses the 202 | HTML5 Geolocation API to watch for location 203 | updates from the browser. 204 |
205 | 206 |
207 | The first thing you'll find in this, and most other CoffeeScript files in this app, is a call to
208 | define. This is a RequireJS call, used to
209 | define a module. RequireJS allows JavaScript to be developed in a modular way, which is important for rich
210 | client side apps that heavily use JavaScript like this one. At the bottom of the file you can see a return
211 | statement returning the Gps class that we've declared; this means anything that imports our module
212 | will get that class back.
213 |
216 | The bulk of the code in this file is actually dealing with ensuring that neither too few, nor too many location
217 | updates are sent to the server. It ensures that a location update is sent at least every 10 seconds, but no
218 | more frequently than every 2 seconds. The most pertinent code that we're interested in now though is the
219 | navigator.geolocation.watchPosition(...) call, which is the HTML5 Geolocation API call to watch for
220 | GPS updates, and the @ws.send(...) call, which sends a user-moved event as JSON
221 | through the WebSocket, with the user's current position.
222 |
225 | The position field of this event is formatted using the GeoJSON standard,
226 | which you'll soon see is used throughout the application.
227 |
230 | So we can now see how location updates are obtained from the browser. But where does the WebSocket that
231 | they're sent down come from? You'll see that the constructor of the Gps class accepts the WebSocket as a
232 | parameter. This constructor is called from the
233 | mainPage.coffee module. In
234 | this module, you can see that in the define call it declares a few dependencies, one being the
235 | ./gps module that we just looked at.
236 |
239 | Scroll down to the connect method, and you'll see the following code:
240 |
@ws = new WebSocket(jsRoutes.controllers.Application.stream(email).webSocketURL())
243 |
244 |
245 | This is the code that creates the WebSocket, and a few lines below that, in the onopen callback,
246 | you can see where we are passing the WebSocket to the Gps constructor. The URL for the WebSocket
247 | is generated from a JavaScript reverse route which is defined in
248 | main.scala.html.
249 |
252 | Open main.scala.html. This is the template 253 | where that reverse route is defined. Play has a configuration 254 | file called routes; this file contains all the configuration 255 | for how incoming requests are routed to their corresponding actions in Play. In addition to providing this 256 | forward routing, Play also generates a reverse router that code such as this template can call, and it will 257 | return the URL that can be used to reach that route. This means that your path information is kept in one 258 | place - in your routes file - and everything else in your application can depend on it. 259 | Play also provides the same mechanism for client-side code, so that you can assemble URLs and request methods 260 | by applying a function and giving it input parameters, which are used to populate placeholder values 261 | such as path variables and query parameters. 262 |
263 | 264 |
265 | In the routes file, you can see that the /stream/:email path is routed to
266 | controllers.Application.stream, so the JavaScript reverse router call
267 | jsRoutes.controllers.Application.stream(email).webSocketURL will return us that path.
268 |
271 | You can read more about routing in Play Framework 272 | here. 273 | You can also read more about JavaScript routing 274 | here. 275 |
276 | 277 |
283 | In the routes file, we saw how the WebSocket route was
284 | defined, and how it gets routed to the controllers.Application.stream method. Let's open that
285 | class now, Application.scala.
286 |
289 | Looking at the stream method, the first thing to notice is that it is declared to be a WebSocket
290 | action that works with ClientEvent messages. These messages are defined in
291 | ClientConnection.scala, where we can see our
292 | three types of messages: UserMoved, ViewingArea and UserPositions.
293 |
296 | Below the declaration of the message types, we can see formats for serialising these events to and from JSON, 297 | and for formatting the WebSocket frames. We won't go into too much detail here; you can read more about Play's 298 | JSON support here. 299 |
300 | 301 |
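As an illustration of the approach (the real formats in ClientConnection.scala are more involved, since they also discriminate between the different event types), a Play JSON format for a simple case class can be derived with a one-line macro. The types below are illustrative shapes only, not the app's actual definitions.

import play.api.libs.json.{ Format, Json }

// Illustrative shapes; see ClientConnection.scala for the app's real event types
case class Point(lat: Double, lng: Double)
case class UserMoved(position: Point)

object ClientEventFormatsSketch {
  implicit val pointFormat: Format[Point] = Json.format[Point]
  implicit val userMovedFormat: Format[UserMoved] = Json.format[UserMoved]
}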
302 | You can see back in Application.scala
303 | that we have told Play to use an actor to handle the WebSocket. This means our deserialized
304 | ClientEvent messages are sent to this actor, and when this actor sends ClientEvent
305 | messages to the passed-in upstream actor, those messages will be serialized and sent over the WebSocket
306 | to the client.
307 |
310 | Back in ClientConnection.scala, beneath 311 | the event message types, you will find the actual actor that handles the client connection. The receive method 312 | shows the handling of the different message types that this actor will receive. We'll focus on just one of these 313 | message types. 314 |
315 | 316 |
317 | Each time a UserMoved event is received, it's translated to a UserPosition object,
318 | and sent to the RegionManagerClient.
319 | This class is responsible for sending user position updates to the right node for the region that that position
320 | lives in. You can see in that class that the first thing it does is look up the regionId, and then it creates
321 | a UserPositionUpdate message, and sends that message to a router.
322 |
325 | But how does that router get it to the right node? The configuration for that router can be found in
326 | application.conf. Scrolling down to the
327 | configuration in the akka section, you'll see this:
328 |
/regionManagerClient/router {
331 | router = consistent-hashing
332 | nr-of-instances = 1000
333 | cluster {
334 | enabled = on
335 | routees-path = "/user/regionManager"
336 | allow-local-routees = on
337 | use-role = "backend-region"
338 | }
339 | }
340 |
341 | 342 | The routing to the node responsible for a region is done with a cluster-aware 343 | consistent hashing 344 | router. The region identifier is used as the key for the consistent hashing. This means that updates 345 | for a region are routed to the backend node responsible for that region. When the number of nodes in 346 | the cluster changes, the responsibility for a region may change. In this application the states of the 347 | regions don't have to be migrated when this happens. Updates for some regions are routed to a new 348 | backend node and old data will expire. For a short period the region points (counts of users) might 349 | be slightly inaccurate, but that is acceptable for this application. 350 |
351 | 352 |
353 | The hash code used to route messages is specified by the ConsistentHashable interface; you can see
354 | that the UpdateUserPosition message
355 | implements this interface, and defines the hash key to be the region ID that the update is for.
356 |
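The shape of such a message is roughly the following. This is an illustrative sketch only; the real UpdateUserPosition message lives in RegionManager.scala and wraps the app's own region and position types.

import akka.routing.ConsistentHashingRouter.ConsistentHashable

// Illustrative types standing in for the app's RegionId and UserPosition
case class RegionId(name: String)

case class UpdateUserPositionSketch(regionId: RegionId, userId: String) extends ConsistentHashable {
  // All updates for the same region hash to the same routee, and therefore the same node
  override def consistentHashKey: Any = regionId
}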
359 | If you're interested in learning the full details of Akka routing and how to configure it, you can read about 360 | Routing and 361 | 362 | Cluster Aware Routers in the Akka documentation. 363 |
364 | 365 | 366 | 367 |373 | We've seen how the web frontend receives GPS user position events and then routes them to the right backend 374 | node in the Akka cluster. Now let's find out what happens with the events when it reaches the backend node. 375 |
376 | 377 |378 | In the configuration for the router that we saw before, we could see this config item defined: 379 |
380 | 381 |routees-path = "/user/regionManager"
382 |
383 |
384 | /user is the namespace for all user defined actors (as opposed to actors defined by the system
385 | itself), so this says that the messages get sent to a user defined actor called regionManager,
386 | which is implemented by the RegionManager
387 | class.
388 |
391 | The region manager is responsible for managing all the regions that belong on that node. If it gets some data 392 | for a region, and an actor for that region doesn't exist yet, it creates it. Once it has ensured that an actor 393 | exists for that region, then it sends the user position to that region. 394 |
395 | 396 |
397 | The actor class that represents a region is called
398 | Region. This class has a map called
399 | activeUsers, and when it receives the user position, it adds that user's position to the map.
400 |
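A stripped-down sketch of that bookkeeping is shown below. The real Region actor also expires stale positions and periodically publishes its points, which this sketch leaves out, and the message type here is an assumption for illustration.

import akka.actor.Actor

object RegionSketch {
  case class UserPos(userId: String, lat: Double, lon: Double)
}

class RegionSketch(regionId: String) extends Actor {
  import RegionSketch._

  // userId -> last known position of each user currently in this region
  var activeUsers = Map.empty[String, UserPos]

  def receive = {
    case p: UserPos =>
      activeUsers = activeUsers.updated(p.userId, p)
  }
}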
409 | A client displays a section of the map, which is decorated with live markers of other users in that area. 410 | How are those user positions published to the client? 411 |
412 | 413 |414 | When the user zooms or changes map position the client sends a 415 | ViewingArea event to the server, which 416 | ends up in PositionSubscriber via 417 | the controller. 418 | The PositionSubscriber works out which regions cover that viewing area, and then subscribes to updates from 419 | each of those regions. 420 |
421 | 422 |423 | The published updates of user positions come from the backend Region 424 | actors. The thing that ties the publisher and subscriber together is the named topic, which in this case is 425 | the region id. 426 |
427 | 428 |429 | In a similar way the PositionSubscriber may decide to subscribe to summary regions, and then the published 430 | region points comes from the SummaryRegion 431 | actors. 432 |
433 | 434 |435 | The publish/subscribe 436 | mechanism in Akka is a registry of subscribers that is replicated to members in the cluster. 437 | There is no central hub or broker. When publishing a message to a named topic it sends the message to nodes with 438 | subscribers of that topic, and then delivers the message to all subscribing actors on that node. The message is sent 439 | over the wire only once per node that has at least one subscriber of the topic. The decoupling of publisher and 440 | subscriber makes it easy to add and remove nodes in the cluster as needed. 441 |
442 | 443 |444 | Changes of subscribers are disseminated in a scalable way to other nodes with a gossip protocol. The registry is 445 | eventually consistent, i.e. changes are not immediately visible at other nodes, but typically they will be fully 446 | replicated to all other nodes after a few seconds. 447 |
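In code, subscribing and publishing look roughly like this. It is a sketch using the akka-contrib module of this era (in later Akka versions the mediator lives in akka.cluster.pubsub.DistributedPubSub instead), and the actor and topic names are illustrative rather than the app's exact ones.

import akka.actor.Actor
import akka.contrib.pattern.{ DistributedPubSubExtension, DistributedPubSubMediator }

class RegionPointsSubscriberSketch(regionId: String) extends Actor {
  val mediator = DistributedPubSubExtension(context.system).mediator

  // Subscribe to the topic named after the region id
  override def preStart(): Unit =
    mediator ! DistributedPubSubMediator.Subscribe(regionId, self)

  def receive = {
    case _ => // forward the received region points to interested parties, e.g. the client connection
  }
}

// A region publishes to the same topic name:
//   mediator ! DistributedPubSubMediator.Publish(regionId, regionPoints)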
448 | 449 |456 | At this stage of the tutorial, we've seen: 457 |
458 | 459 |470 | And now for something completely different. 471 |
472 | 473 |479 | So far you are running the application in one single JVM, hosting both frontend 480 | and backend. Let's try to add more backend and frontend nodes to the cluster. 481 |
482 | 483 |484 | Open a terminal and change directory to the root directory of the reactive-maps application. Start a backend node 485 | with the following command (on one line): 486 |
487 |
488 | <path to activator dir>/activator
489 | -Dakka.remote.netty.tcp.port=0
490 | -Dakka.cluster.seed-nodes.1="akka.tcp://application@127.0.0.1:2552"
491 | -Dakka.cluster.roles.1=backend-region
492 | "run-main backend.Main"
493 |
494 |
495 | 496 | This runs the backend.Main class and 497 | overrides the configuration to bind Akka remoting to a random available port and use the "backend-region" 498 | cluster role for this node. It also declares the node that is running as a seed node so that this 499 | node can find the already established cluster. 500 |
501 | 502 |503 | If you take a look at the log in Run you can see that the new node 504 | joined the cluster. The new node knows how to join the cluster because the first node running on port 2552 505 | is configured as initial contact point in the 'seed-nodes' property in the 506 | application.conf. 507 | You can read more about Akka Clustering in the 508 | documentation. 509 |
510 | 511 |512 | You can repeat the command in new terminal windows to add more backend nodes. 513 |
514 | 515 |516 | You can also add more simulated users with the following command (on one line): 517 |
518 |
519 | <path to activator dir>/activator
520 | -Dakka.remote.netty.tcp.port=0
521 | -Dakka.cluster.seed-nodes.1="akka.tcp://application@127.0.0.1:2552"
522 | -Dakka.cluster.roles.1=frontend
523 | -DreactiveMaps.bots.totalNumberOfBots=500
524 | "run-main backend.Main"
525 |
526 |
527 | 528 | The following command (on one line) will start another frontend node listening on HTTP port 9001: 529 |
530 |
531 | <path to activator dir>/activator
532 | -Dhttp.port=9001
533 | -Dakka.remote.netty.tcp.port=0
534 | -Dakka.cluster.seed-nodes.1="akka.tcp://application@127.0.0.1:2552"
535 | -Dakka.cluster.roles.1=frontend
536 | run
537 |
538 | 539 | Try the added frontend in a new browser window: http://localhost:9001 540 |
541 | 542 |548 | Now that we've had a detailed look at some of the system, let's try and add a new feature. Until now, our view 549 | of the data has been region based - all data is associated with and stored in an actor for a region. This 550 | allows us to shard regions over multiple nodes, allowing efficient access to the data by node. 551 |
552 | 553 |554 | We're going to add functionality that is user based. We'll use exactly the same methods for scaling as for the 555 | region based data, so we can see how to build such a system from scratch. 556 |
557 | 558 |559 | We'll start off with a simple implementation that only works when there is one node, implementing first the 560 | backend, then the client side. Then we'll demonstrate how this implementation can be scaled out to shard the 561 | data across many nodes. Finally we'll show some techniques for ensuring data consistency when nodes are 562 | introduced into or removed from the cluster. 563 |
564 | 565 |566 | The new feature that we'll add is tracking the distance that a user has travelled. We'll make the client 567 | fetch this data when a user clicks on a marker. 568 |
569 | 570 |576 | The first thing we need to do is add a function for calculating the distance between two points. There are 577 | many such formulas that can be used to do this, but a simple general purpose one which will suit our purposes 578 | is called the haversine 579 | formula. 580 |
581 | 582 |
583 | Let's create a new method called distanceBetweenTwoPoints in
584 | GeoFunctions:
585 |
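The project's actual method works on its GeoJSON point types, but the underlying formula is small enough to sketch here on plain latitude/longitude doubles; the signature below is an assumption for illustration, so adapt it to GeoFunctions' own types.

object HaversineSketch {
  private val EarthRadiusMetres = 6371000.0

  // Great-circle distance in metres between two points given in degrees
  def distanceBetweenTwoPoints(lat1: Double, lon1: Double, lat2: Double, lon2: Double): Double = {
    val dLat = math.toRadians(lat2 - lat1)
    val dLon = math.toRadians(lon2 - lon1)
    val a = math.pow(math.sin(dLat / 2), 2) +
      math.cos(math.toRadians(lat1)) * math.cos(math.toRadians(lat2)) * math.pow(math.sin(dLon / 2), 2)
    val c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    EarthRadiusMetres * c
  }
}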
617 | The first thing we need to implement is a user meta data register. As a first implementation, we'll write one
618 | actor that will store all user meta data, we'll call it UserMetaData. Create a new file called
619 | app/backend/UserMetaData.scala now, and start off by adding the following:
620 |
640 | Some of the imports are unused for now, but the important thing to see here is the message types we've defined.
641 | This actor will receive UpdateUserPosition messages to update the user position, and will receive
642 | GetUser messages, and send back User messages to the sender.
643 |
646 | Now implement the actor itself: 647 |
648 | 649 | <
666 | Our actor depends on the settings (which provides the GeoFunctions class we already edited), and
667 | has a map that maps the userId to a tuple of the last position the user was seen at, and the distance they've
668 | travelled. We'll now implement handling the GetUser message in the receive method:
669 |
682 | You can see that if the user wasn't found, we just return 0. Now implement handling the
683 | UpdateUserPosition message:
684 |
702 | You can see here we're using the distanceBetweenPoints method we implemented earlier, updating the
703 | distance if we have a last position to compare it to, and updating the map with the new user data.
704 |
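Putting those pieces together, a compact version of the actor could look like the following sketch. It assumes that UserPosition exposes an id and a position and that distanceBetweenTwoPoints accepts two positions; the listing this tutorial builds up remains the authoritative version.

import akka.actor.{ Actor, Props }
import backend.Settings
import models.backend.UserPosition

object UserMetaDataSketch {
  case class GetUser(id: String)
  case class User(id: String, distance: Double)
  def props(): Props = Props(new UserMetaDataSketch)
}

class UserMetaDataSketch extends Actor {
  import UserMetaDataSketch._

  val settings = Settings(context.system)

  // userId -> (last seen position, total distance travelled in metres)
  var users = Map.empty[String, (UserPosition, Double)]

  def receive = {
    case GetUser(id) =>
      // Unknown users simply report a distance of 0
      sender() ! User(id, users.get(id).map(_._2).getOrElse(0d))

    case p: UserPosition =>
      val distanceSoFar = users.get(p.id) match {
        case Some((lastPosition, distance)) =>
          distance + settings.GeoFunctions.distanceBetweenTwoPoints(lastPosition.position, p.position)
        case None => 0d
      }
      users = users.updated(p.id, (p, distanceSoFar))
  }
}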
712 | Now we need to implement the code that sends the user position updates to this actor. This is a little tedious 713 | because there are two things that will do this: the web client and the bots. We'll start with the web client. 714 |
715 | 716 |
717 | The actors for the web client are initialized by a Play module called Actors. It's using some
718 | Play helpers for creating and injecting actors, described
719 | here. We want to define a new actor binding for the userMetaData:
720 |
729 | The class that will ultimately use this actor is the
730 | ClientConnection.scala actor. Open it,
731 | and modify the constructor of ClientConnection to accept it as a parameter:
732 |
743 | Now when the client sends a UserMoved message through the web socket, in addition to sending a
744 | message to the regionManagerClient, we also want to update the user meta data:
745 |
759 | The web front end is set to go, but the bots also need to be updated. As with ClientConnection,
760 | add a constructor parameter for the user meta data to GeoJsonBot.scala:
761 |
771 | And immediately after the bot sends a position update to the regionManagerClient, make it also
772 | send one to userMetaData:
773 |
785 | Update the props method: 786 |
787 | 788 | <
797 | Now this is called by BotManager; modify the props method, constructor parameter and
798 | the call that creates the bot:
799 |
815 | BotManager is initialised in two places, once in the web front end by Play in the
816 | BackendActors class, so update that to pass the userMetaData:
817 |
831 | And finally, we need to update the Main class, which
832 | is used when running a non-Play node in the cluster. Initialise the UserMetaData actor after
833 | creating RegionManagerClient:
834 |
843 | Now pass the actor to the BotManager props method:
844 |
860 | The user's distance is going to be requested by the user on an as-needed basis. Since the action is triggered 861 | by the user and the user expects a response, it makes sense in this case to use a simple HTTP request to get 862 | the data. So we're going to write a Play action to get the user meta data. 863 |
864 | 865 |
866 | Create a new Scala class called controllers.UserController:
867 |
895 | There are quite a number of imports here, but don't be put off! The first thing we will do is define the
896 | return format for the user meta data. We're going to return JSON, so we need something to convert instances of
897 | User into JSON. We can do this by implementing a JSON Writes for User.
898 | Play's JSON API provides a handy macro that does this for you at compile time, so you can have type safe JSON
899 | converters with a minimum of code. Create this writes instance in the UserController object:
900 |
909 | Since it is declared to be implicit, it will be used whenever we call a method that requires an implicit writes
910 | for User, which the Json.toJson method does in the next block of code that we'll
911 | write:
912 |
931 | Here we have declared the action itself. It's an asynchronous action, meaning it returns a future. The action 932 | also takes a parameter, the id of the user. We'll see later how that parameter is passed to the action. 933 |
934 | 935 |
936 | As a first step we have defined a timeout. In the code below that, we use the ask pattern, represented by the
937 | ? operator, to ask the user meta data actor for the user. Akka will not let you ask for something
938 | without specifying a timeout; the ? method takes an implicit timeout, so we've defined that to
939 | be two seconds.
940 |
943 | Having asked the actor for a user, we get back a future, and the first thing we do is map it to the type of
944 | User. Then we map that to something that will generate our response: we're returning an
945 | Ok response, with the body being the user serialised to JSON.
946 |
949 | Finally, we also want to recover from a timeout. We assume that if it timed out, it means the
950 | user could not be found.
951 |
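For orientation, the whole action fits together roughly as in this sketch. It assumes the controller is handed the userMetaData ActorRef, and the message and response types mirror the UserMetaData protocol introduced earlier; the tutorial's full listing remains the reference.

import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.pattern.{ ask, AskTimeoutException }
import akka.util.Timeout
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.Json
import play.api.mvc.{ Action, Controller }

class UserControllerSketch(userMetaData: ActorRef) extends Controller {

  // Assumed message/response types, mirroring the UserMetaData protocol above
  import backend.UserMetaData.{ GetUser, User }

  implicit val userWrites = Json.writes[User]

  def get(id: String) = Action.async {
    implicit val timeout = Timeout(2.seconds)
    (userMetaData ? GetUser(id))
      .mapTo[User]
      .map(user => Ok(Json.toJson(user)))
      .recover {
        // If the actor didn't answer in time, treat the user as not found
        case _: AskTimeoutException => NotFound
      }
  }
}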
954 | The last thing we need to do on the server side is declare how requests will be routed to this action. We do
955 | this in the routes file:
956 |
967 | You can see that we are defining a dynamic route with an email parameter, signalled by the colon
968 | before the parameter name. Then we invoke the action we just created, passing that email
969 | parameter as the id of the user.
970 |
977 | Now that we've got the server side ready, we can write the client side code to consume the new action we just
978 | created. Let's start with creating a new module,
979 | app/assets/javascripts/services/userInfo.coffee, that will
980 | make the AJAX call. Although this module will be very simple, it's best practice to split out the making of
981 | AJAX calls from the business logic of your code, so you can easily mock and test.
982 |
995 | Our userInfo module depends on jquery, and simply provides one method,
996 | get, which calls the action. jQuery's getJSON method returns a promise of the JSON,
997 | so we can consume that by attaching then or done callbacks to the returned promise.
998 |
1001 | Now in marker.coffee, we want to
1002 | use this service to look up the user info. So we will add the userInfo module as a dependency. If
1003 | you're familiar with RequireJS, you might notice that we're not using a relative path name here. This is because
1004 | we are using path aliases that we configure elsewhere, which also makes mocking dependencies simpler, as it
1005 | decouples modules from their implementation.
1006 |
1015 | In the constructor of the Marker class, after attaching the popup to the marker, we want to bind
1016 | to the click event of the marker so that we can update the popup with the user's distance each time the user
1017 | clicks on it:
1018 |
1032 | And now we want to handle that additional distance parameter that we've passed to renderPopup,
1033 | in markerRenderer.coffee:
1034 |
" + escapeHtml(userId) + "
" 1041 | if (distance) 1042 | popup + "Travelled: " + Math.floor(distance) + "m
" 1043 | else 1044 | popup 1045 | . 1046 | @ 1047 | 1048 |
1049 | Finally, since we've defined the new userInfo module, we need to declare what the path for that
1050 | module is. This can be done in main.coffee:
1051 |
1062 | And now we should be good to go, refresh your browser, and try clicking on a marker to see if the distance is 1063 | rendered. If you're looking at the bots walking around North Carolina, you might see that they are moving at 1064 | hundreds of metres per second - these bots are designed to provide interesting data, not necessarily realistic 1065 | data. 1066 |
1067 | 1068 |1074 | After making the previous changes to the client side logic, we've left some of the client side tests in a failing 1075 | state. Let's fix them. Start by running them to see that they are failing, by going to the 1076 | test tab. Run the tests, you should see some of them are failing. 1077 |
1078 | 1079 |1080 | We're using mocha to write tests, in combination with Squire.js to mock out Require.js dependencies. We've 1081 | also been very careful to design our client side code in such a way that the DOM manipulation code, and any code 1082 | doing AJAX or WebSockets or using any other browser based APIs are separated from the business logic. This 1083 | allows us to comprehensively test the important code. 1084 |
1085 | 1086 |1087 | Open MarkerSpec.coffee. This is where our 1088 | failing tests are. Since we've added a new user info service that 1089 | Marker.coffee depends on, we need to create a 1090 | mocked version for this: 1091 |
1092 | 1093 | <
1105 | Now to test that the marker correctly passes the looked up user distance to marker renderer, we'll modify the
1106 | MockMarkerRenderer.renderPopup method to "render" it:
1107 |
1120 | Now lets modify the test setup code to instantiate and mock the mock user info service that we created: 1121 |
1122 | 1123 | <1146 | Now try running the tests again. They should pass. Finally, let's add a new test that tests that the popup 1147 | is updated with the distance when the marker is clicked: 1148 |
1149 | 1150 | <1163 | Run the tests to ensure the new test also passes. 1164 |
1165 | 1166 |1172 | So now we've implemented something that works on a single node. However, this application has been designed to 1173 | scale to millions of users on hundreds of nodes - a feature that only works on a single node will not suffice. 1174 |
1175 | 1176 |1177 | The system should also be resilient to system crashes and restarts. To address these issues we will make it 1178 | persistent and distribute the actors over the backend nodes in the cluster. 1179 |
1180 | 1181 |1186 | The UserMetaData actor that we developed 1187 | previously holds the data for all users, which is obviously not scalable. Instead, let's rewrite the actor 1188 | to represent the data for one single user. We will use 1189 | Akka Persistence 1190 | to make it durable. 1191 |
1192 | 1193 | <
1267 | Akka Persistence takes an event sourced approach and stores the changes that build up its current state.
1268 | In this case the current position and the total distance is stored by the events
1269 | FirstObservation and Moved.
1270 |
1273 | It is recommended to encapsulate the state in an immutable class as illustrated
1274 | in the UserMetaData.State class. It knows how to create a new State
1275 | instance when applying the changes represented by domain events. It is important that the
1276 | state updates are free from side effects, because they are applied when the actor
1277 | is recovered from the persisted events. See receiveRecover.
1278 |
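The skeleton of such an event-sourced actor looks roughly like this. It is a sketch: the event and state names follow the prose above, but the field choices and the distance calculation are placeholders rather than the tutorial's exact code.

import akka.persistence.PersistentActor

object PersistentUserMetaDataSketch {
  // Commands
  case class Observation(lat: Double, lon: Double)
  case object GetDistance

  // Persisted events
  case class FirstObservation(lat: Double, lon: Double)
  case class Moved(lat: Double, lon: Double, distanceDelta: Double)

  // Immutable state that knows how to apply events to itself
  case class State(lastPosition: Option[(Double, Double)] = None, distance: Double = 0d) {
    def updated(event: Any): State = event match {
      case FirstObservation(lat, lon) => copy(lastPosition = Some((lat, lon)))
      case Moved(lat, lon, delta)     => copy(lastPosition = Some((lat, lon)), distance = distance + delta)
      case _                          => this
    }
  }
}

class PersistentUserMetaDataSketch(userId: String) extends PersistentActor {
  import PersistentUserMetaDataSketch._

  override def persistenceId: String = s"user-$userId"

  private var state = State()

  // Recovery replays the persisted events; applying them must be free of side effects
  override def receiveRecover: Receive = {
    case event => state = state.updated(event)
  }

  override def receiveCommand: Receive = {
    case Observation(lat, lon) =>
      val event = state.lastPosition match {
        case None                     => FirstObservation(lat, lon)
        case Some((fromLat, fromLon)) => Moved(lat, lon, distanceBetween(fromLat, fromLon, lat, lon))
      }
      persist(event) { e => state = state.updated(e) }

    case GetDistance =>
      sender() ! state.distance
  }

  // Placeholder: in the real app this would come from GeoFunctions
  private def distanceBetween(lat1: Double, lon1: Double, lat2: Double, lon2: Double): Double = 0d
}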
1284 | Akka cluster Sharding 1285 | is useful when you need to distribute actors across several nodes in the cluster and want to 1286 | be able to interact with them using their logical identifier, but without having to care about 1287 | their physical location in the cluster, which might also change over time. 1288 |
1289 | 1290 |
1291 | To use the UserMetaData actor with cluster sharding we must be able to
1292 | extract the identifier from the messages and define a hash function for the identifier.
1293 | The hash function is used to group actors into shards, potentially running on different
1294 | nodes.
1295 | Those functions can be defined in the companion object like this:
1296 |
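The two functions are typically defined along these lines. This sketch uses the extractEntityId/extractShardId names from later Akka versions (the akka-contrib module used at the time calls them idExtractor and shardResolver), and the shard count and message type are assumptions for illustration.

import akka.cluster.sharding.ShardRegion

object UserMetaDataShardingSketch {
  case class GetUser(id: String)

  val numberOfShards = 100

  // Which entity (user) a message is for, and the message to deliver to it
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case msg @ GetUser(id) => (id, msg)
  }

  // Hash the user id into a bounded number of shards
  val extractShardId: ShardRegion.ExtractShardId = {
    case GetUser(id) => (math.abs(id.hashCode) % numberOfShards).toString
  }
}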
1322 | To make the UserMetaData actors sharded in the cluster we need to register it to the
1323 | ClusterSharding extension. This must be done on all nodes in the cluster,
1324 | but we do it slightly different on the nodes with frontend role compared to
1325 | but we do it slightly differently on the nodes with the frontend role compared to
1326 | proxied from frontend nodes. Add the following in Actors.scala
1327 |
1356 | Our setup for the user meta data actor is now a little more complex than the Play bindings helper will allow, so
1357 | we need to create a new provider for it, and bind it to be an eager singleton. This can be done using the
1358 | javax.inject.Provider interface:
1359 |
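A sketch of what such a provider and its binding can look like is shown below; the class names, the "userMetaData" qualifier and the shard region name are illustrative assumptions rather than the project's actual wiring.

import javax.inject.{ Inject, Provider, Singleton }
import akka.actor.{ ActorRef, ActorSystem }
import akka.cluster.sharding.ClusterSharding
import play.api.inject.{ Binding, Module }
import play.api.{ Configuration, Environment }

@Singleton
class UserMetaDataProviderSketch @Inject() (system: ActorSystem) extends Provider[ActorRef] {
  // Hand out the shard region (or proxy) that was registered during cluster setup
  private lazy val region: ActorRef = ClusterSharding(system).shardRegion("UserMetaData")
  override def get(): ActorRef = region
}

class UserMetaDataModuleSketch extends Module {
  def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
    bind[ActorRef].qualifiedWith("userMetaData").toProvider[UserMetaDataProviderSketch].eagerly()
  )
}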
1378 | And corresponding in Main.scala: 1379 |
1380 | 1381 | <1417 | When using Akka Persistence in a cluster we need a journal that is replicated or accessible from all nodes. 1418 | In this sample we will use a 1419 | shared LevelDB journal 1420 | running on the node with port 2551. This is a single point of failure, and should not be used in production. 1421 | A real system would use a distributed journal. 1422 |
1423 | 1424 |1425 | Add the configuration for the journal: 1426 |
1427 | 1428 | <
1444 | Add the file app/backend/SharedJournalHelper.scala
1445 | with the following content:
1446 |
1447 | <
1494 | Initialize the shared journal in Actors.scala
1495 |
1507 | And corresponding in Main.scala:
1508 |
1525 | We have now added scalability and resilience to the new user meta data feature. Try it again, by refreshing your browser 1526 | at http://localhost:9000. 1527 |
1528 | 1529 |1530 | The files of the shared journal are saved in the target directory and when you restart 1531 | the application the state is recovered. You can clean the state with: 1532 |
1533 | 1534 |
1535 | <path to activator dir>/activator clean
1536 |
1537 |
1538 | 1544 | We have developed ConductR; 1545 | a solution that is designed to ease the deployment and 1546 | management of reactive applications and services. ConductR handles failures gracefully, scales elastically and 1547 | embraces change in your Reactive system. Enough of the sales talk though, let's ship this app! 1548 |
1549 | 1550 |1551 | For any application that will target ConductR you first need to add the sbt plugin 1552 | sbt-conductr to your 1553 | project/plugins.sbt. You will also need to have installed 1554 | Docker and the 1555 | conductr-cli in order to run a local ConductR cluster within the 1556 | sandbox. 1557 |
1558 | 1559 |1560 | sbt-conductr actually brings in another sbt auto plugin named BundlePlugin and between them, you declare some 1561 | settings required for deploying your application. 1562 | You then load and run a "bundle" which is the 1563 | unit of deployment for ConductR. A bundle is a zip file with the hash of its contents encoded in its name. 1564 |
1565 | 1566 |1567 | The sandbox allows you to deploy your application on a local ConductR cluster for the 1568 | purposes of testing. The sandbox provides a visualization of the loaded and running bundles, application log 1569 | reporting within the comfort of sbt and proxying of your services. You can therefore get your application 1570 | perfectly prepared for deploying into more production-like environments that run ConductR (such as EC2). 1571 |
1572 | 1573 |
1574 | In terms of modifying ReactiveMaps very little is required. The essential requirement is that your application 1575 | must signal ConductR that is it ready upon having 1576 | started up. 1577 | Note the lines that obtain a BUNDLE_SYSTEM and BUNDLE_SYSTEM_VERSION. These environment variables are set in the context of ConductR. 1578 | In our case we use these information to specify the actor system name. The latter lines signal to ConductR that the app has started up. 1579 | Also note that this code is only required when running pure Akka applications such as the backend of ReactiveMaps. 1580 | For Play, no configuration or code change is necessary. Both the Akka and Play approaches utilize a library named 1581 | 1582 | conductr-bundle-lib. That's it! 1583 |
1584 | 1585 |1586 | The following describes something that can only be done from the activator command line. You'll need to jump 1587 | into a separate terminal window and just type "activator" to fire up the activator console. 1588 |
1589 | 1590 |1591 | To prepare ConductR's bundles, with activator type: 1592 |
1593 | 1594 |bundle:dist
1595 | backend-region:dist
1596 | backend-summary:dist
1597 |
1598 | 1599 | Three separate bundles will then be generated. Note the files produced. 1600 |
1601 | 1602 |1603 | We then start up the sandbox so that we can load and then run the bundles we've produced: 1604 |
1605 | 1606 |sandbox run
1607 |
1608 | 1609 | The sandbox will report the address that bundles will be found on. It will be something like 192.168.59.103. 1610 | Take note of this address. 1611 |
1612 | 1613 |1614 | You can then load a bundle and run it using the following commands (substitute your paths and hashes as appropriate): 1615 |
1616 | 1617 |conduct load /pathtotheproject/target/bundle/reactive-maps-v1-76d4af00c95dbed5eddc4a7a93a753bf7a85c14771745eea79239b52f61b90a0
1618 | conduct run reactive-maps-frontend
1619 |
1620 | 1621 | Do the above for each bundle that you produced. 1622 |
1623 | 1624 |1625 | Once your bundles are loaded and running then you can type "conduct info" in order to see the state of the world. 1626 | You should see something like this: 1627 |
1628 | 1629 |ID NAME #REP #STR #RUN
1630 | 76d4af0-03202dc reactive-maps-frontend 1 0 1
1631 | 76d4af0-088d9a0 reactive-maps-backend-region 1 0 1
1632 | 76d4af0-52e1a1d reactive-maps-backend-summary 1 0 1
1633 |
1634 | 1635 | You can then visit your application by navigating to the address that the sandbox provided, and the port 1636 | that is configured for ReactiveMaps e.g. http://192.168.59.103:9000. The ip address is your corresponding docker host ip address. 1637 |
1638 | 1639 |1640 | With a production cluster (not the sandbox, which is only single noded!), you'd now be able to scale up instances 1641 | of each of these bundles by simply typing 1642 | "conduct run reactive-maps-frontend --scale 3" to get 3 instances of the frontend. Note that there is no need 1643 | to specify the seed nodes manually. ConductR takes care of that for you. ConductR will also 1644 | keep ReactiveMaps running should a node fail, or if itself fails. Resiliency is at the core of ConductR. 1645 |
1646 |1652 | Essential characteristic of a reactive application: 1653 |
1654 |1662 | Read more about how to build reactive applications in the 1663 | Reactive Manifesto. 1664 |
1665 | 1666 |1667 | Find out more about the Typesafe Reactive Platform. 1668 |
1669 | 1670 |