├── project
│   ├── build.properties
│   └── plugins.sbt
├── version.sbt
├── src
│   ├── main
│   │   └── scala
│   │       └── com
│   │           └── teambytes
│   │               └── awsleader
│   │                   ├── LeaderActionsHandler.scala
│   │                   ├── PeriodicTask.scala
│   │                   ├── LeaderActor.scala
│   │                   ├── AwsLeaderElection.scala
│   │                   ├── SchedulingLeaderActionsHandler.scala
│   │                   ├── EC2.scala
│   │                   ├── LeaderElectionActor.scala
│   │                   └── AkkaConfig.scala
│   └── multi-jvm
│       ├── scala
│       │   └── com
│       │       └── teambytes
│       │           └── awsleader
│       │               └── test
│       │                   ├── util
│       │                   │   ├── WrapperActor.scala
│       │                   │   ├── SeqRefsFactor.scala
│       │                   │   ├── STMultiNodeSpec.scala
│       │                   │   ├── ClusterMemberLeaves.scala
│       │                   │   ├── ClusterMemberUp.scala
│       │                   │   └── ClusterConfig.scala
│       │                   ├── LeaderActorSpec.scala
│       │                   └── LeaderElectionActorSpec.scala
│       └── resources
│           └── logback.xml
├── .gitignore
├── README.md
└── LICENSE

/project/build.properties:
--------------------------------------------------------------------------------
sbt.version=0.13.6

--------------------------------------------------------------------------------
/version.sbt:
--------------------------------------------------------------------------------
version in ThisBuild := "1.0.1-SNAPSHOT"
--------------------------------------------------------------------------------
/src/main/scala/com/teambytes/awsleader/LeaderActionsHandler.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader

trait LeaderActionsHandler {

  def onIsLeader(): Unit

  def onIsNotLeader(): Unit

}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.class
*.log

# sbt specific
.cache/
.history/
.lib/
dist/*
target/
lib_managed/
src_managed/
project/boot/
project/plugins/project/

# Scala-IDE specific
.scala_dependencies
.worksheet
--------------------------------------------------------------------------------
/src/multi-jvm/scala/com/teambytes/awsleader/test/util/WrapperActor.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader.test.util

import akka.actor.{ActorRef, Actor, Props}

object WrapperActor {
  def props(target: ActorRef) = Props(classOf[WrapperActor], target)
}

class WrapperActor(target: ActorRef) extends Actor {
  def receive = {
    case x => target forward x
  }
}
--------------------------------------------------------------------------------
/src/multi-jvm/scala/com/teambytes/awsleader/test/util/SeqRefsFactor.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader.test.util

import akka.actor.Props

class SeqRefsFactor(props: List[Props]) {
  private var index = 0

  def next(batchSize: Int): Props = {
    assert(batchSize == 10)
    next()
  }

  def next(): Props = {
    val actorRef = props(index)
    index += 1
    actorRef
  }
}
--------------------------------------------------------------------------------
/src/multi-jvm/scala/com/teambytes/awsleader/test/util/STMultiNodeSpec.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader.test.util

import akka.remote.testkit.MultiNodeSpecCallbacks
import org.scalatest.{BeforeAndAfterAll,
WordSpecLike}

trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with BeforeAndAfterAll {

  override def beforeAll() = multiNodeSpecBeforeAll()

  override def afterAll() = multiNodeSpecAfterAll()
}
--------------------------------------------------------------------------------
/src/multi-jvm/resources/logback.xml:
--------------------------------------------------------------------------------
[logback configuration; the XML markup was lost in this listing, only the encoder pattern survives: %level %logger{15} - %message%n%xException{5}]
--------------------------------------------------------------------------------
/src/main/scala/com/teambytes/awsleader/PeriodicTask.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader

/**
 * A Runnable task that can be run periodically, at the given rate in milliseconds.
 */
trait PeriodicTask extends Runnable {

  /**
   * The rate at which this task is executed while the node is leader
   */
  def periodMs: Long

  /**
   * The initial delay before this task is run for the first time after the node has become leader
   */
  def initialDelayMs: Long = 0

}
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
logLevel := Level.Warn

resolvers += "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/"

resolvers += Classpaths.sbtPluginReleases

addSbtPlugin("org.scoverage" %% "sbt-scoverage" % "0.99.5.1")

addSbtPlugin("com.github.mpeltonen" % "sbt-idea" % "1.6.0")

// plugins for SBT release to Maven central
addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.0.0")

addSbtPlugin("com.github.gseitz" % "sbt-release" % "0.8")

addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8")
--------------------------------------------------------------------------------
/src/multi-jvm/scala/com/teambytes/awsleader/test/util/ClusterMemberLeaves.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader.test.util

import akka.actor.{Actor, Address}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.MemberRemoved

// Test utility: answers the "HASLEFT" message with true once the member at the given address has been removed.
class ClusterMemberLeaves(address: Address) extends Actor {
  private val cluster = Cluster(context.system)

  override def preStart() = cluster.subscribe(self, classOf[MemberRemoved])
  override def postStop() = cluster.unsubscribe(self)

  private var result = false

  def receive = {
    case MemberRemoved(member, _) if member.address == address => result = true
    case "HASLEFT" => sender ! result
  }
}
--------------------------------------------------------------------------------
/src/main/scala/com/teambytes/awsleader/LeaderActor.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader

import akka.actor.{Props, Actor, ActorLogging}

class LeaderActor(handler: LeaderActionsHandler) extends Actor with ActorLogging {

  override def preStart() = {
    log.info("LeaderActor: Starting.")
    handler.onIsLeader()
  }

  override def postStop() = {
    handler.onIsNotLeader()
    log.info("LeaderActor: Stopped.")
  }

  override def receive: Receive = {
    case _ =>
      // Do nothing, we only care about leader election
  }

}

object LeaderActor {
  def props(handler: LeaderActionsHandler) = Props(classOf[LeaderActor], handler)
}
--------------------------------------------------------------------------------
/src/multi-jvm/scala/com/teambytes/awsleader/test/util/ClusterMemberUp.scala:
--------------------------------------------------------------------------------
package com.teambytes.awsleader.test.util

import akka.actor.{Actor, Address}
import akka.cluster.{Cluster, MemberStatus}
import akka.cluster.ClusterEvent.{ClusterDomainEvent, CurrentClusterState, MemberUp}

// Test utility: answers the "ISUP" message with true once the member at the given address is Up.
class ClusterMemberUp(address: Address) extends Actor {
  private val cluster = Cluster(context.system)

  override def preStart() = cluster.subscribe(self, classOf[ClusterDomainEvent])
  override def postStop() = cluster.unsubscribe(self)

  private var result = false

  def receive = {
    case state: CurrentClusterState => result = state.members.filter(_.status == MemberStatus.Up).map(_.address).contains(address)
    case MemberUp(member) if member.address == address => result = true
    case "ISUP" => sender !
result 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/multi-jvm/scala/com/teambytes/awsleader/test/util/ClusterConfig.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader.test.util 2 | 3 | trait ClusterConfig { 4 | 5 | def config(port: Int, seed: String) = 6 | s""" 7 | akka { 8 | actor { 9 | provider = "akka.cluster.ClusterActorRefProvider" 10 | } 11 | remote { 12 | netty.tcp { 13 | host = 'localhost' 14 | port = $port 15 | } 16 | } 17 | cluster { 18 | seed-nodes = [ 19 | "$seed" 20 | ] 21 | } 22 | }""" 23 | 24 | def configNoSeeds(port: Int) = 25 | s""" 26 | akka { 27 | actor { 28 | provider = "akka.cluster.ClusterActorRefProvider" 29 | } 30 | remote { 31 | netty.tcp { 32 | host = 'localhost' 33 | port = $port 34 | } 35 | } 36 | }""" 37 | 38 | } 39 | -------------------------------------------------------------------------------- /src/main/scala/com/teambytes/awsleader/AwsLeaderElection.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader 2 | 3 | import akka.actor.{ActorSystem, PoisonPill} 4 | import akka.contrib.pattern.ClusterSingletonManager 5 | import com.typesafe.config.Config 6 | import org.slf4j.LoggerFactory 7 | 8 | import scala.concurrent.ExecutionContext 9 | 10 | object AwsLeaderElection { 11 | 12 | def startLeaderElection(handler: LeaderActionsHandler)(implicit ec: ExecutionContext): Unit = 13 | new AwsLeaderElection(handler, AkkaConfig.apply())(ec) 14 | 15 | def startLeaderElection(handler: LeaderActionsHandler, defaults: Config)(implicit ec: ExecutionContext): Unit = 16 | new AwsLeaderElection(handler, AkkaConfig(defaults))(ec) 17 | 18 | } 19 | 20 | class AwsLeaderElection(handler: LeaderActionsHandler, akkaConfig: AkkaConfig)(implicit ec: ExecutionContext) { 21 | 22 | private val logger = LoggerFactory.getLogger(classOf[AwsLeaderElection]) 23 | 24 | logger.info("Loading leader election system...") 25 | logger.info(s"Seeds: ${akkaConfig.seeds}") 26 | 27 | private val clusterSystem = ActorSystem("aws-leader-election-cluster", akkaConfig.config) 28 | 29 | clusterSystem.actorOf( 30 | ClusterSingletonManager.props( 31 | singletonProps = LeaderElectionActor.props(handler, akkaConfig.seeds.size), 32 | singletonName = "aws-leader-elector", 33 | terminationMessage = PoisonPill, 34 | role = None 35 | ), 36 | name = "singleton" 37 | ) 38 | 39 | logger.info("Leader election started!") 40 | 41 | } -------------------------------------------------------------------------------- /src/main/scala/com/teambytes/awsleader/SchedulingLeaderActionsHandler.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader 2 | 3 | import java.util.concurrent.{TimeUnit, ScheduledFuture, ScheduledExecutorService} 4 | 5 | import org.slf4j.LoggerFactory 6 | 7 | trait SchedulingLeaderActionsHandler extends LeaderActionsHandler { 8 | 9 | private lazy val log = LoggerFactory.getLogger(getClass) 10 | 11 | def leaderExecutor: ScheduledExecutorService 12 | 13 | def leaderTasks: Iterable[PeriodicTask] 14 | 15 | val interruptTaskThreads = false 16 | 17 | // future around the periodic task, if we are leader & replicating 18 | @volatile private var scheduledFutures: Iterable[ScheduledFuture[_]] = Iterable.empty 19 | 20 | override def onIsLeader(): Unit = { 21 | // Only schedule tasks if we're not already the leader 22 | if (scheduledFutures.isEmpty) { 23 | // 
There was some disagreement on this. I think this is right. If we start to run slow, 24 | // we decrease the time between invocations to try to catch up. That feels better to me 25 | // than, "we start to get slow and still wait 1 second before running again". It should 26 | // speed up in that case. 27 | log.info("Starting leader tasks") 28 | scheduledFutures = leaderTasks.map(task => leaderExecutor.scheduleAtFixedRate(task, task.initialDelayMs, task.periodMs, TimeUnit.MILLISECONDS)) 29 | } 30 | } 31 | 32 | override def onIsNotLeader(): Unit = { 33 | log.info("Stopping leader tasks") 34 | scheduledFutures.map(_.cancel(interruptTaskThreads)) 35 | scheduledFutures = Iterable.empty 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /src/multi-jvm/scala/com/teambytes/awsleader/test/LeaderActorSpec.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader.test 2 | 3 | import akka.testkit.{TestActorRef, ImplicitSender} 4 | import akka.actor._ 5 | import com.teambytes.awsleader.{LeaderActionsHandler, LeaderActor} 6 | import org.scalatest.mock.MockitoSugar 7 | import util._ 8 | import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec} 9 | import java.util.UUID 10 | import scala.concurrent.duration._ 11 | import akka.util.Timeout 12 | import scala.language.postfixOps 13 | import com.typesafe.config.ConfigFactory 14 | import org.mockito.Mockito._ 15 | 16 | class LeaderActorSpecMultiJvmNode1 extends LeaderActorSpec 17 | class LeaderActorSpecMultiJvmNode2 extends LeaderActorSpec 18 | class LeaderActorSpecMultiJvmNode3 extends LeaderActorSpec 19 | 20 | abstract class LeaderActorSpec extends MultiNodeSpec(LAClusterConfig) with STMultiNodeSpec with ImplicitSender with MockitoSugar { 21 | 22 | implicit val timeout = Timeout(5 second) 23 | 24 | def initialParticipants = roles.size 25 | 26 | def newHandler = mock[LeaderActionsHandler] 27 | 28 | "Leader actor" should { 29 | "call leader action handler after first sync" in { 30 | runOn(LAClusterConfig.node1) { 31 | enterBarrier("deployed") 32 | 33 | val handler = newHandler 34 | val leaderActorName = UUID.randomUUID().toString 35 | val actor = TestActorRef(Props(classOf[LeaderActor], handler), leaderActorName) 36 | 37 | expectNoMsg(1.second) 38 | 39 | verify(handler).onIsLeader 40 | 41 | actor ! 
PoisonPill 42 | 43 | verify(handler).onIsNotLeader 44 | } 45 | 46 | runOn(LAClusterConfig.node2, LAClusterConfig.node3) { enterBarrier("deployed") } 47 | enterBarrier("finished") 48 | } 49 | 50 | } 51 | 52 | } 53 | 54 | object LAClusterConfig extends MultiNodeConfig with ClusterConfig { 55 | val seed = s"akka.tcp://LeaderActorSpec@localhost:33456" 56 | val node1 = role("node1") 57 | val node2 = role("node2") 58 | val node3 = role("node3") 59 | 60 | nodeConfig(node1)(ConfigFactory.parseString(config(33456, seed))) 61 | nodeConfig(node2)(ConfigFactory.parseString(config(33457, seed))) 62 | nodeConfig(node3)(ConfigFactory.parseString(configNoSeeds(33458))) 63 | } 64 | -------------------------------------------------------------------------------- /src/main/scala/com/teambytes/awsleader/EC2.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader 2 | 3 | import com.amazonaws.services.ec2.AmazonEC2Client 4 | import java.net.URL 5 | import java.io.{InputStreamReader, BufferedReader} 6 | import com.amazonaws.services.ec2.model.{InstanceStateName, DescribeInstancesRequest, Instance} 7 | import scala.collection.JavaConversions._ 8 | import com.amazonaws.services.autoscaling.model.{DescribeAutoScalingGroupsRequest, DescribeAutoScalingInstancesRequest} 9 | import com.amazonaws.services.autoscaling.AmazonAutoScalingClient 10 | 11 | private[awsleader] class EC2(scaling: AmazonAutoScalingClient, ec2: AmazonEC2Client) { 12 | 13 | def siblingIps: List[String] = groupInstanceIds(groupName(instanceId)).map(instanceFromId).collect { 14 | case instance if isRunning(instance) => 15 | instance.getPrivateIpAddress 16 | } 17 | 18 | def currentIp = instanceFromId(instanceId).getPrivateIpAddress 19 | 20 | val isRunning: Instance => Boolean = _.getState.getName == InstanceStateName.Running.toString 21 | 22 | /** 23 | * To view instance metadata from within all EC2 instances we use the following URI. 
24 | * See: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html 25 | */ 26 | private def instanceId = { 27 | val conn = new URL("http://169.254.169.254/latest/meta-data/instance-id").openConnection 28 | val in = new BufferedReader(new InputStreamReader(conn.getInputStream)) 29 | try in.readLine() finally in.close() 30 | } 31 | 32 | private def instanceFromId(id: String): Instance = { 33 | val result = ec2 describeInstances new DescribeInstancesRequest { 34 | setInstanceIds(id :: Nil) 35 | } 36 | result.getReservations.head.getInstances.head 37 | } 38 | 39 | private def groupName(instanceId: String) = { 40 | val result = scaling.describeAutoScalingInstances(new DescribeAutoScalingInstancesRequest { 41 | setInstanceIds(instanceId :: Nil) 42 | }) 43 | result.getAutoScalingInstances.head.getAutoScalingGroupName 44 | } 45 | 46 | private def groupInstanceIds(groupName: String) = { 47 | val result = scaling.describeAutoScalingGroups(new DescribeAutoScalingGroupsRequest { 48 | setAutoScalingGroupNames(groupName :: Nil) 49 | }) 50 | result.getAutoScalingGroups.head.getInstances.toList map (_.getInstanceId) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/main/scala/com/teambytes/awsleader/LeaderElectionActor.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader 2 | 3 | import akka.actor._ 4 | import akka.cluster.ClusterEvent.{MemberRemoved, MemberUp, CurrentClusterState, ClusterDomainEvent} 5 | import akka.cluster.{MemberStatus, Cluster, Member} 6 | import com.teambytes.awsleader.LeaderElectionActor.{Data, State} 7 | 8 | private[awsleader] class LeaderElectionActor(minMembers: Int, leaderProp: () => Props) extends Actor with FSM[State, Data] with ActorLogging { 9 | import LeaderElectionActor._ 10 | 11 | startWith(NoQuorum, Data(None, Set())) 12 | 13 | private val cluster = Cluster(context.system) 14 | override def preStart() = { 15 | log.info("LeaderElectionActor: Starting.") 16 | cluster.subscribe(self, classOf[ClusterDomainEvent]) 17 | } 18 | 19 | override def postStop() = { 20 | cluster.unsubscribe(self) 21 | log.info("LeaderElectionActor: Stopped.") 22 | } 23 | 24 | when(NoQuorum) { 25 | case e@Event(s:CurrentClusterState, d: Data) => stayOrGoToQuorum(d.copy(clusterMembers = s.members.filter(_.status == MemberStatus.Up))) 26 | case e@Event(MemberUp(member), d: Data) => stayOrGoToQuorum(d.copy(clusterMembers = d.clusterMembers + member)) 27 | case e@Event(MemberRemoved(member, previousStatus), d: Data) => stayOrGoToQuorum(d.copy(clusterMembers = d.clusterMembers - member)) 28 | } 29 | 30 | when(Quorum) { 31 | case e@Event(s:CurrentClusterState, d: Data) => stayOrGoToNoQuorum(d.copy(clusterMembers = s.members.filter(_.status == MemberStatus.Up))) 32 | case e@Event(MemberUp(member), d: Data) => stayOrGoToNoQuorum(d.copy(clusterMembers = d.clusterMembers + member)) 33 | case e@Event(MemberRemoved(member, previousStatus), d: Data) => stayOrGoToNoQuorum(d.copy(clusterMembers = d.clusterMembers - member)) 34 | } 35 | 36 | whenUnhandled { 37 | case e@Event(c:ClusterDomainEvent, d: Data) => stay using d 38 | } 39 | 40 | private def stayOrGoToQuorum(newData: Data) = 41 | if (newData.numberOfMembers() >= minMembers){ 42 | log.info("LeaderElectionActor: Quorum has been achieved. 
Current members: {}", newData.clusterMembers) 43 | goto(Quorum) using newData.copy(target = Some(context.actorOf(leaderProp(), "leader"))) 44 | } else { 45 | log.info("LeaderElectionActor: Quorum has not been reached. Current members: {}", newData.clusterMembers) 46 | stay using newData 47 | } 48 | 49 | private def stayOrGoToNoQuorum(newData: Data) = 50 | if (newData.numberOfMembers() < minMembers) { 51 | log.info("LeaderElectionActor: Quorum has been lost. Current members: {}", newData.clusterMembers) 52 | newData.target.foreach(_ ! PoisonPill) 53 | goto(NoQuorum) using newData.copy(target = None) 54 | } else { 55 | log.info("LeaderElectionActor: Still have quorum. Current members: {}", newData.clusterMembers) 56 | stay using newData 57 | } 58 | 59 | } 60 | 61 | object LeaderElectionActor { 62 | def props(handler: LeaderActionsHandler, minMembers: Int) = 63 | Props(classOf[LeaderElectionActor], minMembers, () => LeaderActor.props(handler)) 64 | 65 | // states 66 | private[awsleader] sealed trait State 67 | private[awsleader] case object NoQuorum extends State 68 | private[awsleader] case object Quorum extends State 69 | 70 | private[awsleader] case class Data(target: Option[ActorRef], clusterMembers: Set[Member]){ 71 | def numberOfMembers() = clusterMembers.size 72 | } 73 | 74 | } 75 | -------------------------------------------------------------------------------- /src/main/scala/com/teambytes/awsleader/AkkaConfig.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader 2 | 3 | import com.amazonaws.auth._ 4 | import com.amazonaws.services.autoscaling.AmazonAutoScalingClient 5 | import com.amazonaws.services.ec2.AmazonEC2Client 6 | import com.typesafe.config.{Config, ConfigValueFactory, ConfigFactory} 7 | import org.slf4j.LoggerFactory 8 | import scala.collection.JavaConverters._ 9 | import scala.util.Try 10 | 11 | private[awsleader] object AkkaConfig { 12 | 13 | def apply(defaults: Config = ConfigFactory.load()) = new AkkaConfig(defaults) 14 | 15 | /** 16 | * Create a credentials provider, based on configured access and secret keys 17 | * 18 | * If the keys are both set to "from-classpath", the provider will 19 | * look for a properties file in the classpath that contains properties 20 | * named access-key and secret-key. 
21 | * 22 | * @param accessKey the configured accessKey, or the 'from-classpath' string 23 | * @param secretKey the configured secretKey, or the 'from-classpath' string 24 | * @return an AWSCredentialsProvider wrapping the configured keys 25 | */ 26 | def createAwsCredentialsProvider(accessKey: String, secretKey: String): AWSCredentialsProvider = { 27 | 28 | def isClasspath(key: String) = "from-classpath".equals(key) 29 | 30 | if (isClasspath(accessKey) && isClasspath(secretKey)) { 31 | new ClasspathPropertiesFileCredentialsProvider() 32 | } else if (isClasspath(accessKey) || isClasspath(secretKey)) { 33 | throw new RuntimeException("Both AWS credentials 'aws.credentials.access-key' and 'aws.credentials.secret-key' must be 'from-classpath' or neither.") 34 | } else new AWSCredentialsProvider { 35 | override def getCredentials: AWSCredentials = new BasicAWSCredentials(accessKey, secretKey) 36 | override def refresh(): Unit = {} 37 | } 38 | } 39 | 40 | } 41 | 42 | private[awsleader] class AkkaConfig(defaults: Config) { 43 | 44 | private lazy val logger = LoggerFactory.getLogger(getClass) 45 | 46 | private val local = Try(defaults.getBoolean("aws.leader.local")).getOrElse(false) 47 | private val defaultPort = defaults.getString("akka.port") 48 | 49 | private lazy val ec2 = { 50 | val credentials = AkkaConfig.createAwsCredentialsProvider( 51 | defaults.getString("aws.credentials.access-key"), 52 | defaults.getString("aws.credentials.secret-key") 53 | ) 54 | val scalingClient = new AmazonAutoScalingClient(credentials) 55 | val ec2Client = new AmazonEC2Client(credentials) 56 | logger.debug("Creating EC2 client") 57 | new EC2(scalingClient, ec2Client) 58 | } 59 | 60 | private val (host, siblings, port) = { 61 | if (local) { 62 | logger.info("Running with local configuration") 63 | val nodes = defaults.getStringList("akka.cluster.seed-nodes").asScala 64 | val localPort = defaults.getString("akka.remote.netty.tcp.port") 65 | ("localhost", nodes, localPort) 66 | } else { 67 | logger.info("Using EC2 autoscaling configuration") 68 | (ec2.currentIp, ec2.siblingIps, defaultPort) 69 | } 70 | } 71 | 72 | val seeds = siblings.map { ip => 73 | if(local) { 74 | logger.debug(s"Adding seed node: $ip") 75 | ip 76 | } else { 77 | val add = s"akka.tcp://aws-leader-election-cluster@$ip:$defaultPort" 78 | logger.debug(s"Adding seed node: $add") 79 | add 80 | } 81 | } 82 | 83 | private val overrideConfig = 84 | ConfigFactory.empty() 85 | .withValue("akka.remote.netty.tcp.hostname", ConfigValueFactory.fromAnyRef(host)) 86 | .withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(port)) 87 | .withValue("akka.cluster.seed-nodes", ConfigValueFactory.fromIterable(seeds.asJava)) 88 | .withValue("akka.cluster.min-nr-of-members", ConfigValueFactory.fromAnyRef(seeds.size)) 89 | 90 | val config = { 91 | if(local) defaults.withValue("akka.cluster.min-nr-of-members", ConfigValueFactory.fromAnyRef(seeds.size)) else overrideConfig.withFallback(defaults) 92 | } 93 | 94 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | aws-leader-election 2 | ========== 3 | 4 | A Scala & Akka-Cluster based PnP leader election library for use in AWS, built on [Akka](http://akka.io/) & using the akka-cluster cluster singleton to perform leader elected tasks. 5 | 6 | Designed for use in a distributed Play application, deployed in an auto-scaling cluster in Amazon EC2 (**in a single region only**). 
Uses AWS's auto-scaling Java client to discover EC2 instances and creates an akka-cluster from the auto-scaling group members.

Settings
-------
Settings are read from `application.conf` at the root of the classpath, or passed as JVM run parameters.
- `akka.port` - The port to look for akka-cluster instances on.
- `aws.leader.local` - `true` to run locally and bypass AWS discovery.
- `aws.credentials` - `access-key` & `secret-key` to pass to the AWS client for EC2 instance discovery.

Maven Central Dependency
-------

    "com.teambytes" %% "aws-leader-election" % "1.0.0"

Example Usage (Play Application)
-------

### Global.scala

    import com.teambytes.awsleader.AwsLeaderElection

    object Global extends play.api.GlobalSettings {

      implicit val ec = play.api.libs.concurrent.Execution.defaultContext

      override def onStart(app: play.api.Application) = {
        AwsLeaderElection.startLeaderElection(new TestLeaderElectionHandler(), app.configuration.underlying)
      }

    }

### TestLeaderElectionHandler.scala

    import java.util.concurrent.{Executors, ScheduledExecutorService}

    import com.teambytes.awsleader.{PeriodicTask, SchedulingLeaderActionsHandler}
    import play.core.NamedThreadFactory

    class TestLeaderElectionHandler extends SchedulingLeaderActionsHandler {

      val tasks = Set(new TestPeriodicJob())

      override def leaderExecutor: ScheduledExecutorService = Executors.newScheduledThreadPool(1, new NamedThreadFactory("test-job"))

      override def leaderTasks: Iterable[PeriodicTask] = tasks
    }

### TestPeriodicJob.scala

    import com.teambytes.awsleader.PeriodicTask
    import play.api.Logger

    class TestPeriodicJob extends PeriodicTask {

      import scala.concurrent.duration._

      override def periodMs: Long = 5.seconds.toMillis

      override def run(): Unit = {
        Logger.info(
          """
            |
            |
            |
            |
            |Running test periodic job here and now!!!!!!!
            |
            |
            |
            |
          """.stripMargin)
      }
    }

Potential problems to be aware of
-------
This library uses the Akka cluster singleton pattern, which has several drawbacks, some of which are listed below:

- the cluster singleton may quickly become a performance bottleneck,
- you cannot rely on the cluster singleton being available non-stop - e.g. when the node on which the singleton was running dies, it will take a few seconds for this to be noticed and for the singleton to be migrated to another node,
- in the case of a network partition in a cluster that is using Automatic Downing (see "Automatic vs. Manual Downing" in the Akka documentation), the isolated sub-clusters may each decide to spin up their own singleton, meaning that there might be multiple singletons running in the system, yet the clusters have no way of finding out about them (because of the network partition).

Especially the last point is something you should be aware of: in general, when using the Cluster Singleton pattern you should take care of downing nodes yourself and not rely on the timing-based auto-down feature.
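To make the settings concrete, here is a minimal `application.conf` sketch covering the keys listed above. The port number and credential placeholders are only examples, and `akka.cluster.auto-down-unreachable-after` is Akka's own setting rather than one read by this library; it is shown here, left at its default of `off`, purely to reflect the advice above about preferring manual downing:

    akka {
      port = 9001                        # port used to look for / form the akka-cluster
      cluster {
        # Keep Akka's automatic downing disabled (the default) and down
        # unreachable nodes yourself, e.g. with Cluster(system).down(address).
        auto-down-unreachable-after = off
      }
    }

    aws {
      leader.local = false               # true bypasses EC2 discovery (can also be passed as -Daws.leader.local=true)
      credentials {
        access-key = "YOUR_ACCESS_KEY"   # or "from-classpath" to load both keys from a properties file on the classpath
        secret-key = "YOUR_SECRET_KEY"
      }
    }

When `aws.leader.local` is `true`, the library reads `akka.cluster.seed-nodes` and `akka.remote.netty.tcp.port` from this configuration instead of querying the EC2 auto-scaling API.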
89 | 90 | Running Locally 91 | ------- 92 | 93 | When running locally, make sure to provide `-Daws.leader.local=true` 94 | 95 | License 96 | ------- 97 | 98 | *Apache 2.0* 99 | 100 | Links & kudos 101 | ------------- 102 | 103 | * [akka-ec2 - Example setup of an Akka cluster in an Amazon EC2 AutoScaling group](https://github.com/chrisloy/akka-ec2) 104 | -------------------------------------------------------------------------------- /src/multi-jvm/scala/com/teambytes/awsleader/test/LeaderElectionActorSpec.scala: -------------------------------------------------------------------------------- 1 | package com.teambytes.awsleader.test 2 | 3 | import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec} 4 | import akka.testkit.{TestActorRef, TestProbe, ImplicitSender} 5 | import java.util.UUID 6 | import akka.actor._ 7 | import com.teambytes.awsleader.LeaderElectionActor 8 | import com.teambytes.awsleader.test.util._ 9 | import scala.concurrent.Await 10 | import akka.actor.ActorIdentity 11 | import akka.actor.Identify 12 | import akka.util.Timeout 13 | import scala.concurrent.duration._ 14 | import scala.language.postfixOps 15 | import com.typesafe.config.ConfigFactory 16 | import akka.cluster.Cluster 17 | 18 | class LeaderElectionActorSpecMultiJvmNode1 extends LeaderElectionActorSpec 19 | class LeaderElectionActorSpecMultiJvmNode2 extends LeaderElectionActorSpec 20 | class LeaderElectionActorSpecMultiJvmNode3 extends LeaderElectionActorSpec 21 | 22 | /** 23 | * NOTE: A node in a cluster can only leave once. 24 | */ 25 | abstract class LeaderElectionActorSpec extends MultiNodeSpec(LEAClusterConfig) with STMultiNodeSpec with ImplicitSender { 26 | 27 | implicit val timeout = Timeout(5 second) 28 | 29 | def initialParticipants = roles.size 30 | 31 | def leaderProps(factor: SeqRefsFactor) = () => factor.next() 32 | 33 | "Leader Election actor" should { 34 | "not start any actor when quorum not reached" in { 35 | runOn(LEAClusterConfig.node1) { 36 | enterBarrier("deployed") 37 | val leader1 = TestProbe() 38 | val leaderFactory = new SeqRefsFactor(List(WrapperActor.props(leader1.ref))) 39 | val leaderElectionActorName = UUID.randomUUID().toString 40 | val actor = TestActorRef(Props(classOf[LeaderElectionActor], 10, leaderProps(leaderFactory)), leaderElectionActorName) 41 | vertifyActorDoesNotExistsOnPath(s"akka.tcp://LeaderElectionActorSpec@localhost:23456/user/$leaderElectionActorName/leader") 42 | expectNoMsg(1.second) 43 | actor ! PoisonPill 44 | } 45 | 46 | runOn(LEAClusterConfig.node2, LEAClusterConfig.node3) { enterBarrier("deployed")} 47 | enterBarrier("finished") 48 | } 49 | 50 | "start leader when quorum satisfied on initial CurrentClusterState" in { 51 | runOn(LEAClusterConfig.node1) { 52 | enterBarrier("deployed") 53 | 54 | val leader1 = TestProbe() 55 | val leaderFactory = new SeqRefsFactor(List(WrapperActor.props(leader1.ref))) 56 | 57 | val leaderElectionActorName = UUID.randomUUID().toString 58 | val actor = TestActorRef(Props(classOf[LeaderElectionActor], 1, leaderProps(leaderFactory)), leaderElectionActorName) 59 | 60 | vertifyActorExistsOnPath(s"akka.tcp://LeaderElectionActorSpec@localhost:23456/user/$leaderElectionActorName/leader") 61 | actor ! 
PoisonPill 62 | 63 | enterBarrier("clean") 64 | } 65 | 66 | runOn(LEAClusterConfig.node2, LEAClusterConfig.node3) { 67 | enterBarrier("deployed") 68 | enterBarrier("clean") 69 | } 70 | enterBarrier("finished") 71 | } 72 | 73 | "start leader when Quorum satisfied when additional node joins" in { 74 | runOn(LEAClusterConfig.node1) { 75 | enterBarrier("deployed") 76 | 77 | val leader1 = TestProbe() 78 | val leaderFactory = new SeqRefsFactor(List(WrapperActor.props(leader1.ref))) 79 | 80 | val leaderElectionActorName = UUID.randomUUID().toString 81 | val actor = TestActorRef(Props(classOf[LeaderElectionActor], 2, leaderProps(leaderFactory)), leaderElectionActorName) 82 | 83 | vertifyActorDoesNotExistsOnPath(s"akka.tcp://LeaderElectionActorSpec@localhost:23456/user/$leaderElectionActorName/leader") 84 | 85 | enterBarrier("addNodeTwo") 86 | enterBarrier("nodeTwoAdded") 87 | 88 | vertifyActorExistsOnPath(s"akka.tcp://LeaderElectionActorSpec@localhost:23456/user/$leaderElectionActorName/leader") 89 | actor ! PoisonPill 90 | } 91 | 92 | runOn(LEAClusterConfig.node2) { 93 | enterBarrier("deployed") 94 | enterBarrier("addNodeTwo") 95 | nodeUp(node(LEAClusterConfig.node2).address) 96 | enterBarrier("nodeTwoAdded") 97 | } 98 | 99 | runOn(LEAClusterConfig.node3) { 100 | enterBarrier("deployed") 101 | enterBarrier("addNodeTwo") 102 | enterBarrier("nodeTwoAdded") 103 | } 104 | 105 | enterBarrier("finished") 106 | } 107 | 108 | "terminate leader when Quorum is lost" in { 109 | runOn(LEAClusterConfig.node1) { 110 | enterBarrier("deployed") 111 | 112 | val leader1 = TestProbe() 113 | val leaderFactory = new SeqRefsFactor(List(WrapperActor.props(leader1.ref))) 114 | 115 | val leaderElectionActorName = UUID.randomUUID().toString 116 | val actor = TestActorRef(Props(classOf[LeaderElectionActor], 2, leaderProps(leaderFactory)), leaderElectionActorName) 117 | 118 | vertifyActorExistsOnPath(s"akka.tcp://LeaderElectionActorSpec@localhost:23456/user/$leaderElectionActorName/leader") 119 | 120 | enterBarrier("removeNodeTwo") 121 | enterBarrier("nodeTwoRemoved") 122 | 123 | vertifyActorDoesNotExistsOnPath(s"akka.tcp://LeaderElectionActorSpec@localhost:23456/user/$leaderElectionActorName/leader") 124 | 125 | actor ! PoisonPill 126 | 127 | enterBarrier("clean") 128 | } 129 | 130 | runOn(LEAClusterConfig.node2) { 131 | nodeUp(node(LEAClusterConfig.node2).address) 132 | enterBarrier("deployed") 133 | enterBarrier("removeNodeTwo") 134 | downNode(node(LEAClusterConfig.node2).address) 135 | Thread.sleep(2000) 136 | enterBarrier("nodeTwoRemoved") 137 | enterBarrier("clean") 138 | } 139 | 140 | runOn(LEAClusterConfig.node3) { 141 | enterBarrier("deployed") 142 | enterBarrier("removeNodeTwo") 143 | enterBarrier("nodeTwoRemoved") 144 | enterBarrier("clean") 145 | } 146 | 147 | enterBarrier("finished") 148 | } 149 | } 150 | 151 | 152 | private def nodeUp(address: Address): Unit = { 153 | import akka.pattern._ 154 | Cluster(system).join(node(LAClusterConfig.node1).address) 155 | val ref = system.actorOf(Props(classOf[ClusterMemberUp], address)) 156 | awaitCond(Await.result((ref ? "ISUP").mapTo[Boolean], 1 second)) 157 | ref ! PoisonPill 158 | } 159 | 160 | private def downNode(address: Address) = { 161 | import akka.pattern._ 162 | val ref = system.actorOf(Props(classOf[ClusterMemberLeaves], address)) 163 | Cluster(system).leave(address) 164 | awaitCond(Await.result((ref ? "HASLEFT").mapTo[Boolean], 1 second)) 165 | ref ! 
PoisonPill 166 | } 167 | 168 | private def vertifyActorExistsOnPath(path: String) { 169 | import akka.pattern._ 170 | import scala.concurrent.ExecutionContext.Implicits.global 171 | awaitCond(Await.result((system.actorSelection(path) ? Identify(None)).mapTo[ActorIdentity].map(_.ref.isDefined), 1 seconds), 2 seconds) 172 | } 173 | 174 | private def vertifyActorDoesNotExistsOnPath(path: String) { 175 | import akka.pattern._ 176 | import scala.concurrent.ExecutionContext.Implicits.global 177 | awaitCond(Await.result((system.actorSelection(path) ? Identify(None)).mapTo[ActorIdentity].map(_.ref == None), 1 seconds), 6 seconds) 178 | } 179 | 180 | } 181 | 182 | object LEAClusterConfig extends MultiNodeConfig with ClusterConfig { 183 | val seed = s"akka.tcp://LeaderElectionActorSpec@localhost:23456" 184 | val node1 = role("node1") 185 | val node2 = role("node2") 186 | val node3 = role("node3") 187 | 188 | nodeConfig(node1)(ConfigFactory.parseString(config(23456, seed))) 189 | nodeConfig(node2)(ConfigFactory.parseString(configNoSeeds(23457))) 190 | nodeConfig(node3)(ConfigFactory.parseString(configNoSeeds(23458))) 191 | } 192 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2014 Graham Rhodes 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | --------------------------------------------------------------------------------