├── .arcconfig ├── .gitignore ├── LICENSE ├── pom.xml ├── readme.textile └── src ├── main └── scala │ └── scalang │ ├── BigTuple.scala │ ├── BitString.scala │ ├── Cluster.scala │ ├── ETerm.scala │ ├── ErlangPeer.scala │ ├── Fun.scala │ ├── FunProcess.scala │ ├── ImproperList.scala │ ├── Node.scala │ ├── NodeConfig.scala │ ├── Pid.scala │ ├── Port.scala │ ├── Process.scala │ ├── ProcessContext.scala │ ├── Reference.scala │ ├── ReplyRegistry.scala │ ├── Service.scala │ ├── ServiceContext.scala │ ├── TypeFactory.scala │ ├── epmd │ ├── Epmd.scala │ ├── EpmdDecoder.scala │ ├── EpmdEncoder.scala │ ├── EpmdHandler.scala │ └── EpmdMessages.scala │ ├── node │ ├── CaseClassFactory.scala │ ├── ClientHandshakeHandler.scala │ ├── Clock.scala │ ├── ErlangHandler.scala │ ├── ErlangNodeClient.scala │ ├── ErlangNodeServer.scala │ ├── ExitListenable.scala │ ├── ExitListener.scala │ ├── FailureDetectionHandler.scala │ ├── HandshakeDecoder.scala │ ├── HandshakeEncoder.scala │ ├── HandshakeHandler.scala │ ├── HandshakeMessages.scala │ ├── Link.scala │ ├── LinkListenable.scala │ ├── LinkListener.scala │ ├── Mailbox.scala │ ├── Monitor.scala │ ├── MonitorListenable.scala │ ├── MonitorListener.scala │ ├── NetKernel.scala │ ├── NodeMessages.scala │ ├── PacketCounter.scala │ ├── ProcessAdapter.scala │ ├── ProcessLauncher.scala │ ├── ProcessLike.scala │ ├── ReferenceCounter.scala │ ├── ScalaTermDecoder.scala │ ├── ScalaTermEncoder.scala │ ├── SendListenable.scala │ ├── SendListener.scala │ ├── ServerHandshakeHandler.scala │ └── ServiceLauncher.scala │ └── util │ ├── BatchPoolExecutor.scala │ ├── ByteArray.scala │ ├── CamelToUnder.scala │ ├── StateMachine.scala │ ├── ThreadPoolFactory.scala │ └── UnderToCamel.scala └── test ├── resources ├── echo.escript ├── link_delivery.escript ├── monitor.escript └── receive_connection.escript └── scala └── scalang ├── EchoProcess.scala ├── FailProcess.scala ├── LinkProcess.scala ├── MonitorProcess.scala ├── NodeSpec.scala ├── ServiceSpec.scala ├── TestHelper.scala ├── epmd ├── EpmdDecoderSpec.scala ├── EpmdEncoderSpec.scala └── EpmdSpec.scala ├── node ├── CaseClasses.scala ├── ClientHandshakeHandlerSpec.scala ├── FailureDetectionHandlerSpec.scala ├── HandshakeDecoderSpec.scala ├── HandshakeEncoderSpec.scala ├── ScalaTermDecoderSpec.scala └── ServerHandshakeHandlerSpec.scala ├── terms └── ScalaTermDecoderSpec.scala └── util └── TwoWayCodecEmbedder.scala /.arcconfig: -------------------------------------------------------------------------------- 1 | { 2 | "project_id" : "scalang", 3 | "conduit_uri" : "http://phabricator.cid1.boundary.com/" 4 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | project/boot/* 2 | project/build/target/* 3 | target/* 4 | lib_managed/* 5 | .DS_Store 6 | *.iml 7 | *.iws 8 | *.ipr 9 | .idea/* 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2011 Boundary, Inc 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4.0.0 4 | com.boundary 5 | scalang 6 | scalang-scala_2.9.1 7 | jar 8 | 0.31-SNAPSHOT 9 | 10 | 2.9.1 11 | UTF-8 12 | 13 | 14 | 15 | 2.2.1 16 | 17 | 18 | 19 | 20 | com.boundary 21 | logula_slf4j_2.9.1 22 | 2.1.3-p01 23 | 24 | 25 | org.slf4j 26 | slf4j-simple 27 | 1.7.6 28 | test 29 | 30 | 31 | com.boundary 32 | overlock-scala_${scala.version} 33 | 0.8.6 34 | 35 | 36 | org.jetlang 37 | jetlang 38 | 0.2.12 39 | 40 | 41 | org.jboss.netty 42 | netty 43 | 3.2.10.Final 44 | 45 | 46 | junit 47 | junit 48 | 4.11 49 | test 50 | 51 | 52 | org.scala-tools.testing 53 | specs_${scala.version} 54 | 1.6.9 55 | test 56 | 57 | 58 | 59 | 60 | BoundaryPublicRepo 61 | Boundary Public Repo 62 | http://maven.boundary.com/artifactory/repo/ 63 | 64 | 65 | 66 | src/main/scala 67 | src/test/scala 68 | 69 | 70 | 71 | org.scala-tools 72 | maven-scala-plugin 73 | 2.15.2 74 | 75 | 76 | 77 | compile 78 | testCompile 79 | 80 | 81 | 82 | 83 | 84 | -unchecked 85 | -deprecation 86 | 87 | UTF-8 88 | ${scala.version} 89 | 90 | 91 | 92 | 93 | org.apache.maven.plugins 94 | maven-compiler-plugin 95 | 3.1 96 | 97 | 1.7 98 | 1.7 99 | 100 | 101 | 102 | 103 | org.apache.maven.plugins 104 | maven-surefire-plugin 105 | 2.15 106 | 107 | false 108 | false 109 | -Xmx1024m 110 | 111 | **/*Spec.java 112 | 113 | 114 | **/*Test.java 115 | 116 | 117 | 118 | 119 | org.apache.maven.plugins 120 | maven-source-plugin 121 | 2.2.1 122 | 123 | 124 | attach-sources 125 | package 126 | 127 | jar-no-fork 128 | 129 | 130 | 131 | 132 | 133 | org.codehaus.mojo 134 | cobertura-maven-plugin 135 | 2.6 136 | 137 | 138 | 139 | xml 140 | html 141 | 142 | 143 | 144 | 145 | clean 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | org.codehaus.mojo 155 | cobertura-maven-plugin 156 | 2.6 157 | 158 | 159 | 160 | 161 | 162 | 163 | maven.boundary.com 164 | maven.boundary.com-releases 165 | http://maven.boundary.com/artifactory/external 166 | 167 | 168 | maven.boundary.com 169 | maven.boundary.com-snapshots 170 | http://maven.boundary.com/artifactory/external 171 | 172 | 173 | 174 | 175 | scm:git:git@github.com:boundary/scalang.git 176 | 177 | 178 | 179 | -------------------------------------------------------------------------------- /readme.textile: -------------------------------------------------------------------------------- 1 | h1. Introduction 2 | 3 | Scalang is a message passing and actor library that allows Scala and Erlang applications to easily communicate. Scalang includes a full implementation of the Erlang distributed node protocol. It provides an actor oriented API that can be used to interact with Erlang nodes in an idiomatic, OTP compliant way. Scalang is built on Netty for its networking layer and Jetlang for its actor implementation. 4 | 5 | h1. Installation 6 | 7 | From Maven: 8 | 9 |
 10 | <repositories>
 11 |   <repository>
 12 |     <name>Boundary Public Repo</name>
 13 |     <url>http://maven.boundary.com/artifactory/repo</url>
 14 |   </repository>
 15 | </repositories>
 16 | 
 17 | <dependencies>
 18 |   <dependency>
 19 |     <groupId>com.boundary</groupId>
 20 |     <artifactId>scalang-scala_2.9.1</artifactId>
 21 |     <version>0.18</version>
 22 |   </dependency>
 23 | </dependencies>
 24 | 
 25 | 
 26 | 
27 | 28 | From SBT: 29 | 30 |
 31 | 
 32 |   val boundaryPublic = "Boundary Public Repo" at "http://maven.boundary.com/artifactory/repo"
 33 | 
 34 |   val scalang = "com.boundary" %% "scalang" % "0.18"
 35 | 
 36 | 
 37 | 
 38 | Scalang currently supports Scala 2.9.1. Scalang also requires "epmd":http://www.erlang.org/doc/man/epmd.html in order to run.
 39 | 
 40 | h1. Usage
 41 | 
 42 | h2. Starting
 43 | 
 44 | The main entry point for Scalang's API is the Node class. An instance of the Node class is a self-contained representation of an Erlang node, a distinct member of the set of nodes participating in a distributed system. Multiple nodes may be run within the same JVM. Start a node with the following invocation:
 45 | 
 46 | 
 47 | 
 48 |     val node = Node("scala@localhost.local", "cookie")
 49 | 
 50 | 
 51 | 
 52 | Starting a Scalang node like this will register it with the local "epmd":http://erlang.org/doc/man/epmd.html instance, which must be running on the local host. Scalang will then be available to receive and make connections to other Erlang nodes. In this case, its node name is scala@localhost.local and its magic cookie is @cookie@. Any Erlang or Scalang node that shares this magic cookie can now connect to this node and send it messages using the node name.
 53 | 
 54 | h2. Processes
 55 | 
 56 | Scalang shares Erlang's concept of a process: a lightweight actor that is capable of sending messages to other processes, either local or remote. You can define your own processes by subclassing the Process class. The following code defines and then spawns a process:
 57 | 
 58 | 
 59 | 
 60 |   class MyProcess(ctx : ProcessContext) extends Process(ctx) {
 61 |     override def onMessage(msg : Any) {
 62 |       log.info("received %s", msg)
 63 |     }
 64 |   }
 65 | 
 66 |   val pid = node.spawn[MyProcess]("my_process")
 67 | 
 68 |   //send to the pid
 69 |   node.send(pid, "hey there")
 70 | 
 71 |   //send to the regname
 72 |   node.send("my_process", "you wanna party?")
 73 | 
 74 | 
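
A process can also schedule messages to itself. The following is a minimal sketch using the sendEvery timer helper defined on Process (see src/main/scala/scalang/Process.scala); the Ticker class and registered name are illustrative, and the delay argument is assumed to be in milliseconds:

  class Ticker(ctx : ProcessContext) extends Process(ctx) {
    override def onMessage(msg : Any) = msg match {
      case 'start =>
        //sendEvery is inherited from Process; the delay is assumed to be milliseconds
        sendEvery(self, 'tick, 1000)
      case 'tick =>
        log.info("tick")
      case other =>
        log.info("unexpected message %s", other)
    }
  }

  val ticker = node.spawn[Ticker]("ticker")
  node.send(ticker, 'start)
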
 75 | 
 76 | h2. Message Passing
 77 | 
 78 | Sending and receiving messages is the main means by which processes interact with the outside world. Processes receive messages via the onMessage handler method. Only one message is handled by a process at a time unless that process has been spawned with the reentrant option.
 79 | 
 80 | The following shows a simple echo server and client that demonstrate message sending.
 81 | 
 82 | 
 83 | 
 84 |   class EchoServer(ctx : ProcessContext) extends Process(ctx) {
 85 |     override def onMessage(msg : Any) = msg match {
 86 |       case (pid : Pid, request : Any) =>
 87 |         pid ! request
 88 |       case m =>
 89 |         log.error("sorry I don't understand %s.", m)
 90 |     }
 91 |   }
 92 | 
 93 | 
 94 |   val server = node.spawn[EchoServer]("echo_server")
 95 | 
 96 |   val client = node.spawn { mbox =>
 97 |     mbox.send(server, (mbox.self, 'derp))
 98 |     val received = mbox.receive
 99 |     println("received " + received)
100 |   }
101 | 
102 | 
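
Anonymous processes can also wait with a timeout. As a sketch, using the receive(timeout) method on the mailbox (defined on the Mailbox in FunProcess.scala, which takes a timeout in milliseconds and returns an Option), the client above could guard against a missing reply:

  val waiter = node.spawn { mbox =>
    mbox.send(server, (mbox.self, 'ping))
    //receive(timeout) returns None if nothing arrives within the timeout
    mbox.receive(1000) match {
      case Some(reply) => println("received " + reply)
      case None => println("timed out waiting for the echo server")
    }
  }
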
103 | 
104 | Messages can also easily be passed to processes on a remote node. Scalang supports Erlang's convention of addressing a message with a tuple of a registered name and a node name.
105 | 
106 | 
107 | 
108 |   val remoteNode = Node("remote", cookie)
109 |   val client = node.spawn { mbox =>
110 |     mbox.send(('echo_server, Symbol("test@localhost")), "heyo!")
111 |   }
112 | 
113 | 
114 | 
115 | The above code will send a message to the process registered as "echo_server" on the node named "test@localhost".
116 | 
117 | h2. Error Handling
118 | 
119 | Scalang implements the Erlang concept of links. A link is a bidirectional relationship between two processes: if one of the processes exits, the link breaks and the other process receives an exit notification. By default, a process that receives an exit notification exits in turn with the same reason that was delivered when the link broke. Creating a link between two processes requires both pids.
120 | 
121 | Processes that need custom exit-handling behavior may override the trapExit method.
122 | 
123 | 
124 | 
125 |   class ExitHandler(ctx : ProcessContext) extends Process(ctx) {
126 |     override def onMessage(msg : Any) = msg match {
127 |       case _ => log.info("derp %s", msg)
128 |     }
129 | 
130 |     override def trapExit(from : Pid, msg : Any) {
131 |       log.warn("got exit notification from %s reason %s", from, msg)
132 |     }
133 |   }
134 | 
135 | 
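
For reference, links are created from within a process. The sketch below assumes the link(to : Pid) helper exposed by ProcessLike (the same helper is visible on the mailbox in FunProcess.scala); the Watcher class and 'watch message are illustrative only. The process links itself to another pid on demand and then logs the exit notification in trapExit:

  class Watcher(ctx : ProcessContext) extends Process(ctx) {
    override def onMessage(msg : Any) = msg match {
      case ('watch, pid : Pid) =>
        //link is assumed to come from ProcessLike; it ties this process to the target pid
        link(pid)
      case other =>
        log.info("ignoring %s", other)
    }

    override def trapExit(from : Pid, reason : Any) {
      log.warn("linked process %s exited with reason %s", from, reason)
    }
  }
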
136 | 
137 | h2. Serialization
138 | 
139 | Scalang messages are serialized into "Erlang's external term format":http://www.erlang.org/doc/apps/erts/erl_ext_dist.html. Serialization happens automatically when messages are sent from or received by a Scalang process. For the most part Scalang provides a 1-1 mapping of Erlang's terms onto Scala types. The type mappings are illustrated below.
140 | 
141 | | From Erlang | To Scala |
142 | | Small Integer | Int |
143 | | Integer | Int |
144 | | Float | Double |
145 | | Boolean | Boolean |
146 | | Atom | Symbol |
147 | | Reference | "Reference":https://github.com/boundary/scalang/blob/master/src/main/scala/scalang/Reference.scala |
148 | | Port | "Port":https://github.com/boundary/scalang/blob/master/src/main/scala/scalang/Port.scala |
149 | | Pid | "Pid":https://github.com/boundary/scalang/blob/master/src/main/scala/scalang/Pid.scala |
150 | | Small Tuple | Tuple |
151 | | Large Tuple | "BigTuple":https://github.com/boundary/scalang/blob/master/src/main/scala/scalang/BigTuple.scala |
152 | | String | String |
153 | | List | List |
154 | | Binary | "ByteBuffer":http://download.oracle.com/javase/6/docs/api/java/nio/ByteBuffer.html |
155 | | Small Bignum | Long |
156 | | Large Bignum | BigInt |
157 | | Fun | "Fun":https://github.com/boundary/scalang/blob/master/src/main/scala/scalang/Fun.scala |
158 | | Bitstring | "BitString":https://github.com/boundary/scalang/blob/master/src/main/scala/scalang/BitString.scala |
159 | 
160 | | From Scala | To Erlang |
161 | | Byte | Small Integer |
162 | | Int | Integer |
163 | | Long | Small Bignum |
164 | | Double | Float |
165 | | Symbol | Atom |
166 | | Reference | Reference |
167 | | Port | Port |
168 | | Pid | Pid |
169 | | Fun | Fun |
170 | | String | String |
171 | | List | List |
172 | | BigInteger | Large Bignum |
173 | | Array[Byte] | Binary |
174 | | ByteBuffer | Binary |
175 | | BitString | Bitstring |
176 | | Tuple | Tuple |
177 | | BigTuple | Tuple |
178 | 
179 | h2. Rich Type Mappings
180 | 
181 | Sometimes the built-in type mappings in Scalang are not sufficient for an application's message format. Scalang provides the TypeFactory trait so that client code can supply custom decoding behavior. A TypeFactory is invoked when Scalang comes across a term that looks like an Erlang record: a tuple whose first element is an atom. The createType method is called with that first element as the name, along with the arity of the tuple.
182 | 
183 | 
184 | 
185 |   object StructFactory extends TypeFactory {
186 |     def createType(name : Symbol, arity : Int, reader : TermReader) : Option[Any] = {
187 |       reader.mark
188 |       (name, arity) match {
189 |         case ('struct, 2) => Some(readMap(reader))
190 |         case _ =>
191 |           reader.reset
192 |           None
193 |       }
194 |     }
195 | 
196 |     protected def readMap(reader : TermReader) : Map[Symbol,Any] = {
197 |       val proplist = reader.readAs[List[(Symbol,Any)]]
198 |       proplist.toMap
199 |     }
200 |   }
201 | 
202 |   val node = Node("test", cookie, NodeConfig(
203 |     typeFactory = StructFactory))
204 | 
205 | 
206 | 
207 | The above code will spawn a Scalang node that uses the StructFactory singleton to decode any arity-2 tuple that begins with the atom @struct@ into a Map. Anything else will be decoded with the normal type mappings.
208 | 
209 | A more complex example is the "CaseClassFactory":https://github.com/boundary/scalang/blob/master/src/main/scala/scalang/node/CaseClassFactory.scala. It will attempt to decode Erlang records into Scala "case classes":http://www.scala-lang.org/node/107 reflectively.
210 | 
211 | h2. Services
212 | 
213 | Most modern Erlang applications are built using the OTP framework, and in particular the "gen_server":http://www.erlang.org/doc/design_principles/gen_server_concepts.html behaviour. In order to interface more effectively with gen_server based processes, Scalang has a special kind of process known as a service. Services respond to casts and calls like a gen_server, and they allow you to send casts and calls to gen_servers running in an Erlang VM.
214 | 
215 | 
216 | 
217 |   case class EchoServiceArgs(name : String)
218 |   class EchoService(ctx : ServiceContext[EchoServiceArgs]) extends Service(ctx) {
219 |     val EchoServiceArgs(name) = ctx.args
220 | 
221 |     override def handleCall(tag: (Pid, Reference), request: Any): Any = { 
222 |       name + " " + request
223 |     }   
224 | 
225 |     override def handleCast(request : Any) {
226 |       log.info("Can't echo a cast. %s", request)
227 |     }   
228 | 
229 |     override def handleInfo(request : Any) {
230 |       log.info("A wild message appeared. %s", request)
231 |     }   
232 |   }
233 | 
234 |   val node = Node("test", cookie)
235 |   val pid = node.spawnService[EchoService, EchoServiceArgs]("echo", EchoServiceArgs("test_echo"))
236 | 
237 | 
238 | 239 | This will spawn a new process with the registered name "echo" and it will be initialized with the argument "test_echo". The handleCall method will automatically send its return value to the caller. Casts are meant to be one directional, therefore the return value of handleCast is discarded. The handleInfo method is invoked when a message shows up without the appropriate call or cast semantics. 240 | 241 | h1. Roadmap 242 | 243 | h2. Upgrade Node Protocol 244 | 245 | Scalang uses the same version of Erlang's node protocol as JInterface. This means that for some operations Scalang nodes are not treated as full fledged members of the cluster. For instance, features like monitors and atom caches are not currently supported. Additionally, Scalang nodes will appear as "hidden nodes":http://www.erlang.org/doc/reference_manual/distributed.html#id82462. This is disadvantageous for Scalang's goal of transparent interoperability. 246 | 247 | h2. Supervision Trees 248 | 249 | Scalang needs its own implementation of Erlang OTP's supervision tree. All of the primitives are in place to support supervision trees currently, so an implementation of the actual supervisor process is all that's needed. 250 | 251 | h2. Pre-emptable Actors 252 | 253 | Scalang uses Jetlang's thread pool based actor implementation. There is currently no API for pre-empting these types of Jetlang actors. Therefore either Jetlang needs to be patched to allow this or another actor backend needs to be chosen. 254 | -------------------------------------------------------------------------------- /src/main/scala/scalang/BigTuple.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | class BigTuple(val elements : Seq[Any]) extends Product { 19 | 20 | override def productElement(n : Int) = elements(n) 21 | 22 | override def productArity = elements.size 23 | 24 | override def canEqual(other : Any) : Boolean = { 25 | other match { 26 | case o : BigTuple => o.elements == elements 27 | case _ => false 28 | } 29 | } 30 | 31 | override def equals(other : Any) = canEqual(other) 32 | } 33 | -------------------------------------------------------------------------------- /src/main/scala/scalang/BitString.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | import java.nio._ 19 | 20 | case class BitString(buffer : ByteBuffer, bits : Int) 21 | -------------------------------------------------------------------------------- /src/main/scala/scalang/Cluster.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | class Cluster(ctx : ProcessContext) extends Process(ctx) { 19 | @volatile var nodes = Set[Symbol]() 20 | 21 | def onMessage(msg : Any) = msg match { 22 | case ('cluster, pid : Pid, ref : Reference) => 23 | pid ! ('cluster, ref, nodes.toList) 24 | case ('nodeup, node : Symbol) => 25 | nodes += node 26 | case ('nodedown, node : Symbol) => 27 | nodes -= node 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /src/main/scala/scalang/ETerm.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | trait ETerm { 19 | 20 | } 21 | -------------------------------------------------------------------------------- /src/main/scala/scalang/ErlangPeer.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang 17 | 18 | case class ErlangPeer(node : String) 19 | -------------------------------------------------------------------------------- /src/main/scala/scalang/Fun.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | case class Fun(pid : Pid, module : Symbol, index : Int, uniq : Int, vars : Seq[Any]) 19 | 20 | case class NewFun(pid : Pid, module : Symbol, oldIndex : Int, oldUniq : Int, arity : Int, index : Int, uniq : Seq[Byte], vars : Seq[Any]) 21 | 22 | case class ExportFun(module : Symbol, function : Symbol, arity : Int) 23 | -------------------------------------------------------------------------------- /src/main/scala/scalang/FunProcess.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang 17 | 18 | import scalang.node._ 19 | import java.util.concurrent.{LinkedBlockingQueue, TimeUnit} 20 | 21 | class FunProcess(fun : Mailbox => Unit, ctx : ProcessContext) extends ProcessAdapter { 22 | val queue = new LinkedBlockingQueue[Any] 23 | val referenceCounter = ctx.referenceCounter 24 | val self = ctx.pid 25 | val fiber = ctx.fiber 26 | val parentPid = self 27 | val parentRef = referenceCounter 28 | val parent = this 29 | val mbox = new Mailbox { 30 | def self = parentPid 31 | def referenceCounter = parentRef 32 | 33 | def handleMessage(msg : Any) { 34 | queue.offer(msg) 35 | } 36 | 37 | def receive : Any = { 38 | queue.take 39 | } 40 | 41 | def receive(timeout : Long) : Option[Any] = { 42 | Option(queue.poll(timeout, TimeUnit.MILLISECONDS)) 43 | } 44 | 45 | def send(pid : Pid, msg : Any) = parent.notifySend(pid,msg) 46 | 47 | def send(name : Symbol, msg : Any) = parent.notifySend(name,msg) 48 | 49 | def send(dest : (Symbol,Symbol), from : Pid, msg : Any) = parent.notifySend(dest,from,msg) 50 | 51 | def exit(reason : Any) = parent.exit(reason) 52 | 53 | def link(to : Pid) = parent.link(to) 54 | } 55 | 56 | 57 | def start { 58 | fiber.execute(new Runnable { 59 | override def run { 60 | fun(mbox) 61 | exit('normal) 62 | } 63 | }) 64 | } 65 | 66 | override def handleMessage(msg : Any) { 67 | queue.offer(msg) 68 | } 69 | 70 | override def handleExit(from : Pid, reason : Any) { 71 | queue.offer(('EXIT, from, reason)) 72 | } 73 | 74 | override def handleMonitorExit(monitored : Any, ref : Reference, reason : Any) { 75 | queue.offer(('DOWN, ref, 'process, monitored, reason)) 76 | } 77 | 78 | def cleanup = fiber.dispose 79 | } 80 | -------------------------------------------------------------------------------- /src/main/scala/scalang/ImproperList.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang 17 | 18 | import scala.collection.immutable.LinearSeq 19 | import scala.collection.mutable.StringBuilder 20 | 21 | case class ImproperList(under : List[Any], lastTail : Any) extends LinearSeq[Any] { 22 | override def isEmpty = under.isEmpty 23 | override def head = under.head 24 | override def tail = under.tail 25 | 26 | override def apply(idx : Int) = under(idx) 27 | 28 | override def length = under.length 29 | 30 | override def toString : String = { 31 | val buffer = new StringBuilder 32 | buffer ++= "ImproperList(" 33 | buffer ++= under.toString 34 | buffer ++= ", " 35 | buffer ++= lastTail.toString 36 | buffer ++= ")" 37 | buffer.toString 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/main/scala/scalang/NodeConfig.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | import util._ 19 | import org.jboss.netty.buffer.ChannelBuffer 20 | 21 | case class NodeConfig( 22 | poolFactory : ThreadPoolFactory = new DefaultThreadPoolFactory, 23 | clusterListener : Option[ClusterListener] = None, 24 | typeFactory : TypeFactory = NoneTypeFactory, 25 | typeEncoder: TypeEncoder = NoneTypeEncoder, 26 | typeDecoder : TypeDecoder = NoneTypeDecoder, 27 | tickTime : Int = 60) 28 | 29 | object NoneTypeFactory extends TypeFactory { 30 | def createType(name : Symbol, arity : Int, reader : TermReader) = None 31 | } 32 | 33 | object NoneTypeEncoder extends TypeEncoder { 34 | def unapply(obj: Any) = { None } 35 | def encode(obj: Any, buffer: ChannelBuffer) {} 36 | } 37 | 38 | object NoneTypeDecoder extends TypeDecoder { 39 | def unapply(typeOrdinal : Int) : Option[Int] = { None } 40 | def decode(typeOrdinal : Int, buffer : ChannelBuffer) : Any = {} 41 | } 42 | -------------------------------------------------------------------------------- /src/main/scala/scalang/Pid.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | case class Pid(node : Symbol, id : Int, serial : Int, creation : Int) { 19 | 20 | def toErlangString : String = { 21 | "<" + id + "." + serial + "." 
+ creation + ">" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/main/scala/scalang/Port.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | case class Port(node : Symbol, id : Int, creation : Int) 19 | -------------------------------------------------------------------------------- /src/main/scala/scalang/Process.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | import scalang.node.ProcessLike 19 | import scala._ 20 | import com.boundary.logula.Logging 21 | 22 | abstract class Process(ctx : ProcessContext) extends ProcessLike with Logging { 23 | val self = ctx.pid 24 | val adapter = ctx.adapter 25 | val referenceCounter = ctx.referenceCounter 26 | val replyRegistry = ctx.replyRegistry 27 | val node = ctx.node 28 | 29 | implicit def pid2sendable(pid : Pid) = new PidSend(pid,this) 30 | implicit def sym2sendable(to : Symbol) = new SymSend(to,this) 31 | implicit def dest2sendable(dest : (Symbol,Symbol)) = new DestSend(dest,self,this) 32 | 33 | def sendEvery(pid : Pid, msg : Any, delay : Long) = adapter.sendEvery(pid, msg, delay) 34 | def sendEvery(name : Symbol, msg : Any, delay : Long) = adapter.sendEvery(name, msg, delay) 35 | def sendEvery(name : (Symbol,Symbol), msg : Any, delay : Long) = adapter.sendEvery(name, msg, delay) 36 | 37 | def sendAfter(pid : Pid, msg : Any, delay : Long) = adapter.sendAfter(pid, msg, delay) 38 | def sendAfter(name : Symbol, msg : Any, delay : Long) = adapter.sendAfter(name, msg, delay) 39 | def sendAfter(dest : (Symbol,Symbol), msg : Any, delay : Long) = adapter.sendAfter(dest, msg, delay) 40 | 41 | override def handleMessage(msg : Any) { 42 | onMessage(msg) 43 | } 44 | 45 | override def handleExit(from : Pid, msg : Any) { 46 | trapExit(from, msg) 47 | } 48 | 49 | /** 50 | * Subclasses should override this method with their own message handlers 51 | */ 52 | def onMessage(msg : Any) 53 | 54 | /** 55 | * Subclasses wishing to trap exits should override this method. 
56 | */ 57 | def trapExit(from : Pid, msg : Any) { 58 | exit(msg) 59 | } 60 | 61 | def handleMonitorExit(monitored : Any, ref : Reference, reason : Any) { 62 | trapMonitorExit(monitored, ref, reason) 63 | } 64 | 65 | /** 66 | * Subclasses wishing to trap monitor exits should override this method. 67 | */ 68 | def trapMonitorExit(monitored : Any, ref : Reference, reason : Any) { 69 | } 70 | 71 | } 72 | 73 | class PidSend(to : Pid, proc : Process) { 74 | def !(msg : Any) { 75 | proc.adapter.notifySend(to,msg) 76 | } 77 | } 78 | 79 | class SymSend(to : Symbol, proc : Process) { 80 | def !(msg : Any) { 81 | proc.adapter.notifySend(to, msg) 82 | } 83 | } 84 | 85 | class DestSend(to : (Symbol,Symbol), from : Pid, proc : Process) { 86 | def !(msg : Any) { 87 | proc.adapter.notifySend(to, from, msg) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/main/scala/scalang/ProcessContext.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | import scalang.node._ 19 | import org.jetlang.fibers._ 20 | 21 | trait ProcessContext { 22 | def pid : Pid 23 | def node : ErlangNode 24 | def referenceCounter : ReferenceCounter 25 | def fiber : Fiber 26 | def replyRegistry : ReplyRegistry 27 | var adapter : ProcessAdapter 28 | } 29 | -------------------------------------------------------------------------------- /src/main/scala/scalang/Reference.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | case class Reference(node : Symbol, id : Seq[Int], creation : Int) 19 | -------------------------------------------------------------------------------- /src/main/scala/scalang/ReplyRegistry.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | import java.util.concurrent.BlockingQueue 19 | import org.cliffc.high_scale_lib.NonBlockingHashMap 20 | 21 | trait ReplyRegistry { 22 | val replyWaiters = new NonBlockingHashMap[(Pid,Reference),BlockingQueue[Any]] 23 | 24 | /** 25 | * Returns true if the reply delivery succeeded. False otherwise. 26 | */ 27 | def tryDeliverReply(pid : Pid, msg : Any) = msg match { 28 | case (tag : Reference, reply : Any) => 29 | val waiter = replyWaiters.remove((pid,tag)) 30 | if (waiter == null) { 31 | false 32 | } else { 33 | waiter.offer(reply) 34 | true 35 | } 36 | case _ => false 37 | } 38 | 39 | def removeReplyQueue(pid : Pid, ref : Reference) { 40 | replyWaiters.remove((pid, ref)) 41 | } 42 | 43 | def registerReplyQueue(pid : Pid, tag : Reference, queue : BlockingQueue[Any]) { 44 | replyWaiters.put((pid,tag), queue) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/main/scala/scalang/Service.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang 17 | 18 | import node._ 19 | import org.jetlang._ 20 | import channels._ 21 | import core._ 22 | import fibers._ 23 | import java.util.concurrent.{LinkedBlockingQueue, TimeUnit} 24 | 25 | /** 26 | * Service is a class that provides a responder in the shape expected 27 | * by gen_lb. A service may define handlers for either cast, call, or both. 28 | */ 29 | abstract class Service[A <: Product](ctx : ServiceContext[A]) extends Process(ctx) { 30 | /** 31 | * Init callback for any context that the service might need. 32 | */ 33 | /* def init(args : Any) { 34 | // noop 35 | } 36 | */ 37 | /** 38 | * Handle a call style of message which will expect a response. 39 | */ 40 | def handleCall(tag : (Pid,Reference), request : Any) : Any = { 41 | throw new Exception(getClass + " did not define a call handler.") 42 | } 43 | 44 | /** 45 | * Handle a cast style of message which will receive no response. 46 | */ 47 | def handleCast(request : Any) { 48 | throw new Exception(getClass + " did not define a cast handler.") 49 | } 50 | 51 | /** 52 | * Handle any messages that do not fit the call or cast pattern. 
53 | */ 54 | def handleInfo(request : Any) { 55 | throw new Exception(getClass + " did not define an info handler.") 56 | } 57 | 58 | override def onMessage(msg : Any) = msg match { 59 | case ('ping, from : Pid, ref : Reference) => 60 | from ! ('pong, ref) 61 | case (Symbol("$gen_call"), (from : Pid, ref : Reference), request : Any) => 62 | handleCall((from, ref), request) match { 63 | case ('reply, reply) => 64 | from ! (ref, reply) 65 | case 'noreply => 66 | Unit 67 | case reply => 68 | from ! (ref, reply) 69 | } 70 | case (Symbol("$gen_cast"), request : Any) => 71 | handleCast(request) 72 | case _ => 73 | handleInfo(msg) 74 | } 75 | 76 | def call(to : Pid, msg : Any) : Any = node.call(self,to,msg) 77 | def call(to : Pid, msg : Any, timeout : Long) : Any = node.call(self,to,msg,timeout) 78 | def call(to : Symbol, msg : Any) : Any = node.call(self,to,msg) 79 | def call(to : Symbol, msg : Any, timeout : Long) : Any = node.call(self,to,msg,timeout) 80 | def call(to : (Symbol,Symbol), msg : Any) : Any = node.call(self,to,msg) 81 | def call(to : (Symbol,Symbol), msg : Any, timeout : Long) : Any = node.call(self,to,msg,timeout) 82 | 83 | def cast(to : Pid, msg : Any) = node.cast(to,msg) 84 | def cast(to : Symbol, msg : Any) = node.cast(to,msg) 85 | def cast(to : (Symbol,Symbol), msg : Any) = node.cast(to,msg) 86 | 87 | } 88 | -------------------------------------------------------------------------------- /src/main/scala/scalang/ServiceContext.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | trait ServiceContext[A <: Product] extends ProcessContext { 4 | def args : A 5 | } 6 | 7 | case class NoArgs() 8 | 9 | object NoArgs extends NoArgs 10 | -------------------------------------------------------------------------------- /src/main/scala/scalang/TypeFactory.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang 17 | 18 | import node._ 19 | import org.jboss.netty.buffer.ChannelBuffer 20 | 21 | trait TypeFactory { 22 | def createType(name : Symbol, arity : Int, reader : TermReader) : Option[Any] 23 | } 24 | 25 | trait TypeEncoder { 26 | def unapply(obj : Any) : Option[Any] 27 | 28 | def encode(obj : Any, buffer : ChannelBuffer) 29 | } 30 | 31 | trait TypeDecoder { 32 | def unapply(typeOrdinal : Int) : Option[Int] 33 | 34 | def decode(typeOrdinal : Int, buffer : ChannelBuffer) : Any 35 | } 36 | 37 | class TermReader(val buffer : ChannelBuffer, decoder : ScalaTermDecoder) { 38 | var m : Int = 0 39 | 40 | def mark : TermReader = { 41 | m = buffer.readerIndex 42 | this 43 | } 44 | 45 | def reset : TermReader = { 46 | buffer.readerIndex(m) 47 | this 48 | } 49 | 50 | def readTerm : Any = { 51 | decoder.readTerm(buffer) 52 | } 53 | 54 | def readAs[A] : A = { 55 | readTerm.asInstanceOf[A] 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/main/scala/scalang/epmd/Epmd.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.epmd 17 | 18 | import java.net._ 19 | import org.jboss.{netty => netty} 20 | import netty.bootstrap._ 21 | import netty.channel._ 22 | import socket.nio._ 23 | import overlock.threadpool._ 24 | import java.util.concurrent.Callable 25 | 26 | object Epmd { 27 | val defaultPort = 4369 28 | lazy val bossPool = ThreadPool.instrumentedFixed("scalang.epmd", "boss", 20) 29 | lazy val workerPool = ThreadPool.instrumentedFixed("scalang.epmd", "worker", 20) 30 | 31 | def apply(host : String) : Epmd = { 32 | val port = Option(System.getenv("ERL_EPMD_PORT")).map(_.toInt).getOrElse(defaultPort) 33 | new Epmd(host, port) 34 | } 35 | 36 | def apply(host : String, port : Int) : Epmd = { 37 | new Epmd(host, port) 38 | } 39 | } 40 | 41 | class Epmd(val host : String, val port : Int) { 42 | val bootstrap = new ClientBootstrap( 43 | new NioClientSocketChannelFactory( 44 | Epmd.bossPool, 45 | Epmd.workerPool)) 46 | 47 | val handler = new EpmdHandler 48 | 49 | bootstrap.setPipelineFactory(new ChannelPipelineFactory { 50 | def getPipeline : ChannelPipeline = { 51 | Channels.pipeline( 52 | new EpmdEncoder, 53 | new EpmdDecoder, 54 | handler) 55 | } 56 | }) 57 | 58 | val connectFuture = bootstrap.connect(new InetSocketAddress(host, port)) 59 | val channel = connectFuture.awaitUninterruptibly.getChannel 60 | if(!connectFuture.isSuccess) { 61 | throw connectFuture.getCause 62 | } 63 | 64 | def close { 65 | channel.close 66 | } 67 | 68 | def alive(portNo : Int, nodeName : String) : Option[Int] = { 69 | var response: Callable[Any] = null 70 | handler.synchronized { 71 | response = handler.response 72 | channel.write(AliveReq(portNo,nodeName)) 73 | } 74 | val aliveResponse = response.call.asInstanceOf[AliveResp] 75 | if (aliveResponse.result == 0) { 76 | Some(aliveResponse.creation) 77 | } else { 78 | sys.error("Epmd response was: " + aliveResponse.result) 79 | None 80 | } 81 | } 82 | 83 | def lookupPort(nodeName : String) : Option[Int] = { 84 | var response: Callable[Any] = null 85 | handler.synchronized { 86 | response = handler.response 87 | channel.write(PortPleaseReq(nodeName)) 88 | } 89 | response.call match { 90 | case PortPleaseResp(portNo, _) => Some(portNo) 91 | case PortPleaseError(_) => None 92 | } 93 | } 94 | } 95 | 96 | -------------------------------------------------------------------------------- /src/main/scala/scalang/epmd/EpmdDecoder.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.epmd 17 | 18 | import org.jboss.{netty => netty} 19 | import netty.buffer._ 20 | import netty.channel._ 21 | import netty.handler.codec.frame._ 22 | 23 | class EpmdDecoder extends FrameDecoder { 24 | override def decode(ctx : ChannelHandlerContext, channel : Channel, buffer : ChannelBuffer) : Object = { 25 | if (buffer.readableBytes < 1) return null 26 | val header = buffer.getByte(0) 27 | header match { 28 | case 121 => //decode alive2 resp 29 | if (buffer.readableBytes < 4) return null 30 | val result = buffer.getByte(1) 31 | val creation = buffer.getUnsignedShort(2) 32 | buffer.skipBytes(4) 33 | AliveResp(result, creation) 34 | case 119 => //decode port2 resp 35 | val result = buffer.getByte(1) 36 | if (result > 0) { 37 | buffer.skipBytes(2) 38 | PortPleaseError(result) 39 | } else { 40 | if (buffer.readableBytes < 12) return null 41 | val nlen = buffer.getUnsignedShort(10) 42 | if (buffer.readableBytes < (14 + nlen)) return null 43 | val elen = buffer.getUnsignedShort(12 + nlen) 44 | if (buffer.readableBytes < (14 + nlen + elen)) return null 45 | val portNo = buffer.getUnsignedShort(2) 46 | val bytes = new Array[Byte](nlen) 47 | buffer.getBytes(12, bytes) 48 | val nodeName = new String(bytes) 49 | buffer.skipBytes(14+nlen+elen) 50 | PortPleaseResp(portNo, nodeName) 51 | } 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/main/scala/scalang/epmd/EpmdEncoder.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.epmd 17 | 18 | import org.jboss.{netty => netty} 19 | import netty.buffer._ 20 | import netty.channel._ 21 | import netty.handler.codec.oneone._ 22 | 23 | object EpmdConst { 24 | val ntypeR6 = 110 25 | val ntypeR4Erlang = 109 26 | val ntypeR4Hidden = 104 27 | } 28 | 29 | class EpmdEncoder extends OneToOneEncoder { 30 | import EpmdConst._ 31 | 32 | override def encode(ctx : ChannelHandlerContext, channel : Channel, msg : Object) : Object = { 33 | val bout = new ChannelBufferOutputStream(ChannelBuffers.dynamicBuffer(24, ctx.getChannel.getConfig.getBufferFactory)) 34 | bout.writeShort(0) //length placeholder 35 | msg match { 36 | case AliveReq(portNo, nodeName) => 37 | bout.writeByte(120) 38 | bout.writeShort(portNo) 39 | bout.writeByte(ntypeR6) //node type 40 | bout.writeByte(0) //protocol 41 | bout.writeShort(5) // highest version 42 | bout.writeShort(5) // lowest version 43 | bout.writeShort(nodeName.size) // name length 44 | bout.writeBytes(nodeName) // name 45 | bout.writeShort(0) //extra len 46 | case PortPleaseReq(nodeName) => 47 | bout.writeByte(122) 48 | bout.writeBytes(nodeName) 49 | } 50 | val encoded = bout.buffer 51 | encoded.setShort(0, encoded.writerIndex - 2) 52 | encoded 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/main/scala/scalang/epmd/EpmdHandler.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.epmd 17 | 18 | import java.util.concurrent.{ConcurrentLinkedQueue, Callable, CountDownLatch, atomic => atomic} 19 | import atomic._ 20 | import org.jboss.{netty => netty} 21 | import netty.channel._ 22 | import java.util.concurrent.TimeUnit 23 | import com.boundary.logula.Logging 24 | 25 | class EpmdHandler extends SimpleChannelUpstreamHandler with Logging { 26 | val queue = new ConcurrentLinkedQueue[EpmdResponse] 27 | 28 | def response : Callable[Any] = { 29 | val call = new EpmdResponse 30 | queue.add(call) 31 | call 32 | } 33 | 34 | override def channelClosed(ctx : ChannelHandlerContext, e : ChannelStateEvent) { 35 | log.debug("Oh snap channel closed.") 36 | } 37 | 38 | override def channelDisconnected(ctx : ChannelHandlerContext, e : ChannelStateEvent) { 39 | log.debug("Uh oh disconnect.") 40 | } 41 | 42 | override def exceptionCaught(ctx : ChannelHandlerContext, e : ExceptionEvent) { 43 | var rsp = queue.poll 44 | while (rsp != null) { 45 | rsp.setError(e.getCause) 46 | rsp = queue.poll 47 | } 48 | } 49 | 50 | override def messageReceived(ctx : ChannelHandlerContext, e : MessageEvent) { 51 | val response = e.getMessage 52 | val rsp = queue.poll() 53 | if (rsp != null) { 54 | rsp.set(response) 55 | } 56 | else { 57 | log.warn("Unable to find EpmdResponse for: %s", response) 58 | } 59 | } 60 | 61 | class EpmdResponse extends Callable[Any] { 62 | val response = new AtomicReference[Any] 63 | val error = new AtomicReference[Throwable] 64 | val lock = new CountDownLatch(1) 65 | 66 | def setError(t : Throwable) { 67 | error.set(t) 68 | lock.countDown() 69 | 70 | } 71 | 72 | def set(v : Any) { 73 | response.set(v) 74 | lock.countDown() 75 | } 76 | 77 | def call : Any = { 78 | if (lock.await(5000, TimeUnit.MILLISECONDS)) { 79 | if (error.get != null) { 80 | throw new Exception("EPMD Registration failed.", error.get) 81 | } else { 82 | response.get 83 | } 84 | } else { 85 | throw new Exception("EPMD Registration timed out.") 86 | } 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/main/scala/scalang/epmd/EpmdMessages.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.epmd 17 | 18 | case class AliveReq(portNo : Int, nodeName : String) 19 | 20 | case class AliveResp(result : Int, creation : Int) 21 | 22 | case class PortPleaseReq(nodeName : String) 23 | 24 | case class PortPleaseError(result : Int) 25 | 26 | case class PortPleaseResp(portNo : Int, nodeName : String) 27 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/CaseClassFactory.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | import overlock.atomicmap._ 20 | import scalang.util.UnderToCamel._ 21 | 22 | class CaseClassFactory(searchPrefixes : Seq[String], typeMappings : Map[String,Class[_]]) extends TypeFactory { 23 | //it's important to cache the negative side as well 24 | val classCache = AtomicMap.atomicNBHM[String,Option[Class[_]]] 25 | 26 | def createType(name : Symbol, arity : Int, reader : TermReader) : Option[Any] = { 27 | classCache.getOrElseUpdate(name.name, lookupClass(name.name)).flatMap { clazz => 28 | tryCreateInstance(reader, clazz, arity) 29 | } 30 | } 31 | 32 | /** 33 | * Arity is the length of the tuple after the header 34 | */ 35 | protected def tryCreateInstance(reader : TermReader, clazz : Class[_], arity : Int) : Option[Any] = { 36 | val candidates = for (constructor <- clazz.getConstructors if constructor.getParameterTypes.length == arity-1) yield {constructor} 37 | if (candidates.isEmpty) return None 38 | reader.mark 39 | val parameters = for (i <- (1 until arity)) yield { reader.readTerm } 40 | val classes = parameters.map { case param : AnyRef => 41 | param.getClass 42 | } 43 | candidates.find { constructor => 44 | val params = constructor.getParameterTypes 45 | boxEquals(classes.toList, params.toList) 46 | }.flatMap { constructor => 47 | try { 48 | Some(constructor.newInstance(parameters.asInstanceOf[Seq[Object]] : _*)) 49 | } catch { 50 | case _ => None 51 | } 52 | }.orElse { 53 | reader.reset 54 | None 55 | } 56 | } 57 | 58 | protected def boxEquals(a : List[Class[_]], b : List[Class[_]]) : Boolean = { 59 | def scrubPrimitive(a : Class[_]) : Class[_] = a match { 60 | case java.lang.Byte.TYPE => classOf[java.lang.Byte] 61 | case java.lang.Short.TYPE => classOf[java.lang.Short] 62 | case java.lang.Integer.TYPE => classOf[java.lang.Integer] 63 | case java.lang.Long.TYPE => classOf[java.lang.Long] 64 | case java.lang.Boolean.TYPE => classOf[java.lang.Boolean] 65 | case java.lang.Character.TYPE => classOf[java.lang.Character] 66 | case java.lang.Float.TYPE => classOf[java.lang.Float] 67 | case java.lang.Double.TYPE => classOf[java.lang.Double] 68 | case x => x 69 | } 70 | 71 | (a,b) match { 72 | case (classA :: tailA, classB :: tailB) => 73 | if (! 
(scrubPrimitive(classA) == scrubPrimitive(classB))) { 74 | return false 75 | } 76 | boxEquals(tailA, tailB) 77 | case (Nil, Nil) => true 78 | case _ => false 79 | } 80 | 81 | } 82 | 83 | 84 | protected def lookupClass(name : String) : Option[Class[_]] = { 85 | typeMappings.get(name) match { 86 | case Some(c) => Some(c) 87 | case None => 88 | for (prefix <- searchPrefixes) { 89 | try { 90 | return Some(Class.forName(prefix + "." + name.underToCamel)) 91 | } catch { 92 | case e : Exception => 93 | e.printStackTrace 94 | Unit 95 | } 96 | } 97 | None 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ClientHandshakeHandler.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import java.net._ 19 | import java.util.concurrent._ 20 | import atomic._ 21 | import org.jboss.{netty => netty} 22 | import netty.bootstrap._ 23 | import netty.channel._ 24 | import netty.handler.codec.frame._ 25 | import scalang._ 26 | import util._ 27 | import java.util.ArrayDeque 28 | import scala.annotation._ 29 | import scala.math._ 30 | import scala.collection.JavaConversions._ 31 | import java.security.{SecureRandom,MessageDigest} 32 | 33 | class ClientHandshakeHandler(name : Symbol, cookie : String, posthandshake : (Symbol,ChannelPipeline) => Unit) extends HandshakeHandler(posthandshake) { 34 | states( 35 | state('disconnected, { 36 | case ConnectedMessage => 37 | sendName 38 | 'connected 39 | }), 40 | 41 | state('connected, { 42 | case StatusMessage("ok") => 43 | 'status_ok 44 | case StatusMessage("ok_simultaneous") => 45 | 'status_ok 46 | case StatusMessage("alive") => //means the other node sees another conn from us. reconnecting too quick. 
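        // The old connection is dead from this side, so per the distribution handshake we answer
        // "true" to keep the new connection (answering "false" would abandon it).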
47 | sendStatus("true") 48 | 'status_ok 49 | case StatusMessage(status) => 50 | throw new ErlangAuthException("Bad status message: " + status) 51 | }), 52 | 53 | state('status_ok, { 54 | case ChallengeMessage(version, flags, c, name) => 55 | peer = Symbol(name) 56 | sendChallengeReply(c) 57 | 'reply_sent 58 | }), 59 | 60 | state('reply_sent, { 61 | case ChallengeAckMessage(digest) => 62 | verifyChallengeAck(digest) 63 | drainQueue 64 | handshakeSucceeded 65 | 'verified 66 | }), 67 | 68 | state('verified, { 69 | case _ => 'verified 70 | })) 71 | 72 | protected def sendStatus(st : String) { 73 | val channel = ctx.getChannel 74 | val future = Channels.future(channel) 75 | val msg = StatusMessage(st) 76 | ctx.sendDownstream(new DownstreamMessageEvent(channel,future,msg,null)) 77 | } 78 | 79 | protected def sendName { 80 | val channel = ctx.getChannel 81 | val future = Channels.future(channel) 82 | val msg = NameMessage(5, DistributionFlags.default, name.name) 83 | ctx.sendDownstream(new DownstreamMessageEvent(channel,future,msg,null)) 84 | } 85 | 86 | protected def sendChallengeReply(c : Int) { 87 | val channel = ctx.getChannel 88 | val future = Channels.future(channel) 89 | this.peerChallenge = c 90 | this.challenge = random.nextInt 91 | val d = digest(peerChallenge, cookie) 92 | val msg = ChallengeReplyMessage(challenge, d) 93 | ctx.sendDownstream(new DownstreamMessageEvent(channel,future,msg,null)) 94 | } 95 | 96 | protected def verifyChallengeAck(peerDigest : Array[Byte]) { 97 | val ourDigest = digest(challenge, cookie) 98 | if (!digestEquals(ourDigest, peerDigest)) { 99 | throw new ErlangAuthException("Peer authentication error.") 100 | } 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/Clock.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | trait Clock { 4 | def currentTimeMillis : Long 5 | } 6 | 7 | class SystemClock extends Clock { 8 | override def currentTimeMillis = System.currentTimeMillis 9 | } 10 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ErlangHandler.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import org.jboss.netty._ 19 | import channel._ 20 | import scalang._ 21 | import com.boundary.logula.Logging 22 | 23 | class ErlangHandler( 24 | node : ErlangNode, 25 | afterHandshake : Channel => Unit = { _ => Unit }) extends SimpleChannelUpstreamHandler with Logging { 26 | 27 | @volatile var peer : Symbol = null 28 | 29 | override def exceptionCaught(ctx : ChannelHandlerContext, e : ExceptionEvent) { 30 | log.error(e.getCause, "error caught in erlang handler %s", peer) 31 | ctx.getChannel.close 32 | } 33 | 34 | override def messageReceived(ctx : ChannelHandlerContext, e : MessageEvent) { 35 | val msg = e.getMessage 36 | log.debug("handler message %s", msg) 37 | msg match { 38 | case Tick => 39 | ctx.getChannel.write(Tock) //channel heartbeat for erlang 40 | case HandshakeFailed(name) => 41 | //not much we can do here? 42 | ctx.getChannel.close 43 | case HandshakeSucceeded(name, channel) => 44 | peer = name 45 | node.registerConnection(name, channel) 46 | afterHandshake(channel) 47 | case LinkMessage(from, to) => 48 | log.debug("received link request from %s.", from) 49 | node.linkWithoutNotify(from, to, e.getChannel) 50 | case SendMessage(to, msg) => 51 | node.handleSend(to, msg) 52 | case ExitMessage(from, to, reason) => 53 | node.remoteBreak(Link(from, to), reason) 54 | case Exit2Message(from, to, reason) => 55 | node.remoteBreak(Link(from, to), reason) 56 | case UnlinkMessage(from, to) => 57 | node.unlink(from, to) 58 | case RegSend(from, to, msg) => 59 | node.handleSend(to, msg) 60 | case MonitorMessage(monitoring, monitored, ref) => 61 | node.monitorWithoutNotify(monitoring, monitored, ref, e.getChannel) 62 | case DemonitorMessage(monitoring, monitored, ref) => 63 | node.demonitor(monitoring, monitored, ref) 64 | case MonitorExitMessage(monitored, monitoring, ref, reason) => 65 | node.remoteMonitorExit(Monitor(monitoring, monitored, ref), reason) 66 | } 67 | } 68 | 69 | override def channelDisconnected(ctx : ChannelHandlerContext, e : ChannelStateEvent) { 70 | log.info("channel disconnected %s %s. peer: %s", ctx, e, peer) 71 | if (peer != null) { 72 | node.disconnected(peer, e.getChannel) 73 | } 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ErlangNodeClient.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import java.net.InetSocketAddress 19 | import org.jboss.{netty => netty} 20 | import scalang._ 21 | import netty.channel._ 22 | import netty.bootstrap._ 23 | import netty.handler.codec.frame._ 24 | import socket.nio.NioClientSocketChannelFactory 25 | import com.boundary.logula.Logging 26 | 27 | 28 | class ErlangNodeClient( 29 | node : ErlangNode, 30 | peer : Symbol, 31 | host : String, 32 | port : Int, 33 | control : Option[Any], 34 | typeFactory : TypeFactory, 35 | typeEncoder : TypeEncoder, 36 | typeDecoder : TypeDecoder, 37 | afterHandshake : Channel => Unit) extends Logging 38 | { 39 | val bootstrap = new ClientBootstrap( 40 | new NioClientSocketChannelFactory( 41 | node.poolFactory.createBossPool, 42 | node.poolFactory.createWorkerPool)) 43 | bootstrap.setPipelineFactory(new ChannelPipelineFactory { 44 | def getPipeline : ChannelPipeline = { 45 | val pipeline = Channels.pipeline 46 | 47 | val handshakeDecoder = new HandshakeDecoder 48 | handshakeDecoder.mode = 'challenge //first message on the client side is challenge, not name 49 | pipeline.addLast("executionHandler", node.executionHandler) 50 | pipeline.addLast("handshakeFramer", new LengthFieldBasedFrameDecoder(Short.MaxValue, 0, 2, 0, 2)) 51 | pipeline.addLast("handshakeDecoder", handshakeDecoder) 52 | pipeline.addLast("handshakeEncoder", new HandshakeEncoder) 53 | pipeline.addLast("handshakeHandler", new ClientHandshakeHandler(node.name, node.cookie, node.posthandshake)) 54 | pipeline.addLast("erlangFramer", new LengthFieldBasedFrameDecoder(Int.MaxValue, 0, 4, 0, 4)) 55 | pipeline.addLast("encoderFramer", new LengthFieldPrepender(4)) 56 | pipeline.addLast("erlangDecoder", new ScalaTermDecoder(peer, typeFactory, typeDecoder)) 57 | pipeline.addLast("erlangEncoder", new ScalaTermEncoder(peer, typeEncoder)) 58 | pipeline.addLast("erlangHandler", new ErlangHandler(node, afterHandshake)) 59 | 60 | pipeline 61 | } 62 | }) 63 | 64 | val future = bootstrap.connect(new InetSocketAddress(host, port)) 65 | val channel = future.getChannel 66 | future.addListener(new ChannelFutureListener { 67 | def operationComplete(f : ChannelFuture) { 68 | if (f.isSuccess) { 69 | for (c <- control) { 70 | channel.write(c) 71 | } 72 | } else { 73 | node.disconnected(peer, channel) 74 | } 75 | } 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ErlangNodeServer.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import java.net.InetSocketAddress 19 | import org.jboss.{netty => netty} 20 | import scalang._ 21 | import netty.channel._ 22 | import netty.bootstrap._ 23 | import netty.handler.codec.frame._ 24 | import socket.nio.NioServerSocketChannelFactory 25 | import com.boundary.logula.Logging 26 | 27 | class ErlangNodeServer(node : ErlangNode, typeFactory : TypeFactory, typeEncoder: TypeEncoder, 28 | typeDecoder : TypeDecoder) extends Logging { 29 | val bootstrap = new ServerBootstrap( 30 | new NioServerSocketChannelFactory( 31 | node.poolFactory.createBossPool, 32 | node.poolFactory.createWorkerPool)) 33 | bootstrap.setPipelineFactory(new ChannelPipelineFactory { 34 | def getPipeline : ChannelPipeline = { 35 | val pipeline = Channels.pipeline 36 | pipeline.addLast("executionHandler", node.executionHandler) 37 | pipeline.addLast("handshakeFramer", new LengthFieldBasedFrameDecoder(Short.MaxValue, 0, 2, 0, 2)) 38 | pipeline.addLast("handshakeDecoder", new HandshakeDecoder) 39 | pipeline.addLast("handshakeEncoder", new HandshakeEncoder) 40 | pipeline.addLast("handshakeHandler", new ServerHandshakeHandler(node.name, node.cookie, node.posthandshake)) 41 | pipeline.addLast("erlangFramer", new LengthFieldBasedFrameDecoder(Int.MaxValue, 0, 4, 0, 4)) 42 | pipeline.addLast("encoderFramer", new LengthFieldPrepender(4)) 43 | pipeline.addLast("erlangDecoder", new ScalaTermDecoder('server, typeFactory, typeDecoder)) 44 | pipeline.addLast("erlangEncoder", new ScalaTermEncoder('server, typeEncoder)) 45 | pipeline.addLast("erlangHandler", new ErlangHandler(node)) 46 | 47 | pipeline 48 | } 49 | }) 50 | 51 | val channel = bootstrap.bind(new InetSocketAddress(0)) 52 | def port = channel.getLocalAddress.asInstanceOf[InetSocketAddress].getPort 53 | } 54 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ExitListenable.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | trait ExitListenable { 21 | @volatile var exitListeners : List[ExitListener] = Nil 22 | 23 | def addExitListener(listener : ExitListener) { 24 | exitListeners = listener :: exitListeners 25 | } 26 | 27 | def notifyExit(from : Pid, reason : Any) { 28 | for (l <- exitListeners) { 29 | l.handleExit(from, reason) 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ExitListener.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | /** 21 | Exit notifications are intended for internal book-keeping tasks. They are not meant for link breakages, 22 | which require the pid of both ends. 23 | */ 24 | 25 | trait ExitListener { 26 | def handleExit(from : Pid, reason : Any) 27 | } 28 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/FailureDetectionHandler.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.jboss.{netty => netty} 4 | import netty.channel._ 5 | import netty.util._ 6 | import netty.handler.timeout._ 7 | import java.util.concurrent._ 8 | import com.boundary.logula.Logging 9 | 10 | class FailureDetectionHandler(node : Symbol, clock : Clock, tickTime : Int, timer : Timer) extends SimpleChannelHandler with Logging { 11 | @volatile var nextTick : Timeout = null 12 | @volatile var lastTimeReceived = 0l 13 | @volatile var ctx : ChannelHandlerContext = null 14 | val exception = new ReadTimeoutException 15 | 16 | override def channelOpen(ctx : ChannelHandlerContext, e : ChannelStateEvent) { 17 | this.ctx = ctx 18 | lastTimeReceived = clock.currentTimeMillis 19 | scheduleTick 20 | } 21 | 22 | override def channelClosed(ctx : ChannelHandlerContext, e : ChannelStateEvent) { 23 | if (nextTick != null) nextTick.cancel 24 | } 25 | 26 | override def messageReceived(ctx : ChannelHandlerContext, e : MessageEvent) { 27 | lastTimeReceived = clock.currentTimeMillis 28 | e.getMessage match { 29 | case Tick => 30 | if (nextTick != null) nextTick.cancel 31 | ctx.getChannel.write(Tock) 32 | case _ => 33 | ctx.sendUpstream(e); 34 | } 35 | } 36 | 37 | object TickTask extends TimerTask { 38 | override def run(timeout : Timeout) { 39 | val last = (clock.currentTimeMillis - lastTimeReceived) / 1000 40 | if (last > (tickTime - tickTime/4)) { 41 | log.warn("Connection to %s has failed for %d seconds. Closing the connection.", node, last) 42 | Channels.fireExceptionCaught(ctx, exception); 43 | } 44 | ctx.getChannel.write(Tick) 45 | scheduleTick 46 | } 47 | } 48 | 49 | def scheduleTick { 50 | nextTick = timer.newTimeout(TickTask, tickTime / 4, TimeUnit.SECONDS) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/HandshakeDecoder.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import org.jboss.{netty => netty} 19 | import netty.handler.codec.oneone._ 20 | import netty.channel._ 21 | import netty.buffer._ 22 | 23 | class HandshakeDecoder extends OneToOneDecoder { 24 | 25 | //we need to have a dirty fucking mode context 26 | //because name messages and challenge replies have 27 | //the same identifier 28 | @volatile var mode = 'name 29 | 30 | def decode(ctx : ChannelHandlerContext, channel : Channel, obj : Any) : Object = { 31 | //dispatch on first byte 32 | val buffer = obj.asInstanceOf[ChannelBuffer] 33 | if (!buffer.readable) { 34 | return buffer 35 | } 36 | buffer.markReaderIndex 37 | (mode, buffer.readByte) match { 38 | case ('name, 110) => //name message 39 | val version = buffer.readShort 40 | val flags = buffer.readInt 41 | val nameLength = buffer.readableBytes 42 | val bytes = new Array[Byte](nameLength) 43 | buffer.readBytes(bytes) 44 | mode = 'challenge 45 | NameMessage(version, flags, new String(bytes)) 46 | case ('challenge, 110) => //challenge message 47 | val version = buffer.readShort 48 | val flags = buffer.readInt 49 | val challenge = buffer.readInt 50 | val nameLength = buffer.readableBytes 51 | val bytes = new Array[Byte](nameLength) 52 | buffer.readBytes(bytes) 53 | ChallengeMessage(version, flags, challenge, new String(bytes)) 54 | case (_, 115) => //status message 55 | val statusLength = buffer.readableBytes 56 | val bytes = new Array[Byte](statusLength) 57 | buffer.readBytes(bytes) 58 | StatusMessage(new String(bytes)) 59 | case (_, 114) => //reply message 60 | val challenge = buffer.readInt 61 | val digestLength = buffer.readableBytes 62 | val bytes = new Array[Byte](digestLength) 63 | buffer.readBytes(bytes) 64 | ChallengeReplyMessage(challenge, bytes) 65 | case (_, 97) => //ack message 66 | val digestLength = buffer.readableBytes 67 | val bytes = new Array[Byte](digestLength) 68 | buffer.readBytes(bytes) 69 | ChallengeAckMessage(bytes) 70 | case (_, _) => // overwhelmingly likely to be race between first message in and removal from pipeline 71 | buffer.resetReaderIndex 72 | buffer 73 | } 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/HandshakeEncoder.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import org.jboss.{netty => netty} 19 | import netty.handler.codec.oneone._ 20 | import netty.channel._ 21 | import netty.buffer._ 22 | 23 | class HandshakeEncoder extends OneToOneEncoder { 24 | 25 | def encode(ctx : ChannelHandlerContext, channel : Channel, obj : Any) : Object = { 26 | obj match { 27 | case NameMessage(version, flags, name) => 28 | val bytes = name.getBytes 29 | val length = 7 + bytes.length 30 | val buffer = ChannelBuffers.dynamicBuffer(length+2) 31 | buffer.writeShort(length) 32 | buffer.writeByte(110) 33 | buffer.writeShort(version) 34 | buffer.writeInt(flags) 35 | buffer.writeBytes(bytes) 36 | buffer 37 | case StatusMessage(status) => 38 | val bytes = status.getBytes 39 | val length = 1 + bytes.length 40 | val buffer = ChannelBuffers.dynamicBuffer(length+2) 41 | buffer.writeShort(length) 42 | buffer.writeByte(115) 43 | buffer.writeBytes(bytes) 44 | buffer 45 | case ChallengeMessage(version, flags, challenge, name) => 46 | val bytes = name.getBytes 47 | val length = 11 + bytes.length 48 | val buffer = ChannelBuffers.dynamicBuffer(length+2) 49 | buffer.writeShort(length) 50 | buffer.writeByte(110) 51 | buffer.writeShort(version) 52 | buffer.writeInt(flags) 53 | buffer.writeInt(challenge) 54 | buffer.writeBytes(bytes) 55 | buffer 56 | case ChallengeReplyMessage(challenge, digest) => 57 | val length = 5 + digest.length 58 | val buffer = ChannelBuffers.dynamicBuffer(length+2) 59 | buffer.writeShort(length) 60 | buffer.writeByte(114) 61 | buffer.writeInt(challenge) 62 | buffer.writeBytes(digest) 63 | buffer 64 | case ChallengeAckMessage(digest) => 65 | val length = 1 + digest.length 66 | val buffer = ChannelBuffers.dynamicBuffer(length+2) 67 | buffer.writeShort(length) 68 | buffer.writeByte(97) 69 | buffer.writeBytes(digest) 70 | buffer 71 | case buffer : ChannelBuffer => //we have yet to be removed from the conn 72 | buffer 73 | } 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/HandshakeHandler.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import org.jboss.{netty => netty} 19 | import netty.channel._ 20 | import scalang._ 21 | import util._ 22 | import java.util.ArrayDeque 23 | import scala.math._ 24 | import scala.collection.JavaConversions._ 25 | import java.security.{SecureRandom,MessageDigest} 26 | import com.boundary.logula.Logging 27 | 28 | abstract class HandshakeHandler(posthandshake : (Symbol,ChannelPipeline) => Unit) extends SimpleChannelHandler with StateMachine with Logging { 29 | override val start = 'disconnected 30 | @volatile var ctx : ChannelHandlerContext = null 31 | @volatile var peer : Symbol = null 32 | @volatile var challenge : Int = 0 33 | @volatile var peerChallenge : Int = 0 34 | 35 | val messages = new ArrayDeque[MessageEvent] 36 | val random = SecureRandom.getInstance("SHA1PRNG") 37 | 38 | def isVerified = currentState == 'verified 39 | 40 | //handler callbacks 41 | override def messageReceived(ctx : ChannelHandlerContext, e : MessageEvent) { 42 | this.ctx = ctx 43 | val msg = e.getMessage 44 | if (isVerified) { 45 | super.messageReceived(ctx, e) 46 | return 47 | } 48 | 49 | event(msg) 50 | } 51 | 52 | override def channelConnected(ctx : ChannelHandlerContext, e : ChannelStateEvent) { 53 | this.ctx = ctx 54 | val channel = ctx.getChannel 55 | val future = Channels.future(channel) 56 | event(ConnectedMessage) 57 | } 58 | 59 | override def channelClosed(ctx : ChannelHandlerContext, e : ChannelStateEvent) { 60 | this.ctx = ctx 61 | log.error("Channel closed during handshake") 62 | handshakeFailed 63 | } 64 | 65 | override def exceptionCaught(ctx : ChannelHandlerContext, e : ExceptionEvent) { 66 | this.ctx = ctx 67 | log.error(e.getCause, "Exception caught during erlang handshake: ") 68 | handshakeFailed 69 | } 70 | 71 | override def writeRequested(ctx : ChannelHandlerContext, e : MessageEvent) { 72 | this.ctx = ctx 73 | if (isVerified) { 74 | super.writeRequested(ctx,e) 75 | } else { 76 | messages.offer(e) 77 | } 78 | } 79 | 80 | //utility methods 81 | protected def digest(challenge : Int, cookie : String) : Array[Byte] = { 82 | val masked = mask(challenge) 83 | val md5 = MessageDigest.getInstance("MD5") 84 | md5.update(cookie.getBytes) 85 | md5.update(masked.toString.getBytes) 86 | md5.digest 87 | } 88 | 89 | def mask(challenge : Int) : Long = { 90 | if (challenge < 0) { 91 | (1L << 31) | (challenge & 0x7FFFFFFFL) 92 | } else { 93 | challenge.toLong 94 | } 95 | } 96 | 97 | protected def digestEquals(a : Array[Byte], b : Array[Byte]) : Boolean = { 98 | var equals = true 99 | if (a.length != b.length) { 100 | equals = false 101 | } 102 | val length = min(a.length,b.length) 103 | for (i <- (0 until length)) { 104 | equals &&= (a(i) == b(i)) 105 | } 106 | equals 107 | } 108 | 109 | protected def drainQueue { 110 | val p = ctx.getPipeline 111 | val keys = p.toMap.keySet 112 | for (name <- List("handshakeFramer", "handshakeDecoder", "handshakeEncoder", "handshakeHandler"); if keys.contains(name)) { 113 | p.remove(name) 114 | } 115 | posthandshake(peer,p) 116 | 117 | for (msg <- messages) { 118 | ctx.sendDownstream(msg) 119 | } 120 | messages.clear 121 | } 122 | 123 | protected def handshakeSucceeded { 124 | ctx.sendUpstream(new UpstreamMessageEvent(ctx.getChannel, HandshakeSucceeded(peer, ctx.getChannel), null)) 125 | } 126 | 127 | protected def handshakeFailed { 128 | ctx.getChannel.close 129 | ctx.sendUpstream(new UpstreamMessageEvent(ctx.getChannel, HandshakeFailed(peer), null)) 130 | } 131 | } 132 | 
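A minimal standalone sketch of the challenge digest computed by HandshakeHandler above; DigestSketch and the sample values are illustrative and not part of the library. The signed 32-bit challenge is reinterpreted as an unsigned integer, rendered as a decimal string, appended to the shared cookie, and MD5-hashed, so both peers can compare byte-identical digests.

import java.security.MessageDigest

// Illustrative sketch mirroring HandshakeHandler.mask and HandshakeHandler.digest.
object DigestSketch {
  // Reinterpret the signed 32-bit challenge as unsigned (equivalent to HandshakeHandler.mask).
  def mask(challenge : Int) : Long = challenge & 0xFFFFFFFFL

  // MD5(cookie bytes ++ decimal string of the unsigned challenge).
  def digest(challenge : Int, cookie : String) : Array[Byte] = {
    val md5 = MessageDigest.getInstance("MD5")
    md5.update(cookie.getBytes)
    md5.update(mask(challenge).toString.getBytes)
    md5.digest
  }

  def main(args : Array[String]) {
    // challenge -1 masks to 4294967295, so this hashes the string "cookie4294967295"
    println(digest(-1, "cookie").map("%02x".format(_)).mkString)
  }
}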
-------------------------------------------------------------------------------- /src/main/scala/scalang/node/HandshakeMessages.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import org.jboss.netty.channel.Channel 19 | import scala.collection.mutable.StringBuilder 20 | 21 | case object ConnectedMessage 22 | 23 | case class NameMessage(version : Short, flags : Int, name : String) 24 | 25 | case class StatusMessage(status : String) 26 | 27 | case class ChallengeMessage(version : Short, flags : Int, challenge : Int, name : String) 28 | 29 | case class ChallengeReplyMessage(challenge : Int, digest : Array[Byte]) { 30 | override def toString : String = { 31 | val b = new StringBuilder("ChallengeReplyMessage(") 32 | b ++= challenge.toString 33 | b ++= ", " 34 | b ++= digest.deep.toString 35 | b ++= ")" 36 | b.toString 37 | } 38 | } 39 | 40 | case class ChallengeAckMessage(digest : Array[Byte]) { 41 | override def toString : String = { 42 | val b = new StringBuilder("ChallengeAckMessage(") 43 | b ++= digest.deep.toString 44 | b ++= ")" 45 | b.toString 46 | } 47 | } 48 | 49 | case class HandshakeSucceeded(node : Symbol, channel : Channel) 50 | 51 | case class HandshakeFailed(node : Symbol) 52 | 53 | object DistributionFlags { 54 | val published = 1 55 | val atomCache = 2 56 | val extendedReferences = 4 57 | val distMonitor = 8 58 | val funTags = 0x10 59 | val distMonitorName = 0x20 60 | val hiddenAtomCache = 0x40 61 | val newFunTags = 0x80 62 | val extendedPidsPorts = 0x100 63 | val exportPtrTag = 0x200 64 | val bitBinaries = 0x400 65 | val newFloats = 0x800 66 | val smallAtomTags = 0x4000 67 | 68 | val default = extendedReferences | extendedPidsPorts | 69 | bitBinaries | newFloats | funTags | newFunTags | 70 | distMonitor | distMonitorName | smallAtomTags 71 | } 72 | 73 | class ErlangAuthException(msg : String) extends Exception(msg) 74 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/Link.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | case class Link(from : Pid, to : Pid) extends LinkListenable { 21 | def break(reason : Any) { 22 | notifyBreak(this, reason) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/LinkListenable.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | trait LinkListenable { 21 | @volatile var linkListeners : List[LinkListener] = Nil 22 | 23 | def addLinkListener(listener : LinkListener) { 24 | linkListeners = listener :: linkListeners 25 | } 26 | 27 | def notifyBreak(link : Link, reason : Any) { 28 | for (listener <- linkListeners) { 29 | listener.break(link, reason) 30 | } 31 | } 32 | 33 | def notifyDeliverLink(link : Link) { 34 | for (listener <- linkListeners) { 35 | listener.deliverLink(link) 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/LinkListener.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | trait LinkListener { 21 | def deliverLink(link : Link) 22 | 23 | def break(link : Link, reason : Any) 24 | } 25 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/Mailbox.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | import java.util.concurrent.TimeUnit 20 | import concurrent.forkjoin.LinkedTransferQueue 21 | 22 | trait Mailbox { 23 | def self : Pid 24 | def receive : Any 25 | def receive(timeout : Long) : Option[Any] 26 | def send(pid : Pid, msg : Any) : Any 27 | def send(name : Symbol, msg : Any) : Any 28 | def send(dest : (Symbol, Symbol), from : Pid, msg : Any) : Any 29 | def exit(reason : Any) 30 | def link(to : Pid) 31 | } 32 | 33 | class MailboxProcess(ctx : ProcessContext) extends ProcessAdapter with Mailbox { 34 | val self = ctx.pid 35 | val fiber = ctx.fiber 36 | val referenceCounter = ctx.referenceCounter 37 | 38 | val queue = new LinkedTransferQueue[Any] 39 | def cleanup {} 40 | val adapter = ctx.adapter 41 | 42 | def receive : Any = { 43 | queue.take 44 | } 45 | 46 | def receive(timeout : Long) : Option[Any] = { 47 | Option(queue.poll(timeout, TimeUnit.MILLISECONDS)) 48 | } 49 | 50 | override def handleMessage(msg : Any) { 51 | queue.offer(msg) 52 | } 53 | 54 | override def handleExit(from : Pid, reason : Any) { 55 | exit(reason) 56 | } 57 | 58 | override def handleMonitorExit(monitored : Any, ref : Reference, reason : Any) { 59 | queue.offer(('DOWN, ref, 'process, monitored, reason)) 60 | } 61 | 62 | def send(to : Pid, msg : Any) = notifySend(to, msg) 63 | def send(dest : (Symbol, Symbol), from : Pid, msg : Any) = notifySend(dest, from, msg) 64 | def send(name : Symbol, msg : Any) = notifySend(name, msg) 65 | } 66 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/Monitor.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2012, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | case class Monitor(monitoring : Pid, monitored : Any, ref : Reference) extends MonitorListenable { 21 | def monitorExit(reason : Any) { 22 | notifyMonitorExit(this, reason) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/MonitorListenable.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2012, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | trait MonitorListenable { 21 | @volatile var monitorListeners : List[MonitorListener] = Nil 22 | 23 | def addMonitorListener(listener : MonitorListener) { 24 | monitorListeners = listener :: monitorListeners 25 | } 26 | 27 | def notifyMonitorExit(monitor : Monitor, reason : Any) { 28 | for (listener <- monitorListeners) { 29 | listener.monitorExit(monitor, reason) 30 | } 31 | } 32 | 33 | def notifyDeliverMonitor(monitor : Monitor) { 34 | for (listener <- monitorListeners) { 35 | listener.deliverMonitor(monitor) 36 | } 37 | } 38 | 39 | } 40 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/MonitorListener.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2012, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | trait MonitorListener { 21 | def deliverMonitor(monitor : Monitor) 22 | 23 | def monitorExit(monitor : Monitor, reason : Any) 24 | } 25 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/NetKernel.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | class NetKernel(ctx : ProcessContext) extends Process(ctx : ProcessContext) { 21 | 22 | override def onMessage(msg : Any) = msg match { 23 | case (Symbol("$gen_call"), (pid : Pid, ref : Reference), ('is_auth, node : Symbol)) => 24 | pid ! (ref, 'yes) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/NodeMessages.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | case class LinkMessage(from : Pid, to : Pid) 21 | 22 | case class SendMessage(to : Pid, msg : Any) 23 | 24 | case class ExitMessage(from : Pid, to : Pid, reason : Any) 25 | 26 | case class Exit2Message(from : Pid, to : Pid, reason : Any) 27 | 28 | case class UnlinkMessage(from : Pid, to : Pid) 29 | 30 | case class RegSend(from : Pid, to : Symbol, msg : Any) 31 | 32 | case object Tick 33 | 34 | case object Tock 35 | 36 | //must implement trace tags later 37 | 38 | case class MonitorMessage(monitoring : Pid, monitored : Any, ref : Reference) 39 | 40 | case class DemonitorMessage(monitoring : Pid, monitored : Any, ref : Reference) 41 | 42 | case class MonitorExitMessage(monitored : Any, monitoring : Pid, ref : Reference, reason : Any) 43 | 44 | class DistributedProtocolException(msg : String) extends Exception(msg) 45 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/PacketCounter.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.jboss.netty 4 | import netty.bootstrap._ 5 | import netty.channel._ 6 | import netty.handler.codec.frame._ 7 | import com.yammer.metrics.scala._ 8 | import java.util.concurrent._ 9 | 10 | class PacketCounter(name : String) extends SimpleChannelHandler with Instrumented { 11 | val ingress = metrics.meter("ingress", "packets", name, TimeUnit.SECONDS) 12 | val egress = metrics.meter("egress", "packets", name, TimeUnit.SECONDS) 13 | val exceptions = metrics.meter("exceptions", "exceptions", name, TimeUnit.SECONDS) 14 | 15 | override def messageReceived(ctx : ChannelHandlerContext, e : MessageEvent) { 16 | ingress.mark 17 | super.messageReceived(ctx, e) 18 | } 19 | 20 | override def exceptionCaught(ctx : ChannelHandlerContext, e : ExceptionEvent) { 21 | exceptions.mark 22 | super.exceptionCaught(ctx, e) 23 | } 24 | 25 | override def writeRequested(ctx : ChannelHandlerContext, e : MessageEvent) { 26 | egress.mark 27 | super.writeRequested(ctx, e) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ProcessAdapter.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2012, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | import com.yammer.metrics.scala._ 20 | import org.jetlang.fibers._ 21 | import org.jetlang.channels._ 22 | import org.jetlang.core._ 23 | import java.util.concurrent.TimeUnit 24 | import org.cliffc.high_scale_lib.NonBlockingHashSet 25 | import org.cliffc.high_scale_lib.NonBlockingHashMap 26 | import scala.collection.JavaConversions._ 27 | import com.boundary.logula.Logging 28 | 29 | abstract class ProcessHolder(ctx : ProcessContext) extends ProcessAdapter { 30 | val self = ctx.pid 31 | val fiber = ctx.fiber 32 | val messageRate = metrics.meter("messages", "messages", instrumentedName) 33 | val executionTimer = metrics.timer("execution", instrumentedName) 34 | def process : ProcessLike 35 | 36 | val msgChannel = new MemoryChannel[Any] 37 | msgChannel.subscribe(fiber, new Callback[Any] { 38 | def onMessage(msg : Any) { 39 | executionTimer.time { 40 | try { 41 | process.handleMessage(msg) 42 | } catch { 43 | case e : Throwable => 44 | log.error(e, "An error occurred in actor %s", process) 45 | process.exit(e.getMessage) 46 | } 47 | } 48 | } 49 | }) 50 | 51 | val exitChannel = new MemoryChannel[(Pid,Any)] 52 | exitChannel.subscribe(fiber, new Callback[(Pid,Any)] { 53 | def onMessage(msg : (Pid,Any)) { 54 | try { 55 | process.handleExit(msg._1, msg._2) 56 | } catch { 57 | case e : Throwable => 58 | log.error(e, "An error occurred during handleExit in actor %s", this) 59 | process.exit(e.getMessage) 60 | } 61 | } 62 | }) 63 | 64 | val monitorChannel = new MemoryChannel[(Any,Reference,Any)] 65 | monitorChannel.subscribe(fiber, new Callback[(Any,Reference,Any)] { 66 | def onMessage(msg : (Any,Reference,Any)) { 67 | try { 68 | process.handleMonitorExit(msg._1, msg._2, msg._3) 69 | } catch { 70 | case e : Throwable => 71 | log.error(e, "An error occurred during handleMonitorExit in actor %s", this) 72 | process.exit(e.getMessage) 73 | } 74 | } 75 | }) 76 | 77 | override def handleMessage(msg : Any) { 78 | messageRate.mark 79 | msgChannel.publish(msg) 80 | } 81 | 82 | override def handleExit(from : Pid, msg : Any) { 83 | exitChannel.publish((from,msg)) 84 | } 85 | 86 | override def handleMonitorExit(monitored : Any, ref : Reference, reason : Any) { 87 | monitorChannel.publish((monitored,ref,reason)) 88 | } 89 | 90 | def cleanup { 91 | fiber.dispose 92 | metricsRegistry.removeMetric(getClass, "messages", instrumentedName) 93 | metricsRegistry.removeMetric(getClass, "execution", instrumentedName) 94 | } 95 | } 96 | 97 | trait ProcessAdapter extends ExitListenable with SendListenable with LinkListenable with MonitorListenable with Instrumented with Logging { 98 | var state = 'alive 99 | def self : Pid 100 | def fiber : Fiber 101 | def referenceCounter : ReferenceCounter 102 | val links = new NonBlockingHashSet[Link] 103 | val monitors = new NonBlockingHashMap[Reference, Monitor] 104 | def instrumentedName = self.toErlangString 105 | def cleanup 106 | 107 | def handleMessage(msg : Any) 108 | def handleExit(from : Pid, msg : Any) 109 | def handleMonitorExit(monitored : Any, ref : Reference, reason : Any) 110 | 111 | def exit(reason : Any) { 112 | synchronized { 113 | if (state != 'alive) return 114 | state = 'dead 115 | } 116 | 117 | // Exit listeners first, so that process is removed from table. 
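    // Ordering note: exit listeners (node bookkeeping) run first, then linked processes are
    // broken and monitors are notified below, and cleanup (fiber disposal, metric removal) runs last.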
118 | for(e <- exitListeners) { 119 | e.handleExit(self, reason) 120 | } 121 | for (link <- links) { 122 | link.break(reason) 123 | } 124 | for (m <- monitors.values) { 125 | m.monitorExit(reason) 126 | } 127 | cleanup 128 | } 129 | 130 | def unlink(to : Pid) { 131 | links.remove(Link(self, to)) 132 | } 133 | 134 | def link(to : Pid) { 135 | val l = registerLink(to) 136 | for (listener <- linkListeners) { 137 | listener.deliverLink(l) 138 | } 139 | } 140 | 141 | def registerLink(to : Pid) : Link = { 142 | val l = Link(self, to) 143 | for (listener <- linkListeners) { 144 | l.addLinkListener(listener) 145 | } 146 | synchronized { 147 | if (state != 'alive) 148 | l.break('noproc) 149 | else 150 | links.add(l) 151 | } 152 | l 153 | } 154 | 155 | def monitor(monitored : Any) : Reference = { 156 | val m = Monitor(self, monitored, makeRef) 157 | for (listener <- monitorListeners) { 158 | listener.deliverMonitor(m) 159 | } 160 | m.ref 161 | } 162 | 163 | def demonitor(ref : Reference) { 164 | monitors.remove(ref) 165 | } 166 | 167 | def registerMonitor(monitoring : Pid, ref : Reference): Monitor = { 168 | registerMonitor(Monitor(monitoring, self, ref)) 169 | } 170 | 171 | private def registerMonitor(m : Monitor): Monitor = { 172 | for (listener <- monitorListeners) { 173 | m.addMonitorListener(listener) 174 | } 175 | synchronized { 176 | if (state != 'alive) 177 | m.monitorExit('noproc) 178 | else 179 | monitors.put(m.ref, m) 180 | } 181 | m 182 | } 183 | 184 | def makeRef = referenceCounter.makeRef 185 | 186 | def sendEvery(pid : Pid, msg : Any, delay : Long) { 187 | val runnable = new Runnable { 188 | def run = notifySend(pid,msg) 189 | } 190 | fiber.scheduleAtFixedRate(runnable, delay, delay, TimeUnit.MILLISECONDS) 191 | } 192 | 193 | def sendEvery(name : Symbol, msg : Any, delay : Long) { 194 | val runnable = new Runnable { 195 | def run = notifySend(name,msg) 196 | } 197 | fiber.scheduleAtFixedRate(runnable, delay, delay, TimeUnit.MILLISECONDS) 198 | } 199 | 200 | def sendEvery(name : (Symbol,Symbol), msg : Any, delay : Long) { 201 | val runnable = new Runnable { 202 | def run = notifySend(name,self,msg) 203 | } 204 | fiber.scheduleAtFixedRate(runnable, delay, delay, TimeUnit.MILLISECONDS) 205 | } 206 | 207 | def sendAfter(pid : Pid, msg : Any, delay : Long) { 208 | val runnable = new Runnable { 209 | def run { 210 | notifySend(pid, msg) 211 | } 212 | } 213 | fiber.schedule(runnable, delay, TimeUnit.MILLISECONDS) 214 | } 215 | 216 | def sendAfter(name : Symbol, msg : Any, delay : Long) { 217 | val runnable = new Runnable { 218 | def run { 219 | notifySend(name, msg) 220 | } 221 | } 222 | fiber.schedule(runnable, delay, TimeUnit.MILLISECONDS) 223 | } 224 | 225 | def sendAfter(dest : (Symbol,Symbol), msg : Any, delay : Long) { 226 | val runnable = new Runnable { 227 | def run { 228 | notifySend(dest, self, msg) 229 | } 230 | } 231 | fiber.schedule(runnable, delay, TimeUnit.MILLISECONDS) 232 | } 233 | } 234 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ProcessLauncher.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2012, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | import org.cliffc.high_scale_lib.NonBlockingHashSet 20 | import org.cliffc.high_scale_lib.NonBlockingHashMap 21 | import scala.collection.JavaConversions._ 22 | 23 | class ProcessLauncher[T <: Process](clazz : Class[T], ctx : ProcessContext) extends ProcessHolder(ctx) { 24 | val referenceCounter = ctx.referenceCounter 25 | var process : Process = null 26 | 27 | def init { 28 | val constructor = clazz.getConstructor(classOf[ProcessContext]) 29 | ctx.adapter = this 30 | process = constructor.newInstance(ctx) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ProcessLike.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | import com.yammer.metrics.scala._ 20 | import com.boundary.logula.Logging 21 | 22 | trait ProcessLike extends Instrumented with Logging { 23 | def adapter : ProcessAdapter 24 | def self : Pid 25 | 26 | def send(pid : Pid, msg : Any) = adapter.notifySend(pid,msg) 27 | def send(name : Symbol, msg : Any) = adapter.notifySend(name,msg) 28 | def send(dest : (Symbol,Symbol), from : Pid, msg : Any) = adapter.notifySend(dest,from,msg) 29 | 30 | def handleMessage(msg : Any) 31 | 32 | def handleExit(from : Pid, reason : Any) { 33 | exit(reason) 34 | } 35 | 36 | def handleMonitorExit(monitored : Any, ref : Reference, reason : Any) 37 | 38 | def exit(reason : Any) { 39 | adapter.exit(reason) 40 | } 41 | 42 | def makeRef = adapter.makeRef 43 | 44 | def unlink(to : Pid) { 45 | adapter.unlink(to) 46 | } 47 | 48 | def link(to : Pid) { 49 | adapter.link(to) 50 | } 51 | 52 | def monitor(monitored : Any): Reference = { 53 | adapter.monitor(monitored) 54 | } 55 | 56 | def demonitor(ref : Reference) { 57 | adapter.demonitor(ref) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ReferenceCounter.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | import scala.annotation.tailrec 20 | import java.util.concurrent._ 21 | import atomic._ 22 | import locks._ 23 | import java.util.Arrays 24 | 25 | class ReferenceCounter(name : Symbol, creation : Int) { 26 | @volatile var refid = Array(0,0,0) 27 | val lock = new ReentrantLock 28 | 29 | protected def increment { 30 | val newRefid = Arrays.copyOf(refid, 3) 31 | newRefid(0) += 1 32 | if (newRefid(0) > 0x3ffff) { 33 | newRefid(0) = 0 34 | newRefid(1) += 1 35 | if (newRefid(1) == 0) { 36 | newRefid(2) += 1 37 | } 38 | } 39 | refid = newRefid 40 | } 41 | 42 | def makeRef : Reference = { 43 | lock.lock 44 | try { 45 | val ref = Reference(name, refid, creation) 46 | increment 47 | ref 48 | } finally { 49 | lock.unlock 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ScalaTermDecoder.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import org.jboss.netty 19 | import netty.handler.codec.oneone._ 20 | import netty.channel._ 21 | import java.nio._ 22 | import netty.buffer._ 23 | import scala.annotation.tailrec 24 | import scalang._ 25 | import com.yammer.metrics.scala._ 26 | import scala.collection.mutable.ArrayBuffer 27 | import overlock.cache.CachedSymbol 28 | import sun.misc.Unsafe 29 | 30 | object ScalaTermDecoder { 31 | private val field = classOf[Unsafe].getDeclaredField("theUnsafe") 32 | field.setAccessible(true) 33 | val unsafe = field.get(classOf[ScalaTermDecoder]).asInstanceOf[Unsafe] 34 | 35 | val stringValueOffset = unsafe.objectFieldOffset(classOf[String].getDeclaredField("value")) 36 | private[this] val stringUsesCount = classOf[String].getDeclaredFields.map{f => f.getName}.contains("count") 37 | val stringCountOffset = { 38 | if (stringUsesCount) 39 | unsafe.objectFieldOffset(classOf[String].getDeclaredField("count")) 40 | else 41 | 0 42 | } 43 | 44 | // Initializes a string by creating an empty String object, then populating it with our own 45 | // backing char[] to prevent allocating / copying between byte[] or char[] twice. 
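  // The reflective offsets above assume the pre-Java 9 String layout: a char[] "value" field,
  // plus a "count" field only on older JDKs, which the stringUsesCount probe accounts for.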
46 | def fastString(buffer: ChannelBuffer, length: Int) : String = { 47 | val destArray = new Array[Char](length) 48 | 49 | var i = 0 50 | while (i < length) { 51 | destArray(i) = buffer.readByte().asInstanceOf[Char] 52 | i+=1 53 | } 54 | 55 | val result = unsafe.allocateInstance(classOf[String]).asInstanceOf[String] 56 | unsafe.putObject(result, stringValueOffset, destArray) 57 | if (stringUsesCount) { 58 | unsafe.putInt(result, stringCountOffset, length) 59 | } 60 | result 61 | } 62 | } 63 | 64 | class ScalaTermDecoder(peer : Symbol, factory : TypeFactory, decoder : TypeDecoder = NoneTypeDecoder) extends OneToOneDecoder with Instrumented { 65 | val decodeTimer = metrics.timer("decoding", peer.name) 66 | 67 | def decode(ctx : ChannelHandlerContext, channel : Channel, obj : Any) : Object = obj match { 68 | case buffer : ChannelBuffer => 69 | if (buffer.readableBytes > 0) { 70 | decodeTimer.time { 71 | readMessage(buffer) 72 | } 73 | } else { 74 | Tick 75 | } 76 | case _ => 77 | obj.asInstanceOf[AnyRef] 78 | } 79 | 80 | def readMessage(buffer : ChannelBuffer) : AnyRef = { 81 | val t = buffer.readByte 82 | if (t != 112) throw new DistributedProtocolException("Got message of type " + t) 83 | 84 | val version = buffer.readUnsignedByte 85 | if (version != 131) throw new DistributedProtocolException("Version mismatch " + version) 86 | readTerm(buffer) match { 87 | case (1, from : Pid, to : Pid) => 88 | LinkMessage(from, to) 89 | case (2, _, to : Pid) => 90 | buffer.skipBytes(1) 91 | val msg = readTerm(buffer) 92 | SendMessage(to, msg) 93 | case (3, from : Pid, to : Pid, reason : Any) => 94 | ExitMessage(from, to, reason) 95 | case (4, from : Pid, to : Pid) => 96 | UnlinkMessage(from, to) 97 | case (6, from : Pid, _, to : Symbol) => 98 | buffer.skipBytes(1) 99 | val msg = readTerm(buffer) 100 | RegSend(from, to, msg) 101 | case (8, from : Pid, to : Pid, reason : Any) => 102 | Exit2Message(from, to, reason) 103 | case (19, monitoring : Pid, monitored: Any, ref : Reference) => 104 | MonitorMessage(monitoring, monitored, ref) 105 | case (20, monitoring : Pid, monitored : Any, ref : Reference) => 106 | DemonitorMessage(monitoring, monitored, ref) 107 | case (21, monitored : Any, monitoring : Pid, ref : Reference, reason : Any) => 108 | MonitorExitMessage(monitored, monitoring, ref, reason) 109 | } 110 | } 111 | 112 | def readTerm(buffer : ChannelBuffer) : Any = { 113 | val typeOrdinal : Int = buffer.readUnsignedByte 114 | typeOrdinal match { 115 | case decoder(_) => 116 | decoder.decode(typeOrdinal, buffer) 117 | case 131 => //version derp 118 | readTerm(buffer) 119 | case 97 => //small integer 120 | buffer.readUnsignedByte.toInt 121 | case 98 => //integer 122 | buffer.readInt 123 | case 99 => //float string 124 | val floatString = ScalaTermDecoder.fastString(buffer, 31) 125 | floatString.toDouble 126 | case 100 => //atom OR boolean 127 | val len = buffer.readShort 128 | val str = ScalaTermDecoder.fastString(buffer, len) 129 | CachedSymbol(str) match { 130 | case 'true => true 131 | case 'false => false 132 | case atom => atom 133 | } 134 | case 101 => //reference 135 | val node = readTerm(buffer).asInstanceOf[Symbol] 136 | val id = buffer.readInt 137 | val creation = buffer.readUnsignedByte 138 | Reference(node, Seq(id), creation) 139 | case 102 => //port 140 | val node = readTerm(buffer).asInstanceOf[Symbol] 141 | val id = buffer.readInt 142 | val creation = buffer.readByte 143 | Port(node, id, creation) 144 | case 103 => //pid 145 | val node = readTerm(buffer).asInstanceOf[Symbol] 146 | val id = 
buffer.readInt 147 | val serial = buffer.readInt 148 | val creation = buffer.readUnsignedByte 149 | Pid(node,id,serial,creation) 150 | case 104 => //small tuple -- will be a scala tuple up to size 22 151 | val arity = buffer.readUnsignedByte 152 | readTuple(arity, buffer) 153 | case 105 => //large tuple -- will be an untyped erlang tuple 154 | val arity = buffer.readInt 155 | readTuple(arity, buffer) 156 | case 106 => //nil 157 | Nil 158 | case 107 => //string 159 | val length = buffer.readShort 160 | ScalaTermDecoder.fastString(buffer, length) 161 | case 108 => //list 162 | val length = buffer.readInt 163 | val (list, improper) = readList(length, buffer) 164 | improper match { 165 | case None => list 166 | case Some(imp) => new ImproperList(list, imp) 167 | } 168 | case 109 => //binary 169 | val length = buffer.readInt 170 | val byteBuffer = ByteBuffer.allocate(length) 171 | buffer.readBytes(byteBuffer) 172 | byteBuffer.flip 173 | byteBuffer 174 | case 110 => //small bignum 175 | val length = buffer.readUnsignedByte 176 | val sign = buffer.readUnsignedByte match { 177 | case 0 => 1 178 | case _ => -1 179 | } 180 | if (length <= 8) { 181 | readLittleEndianLong(length, sign, buffer) 182 | } else { 183 | val bytes = readReversed(length, buffer) 184 | BigInt(sign, bytes) 185 | } 186 | case 111 => //large bignum 187 | val length = buffer.readInt 188 | val sign = buffer.readUnsignedByte match { 189 | case 0 => 1 190 | case _ => -1 191 | } 192 | if (length <= 8) { 193 | readLittleEndianLong(length, sign, buffer) 194 | } else { 195 | val bytes = readReversed(length, buffer) 196 | BigInt(sign, bytes) 197 | } 198 | case 114 => //new reference 199 | val length = buffer.readShort 200 | val node = readTerm(buffer).asInstanceOf[Symbol] 201 | val creation = buffer.readUnsignedByte 202 | val id = (for(n <- (0 until length)) yield { 203 | buffer.readInt 204 | }).toSeq 205 | Reference(node, id, creation) 206 | case 115 => //small atom 207 | val length = buffer.readUnsignedByte 208 | val str = ScalaTermDecoder.fastString(buffer, length) 209 | CachedSymbol(str) match { 210 | case 'true => true 211 | case 'false => false 212 | case atom => atom 213 | } 214 | case 117 => //fun 215 | val numFree = buffer.readInt 216 | val pid = readTerm(buffer).asInstanceOf[Pid] 217 | val module = readTerm(buffer).asInstanceOf[Symbol] 218 | val index = readTerm(buffer).asInstanceOf[Int] 219 | val uniq = readTerm(buffer).asInstanceOf[Int] 220 | val vars = (for(n <- (0 until numFree)) yield { 221 | readTerm(buffer) 222 | }).toSeq 223 | Fun(pid,module,index,uniq,vars) 224 | case 112 => //new fun 225 | val size = buffer.readInt 226 | val arity = buffer.readUnsignedByte 227 | val uniq = new Array[Byte](16) 228 | buffer.readBytes(uniq) 229 | val index = buffer.readInt 230 | val numFree = buffer.readInt 231 | val module = readTerm(buffer).asInstanceOf[Symbol] 232 | val oldIndex = readTerm(buffer).asInstanceOf[Int] 233 | val oldUniq = readTerm(buffer).asInstanceOf[Int] 234 | val pid = readTerm(buffer).asInstanceOf[Pid] 235 | val vars = (for(n <- (0 until numFree)) yield { 236 | readTerm(buffer) 237 | }).toSeq 238 | NewFun(pid, module, oldIndex, oldUniq, arity, index, uniq, vars) 239 | case 113 => //export 240 | val module = readTerm(buffer).asInstanceOf[Symbol] 241 | val function = readTerm(buffer).asInstanceOf[Symbol] 242 | val arity = readTerm(buffer).asInstanceOf[Int] 243 | ExportFun(module, function, arity) 244 | case 77 => //bit binary 245 | val length = buffer.readInt 246 | val bits = buffer.readUnsignedByte 247 | val byteBuffer 
= ByteBuffer.allocate(length) 248 | buffer.readBytes(byteBuffer) 249 | byteBuffer.flip 250 | BitString(byteBuffer, bits) 251 | case 70 => //new float 252 | buffer.readDouble 253 | } 254 | } 255 | 256 | def readLittleEndianLong(length : Int, sign : Int, buffer : ChannelBuffer) : Long = { 257 | val bytes = new Array[Byte](8) 258 | buffer.readBytes(bytes, 0, length) 259 | val little = ChannelBuffers.wrappedBuffer(ByteOrder.LITTLE_ENDIAN, bytes) 260 | little.readLong 261 | } 262 | 263 | def readReversed(length : Int, buffer : ChannelBuffer) : Array[Byte] = { 264 | val bytes = new Array[Byte](length) 265 | for (n <- (1 to length)) { 266 | bytes(length-n) = buffer.readByte 267 | } 268 | bytes 269 | } 270 | 271 | def readList(length : Int, buffer : ChannelBuffer) : (List[Any], Option[Any]) = { 272 | var i = 0 273 | val b = new ArrayBuffer[Any](length) 274 | while (i <= length) { 275 | val term = readTerm(buffer) 276 | if (i == length) { 277 | term match { 278 | case Nil => return (b.toList, None) 279 | case improper => return (b.toList, Some(improper)) 280 | } 281 | } else { 282 | b += term 283 | i += 1 284 | } 285 | } 286 | (b.toList, None) 287 | } 288 | 289 | def readTuple(arity : Int, buffer : ChannelBuffer) = { 290 | readTerm(buffer) match { 291 | case name : Symbol => 292 | val reader = new TermReader(buffer, this) 293 | factory.createType(name, arity, reader) match { 294 | case Some(obj) => obj 295 | case None => 296 | readVanillaTuple(name, arity, buffer) 297 | } 298 | case first => 299 | readVanillaTuple(first, arity, buffer) 300 | } 301 | } 302 | 303 | def readVanillaTuple(first : Any, arity : Int, buffer : ChannelBuffer) : Any = arity match { 304 | case 1 => (first) 305 | case 2 => (first, readTerm(buffer)) 306 | case 3 => (first, readTerm(buffer), readTerm(buffer)) 307 | case 4 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer)) 308 | case 5 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 309 | case 6 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 310 | case 7 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 311 | case 8 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 312 | case 9 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 313 | case 10 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 314 | case 11 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 315 | case 12 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 316 | case 13 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 317 | case 14 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), 
readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 318 | case 15 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 319 | case 16 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 320 | case 17 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 321 | case 18 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 322 | case 19 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 323 | case 20 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 324 | case 21 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 325 | case 22 => (first, readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer), readTerm(buffer)) 326 | case _ => readBigTuple(first, arity, buffer) 327 | } 328 | 329 | def readBigTuple(first : Any, arity : Int, buffer : ChannelBuffer) : BigTuple = { 330 | val elements = (for(n <- (1 until arity)) yield { 331 | readTerm(buffer) 332 | }).toSeq 333 | new BigTuple(Seq(first) ++ elements) 334 | } 335 | } 336 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ScalaTermEncoder.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 
6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import org.jboss.netty 19 | import netty.handler.codec.oneone._ 20 | import netty.channel._ 21 | import java.nio._ 22 | import java.math.BigInteger 23 | import netty.buffer._ 24 | import scalang._ 25 | import java.util.Formatter 26 | import java.util.{List => JList} 27 | import scalang.util.ByteArray 28 | import scalang.util.CamelToUnder._ 29 | import com.yammer.metrics.scala._ 30 | import com.boundary.logula.Logging 31 | 32 | class ScalaTermEncoder(peer: Symbol, encoder: TypeEncoder = NoneTypeEncoder) extends OneToOneEncoder with Logging with Instrumented { 33 | 34 | val encodeTimer = metrics.timer("encoding", peer.name) 35 | 36 | override def encode(ctx : ChannelHandlerContext, channel : Channel, obj : Any) : Object = { 37 | log.debug("sending msg %s", obj) 38 | encodeTimer.time { 39 | val buffer = ChannelBuffers.dynamicBuffer(512) 40 | //write distribution header 41 | buffer.writeBytes(ByteArray(112,131)) 42 | obj match { 43 | case Tock => 44 | buffer.clear() 45 | case LinkMessage(from, to) => 46 | encodeObject(buffer, (1, from, to)) 47 | case SendMessage(to, msg) => 48 | encodeObject(buffer, (2, Symbol(""), to)) 49 | buffer.writeByte(131) 50 | encodeObject(buffer, msg) 51 | case ExitMessage(from, to, reason) => 52 | encodeObject(buffer, (3, from, to, reason)) 53 | case UnlinkMessage(from, to) => 54 | encodeObject(buffer, (4, from, to)) 55 | case RegSend(from, to, msg) => 56 | encodeObject(buffer, (6, from, Symbol(""), to)) 57 | buffer.writeByte(131) 58 | encodeObject(buffer, msg) 59 | case Exit2Message(from, to, reason) => 60 | encodeObject(buffer, (8, from, to, reason)) 61 | case MonitorMessage(monitoring, monitored, ref) => 62 | encodeObject(buffer, (19, monitoring, monitored, ref)) 63 | case DemonitorMessage(monitoring, monitored, ref) => 64 | encodeObject(buffer, (20, monitoring, monitored, ref)) 65 | case MonitorExitMessage(monitored, monitoring, ref, reason) => 66 | encodeObject(buffer, (21, monitoring, monitored, ref, reason)) 67 | } 68 | 69 | buffer 70 | } 71 | } 72 | 73 | def encodeObject(buffer : ChannelBuffer, obj : Any) : Unit = obj match { 74 | case encoder(_) => 75 | encoder.encode(obj, buffer) 76 | case i : Int if i >= 0 && i <= 255 => 77 | writeSmallInteger(buffer, i) 78 | case i : Int => 79 | writeInteger(buffer, i) 80 | case l : Long => 81 | writeLong(buffer, l) 82 | case f : Float => 83 | writeFloat(buffer, f) 84 | case d : Double => 85 | writeFloat(buffer, d) 86 | case true => 87 | writeAtom(buffer, 'true) 88 | case false => 89 | writeAtom(buffer, 'false) 90 | case s : Symbol => 91 | writeAtom(buffer, s) 92 | case Reference(node, id, creation) => //we only emit new references 93 | buffer.writeByte(114) 94 | buffer.writeShort(id.length) 95 | writeAtom(buffer, node) 96 | buffer.writeByte(creation) 97 | for (i <- id) { 98 | buffer.writeInt(i) 99 | } 100 | case Port(node, id, creation) => 101 | buffer.writeByte(102) 102 | writeAtom(buffer, node) 103 | buffer.writeInt(id) 104 | buffer.writeByte(creation) 105 | case Pid(node, id, serial, creation) => 106 | buffer.writeByte(103) 
107 | writeAtom(buffer, node) 108 | buffer.writeInt(id) 109 | buffer.writeInt(serial) 110 | buffer.writeByte(creation) 111 | case Fun(pid, module, index, uniq, vars) => 112 | buffer.writeByte(117) 113 | buffer.writeInt(vars.length) 114 | encodeObject(buffer, pid) 115 | writeAtom(buffer, module) 116 | encodeObject(buffer, index) 117 | encodeObject(buffer, uniq) 118 | for (v <- vars) { 119 | encodeObject(buffer, v) 120 | } 121 | case s : String => 122 | buffer.writeByte(107) 123 | val bytes = s.getBytes 124 | buffer.writeShort(bytes.length) 125 | buffer.writeBytes(bytes) 126 | case ImproperList(list, tail) => 127 | writeList(buffer, list, tail) 128 | case Nil => 129 | buffer.writeByte(106) 130 | case l : JList[Any] => 131 | writeJList(buffer, l, Nil) 132 | case l : List[Any] => 133 | writeList(buffer, l, Nil) 134 | case b : BigInteger => 135 | writeBigInt(buffer, b) 136 | case a : Array[Byte] => 137 | writeBinary(buffer, a) 138 | case b : ByteBuffer => 139 | writeBinary(buffer, b) 140 | case BitString(b, i) => 141 | writeBinary(buffer, b, i) 142 | case b : BigTuple => 143 | writeBigTuple(buffer, b) 144 | case p : Product => 145 | writeProduct(buffer, p) 146 | } 147 | 148 | def writeBinary(buffer : ChannelBuffer, b : ByteBuffer) { 149 | val length = b.remaining 150 | buffer.writeByte(109) 151 | buffer.writeInt(length) 152 | buffer.writeBytes(b) 153 | } 154 | 155 | def writeBinary(buffer : ChannelBuffer, a : Array[Byte]) { 156 | val length = a.length 157 | buffer.writeByte(109) 158 | buffer.writeInt(length) 159 | buffer.writeBytes(a) 160 | } 161 | 162 | def writeBinary(buffer : ChannelBuffer, b : ByteBuffer, i : Int) { 163 | val length = b.remaining 164 | buffer.writeByte(77) 165 | buffer.writeInt(length) 166 | buffer.writeByte(i) 167 | buffer.writeBytes(b) 168 | } 169 | 170 | def writeLong(buffer : ChannelBuffer, l : Long) { 171 | val sign = if (l < 0) 1 else 0 172 | val bytes = ByteBuffer.allocate(8) 173 | bytes.order(ByteOrder.LITTLE_ENDIAN) 174 | bytes.putLong(l) 175 | buffer.writeByte(110) 176 | buffer.writeByte(8) 177 | buffer.writeByte(sign) 178 | bytes.flip 179 | buffer.writeBytes(bytes) 180 | } 181 | 182 | def writeBigInt(buffer : ChannelBuffer, big : BigInteger) { 183 | val bytes = big.toByteArray 184 | val sign = if (big.signum < 0) 1 else 0 185 | val length = bytes.length 186 | if (length < 255) { 187 | buffer.writeByte(110) 188 | buffer.writeByte(length) 189 | } else { 190 | buffer.writeByte(111) 191 | buffer.writeInt(length) 192 | } 193 | buffer.writeByte(sign) 194 | for (i <- (1 to length)) { 195 | buffer.writeByte(bytes(length - i)) 196 | } 197 | } 198 | 199 | def writeList(buffer : ChannelBuffer, list : List[Any], tail : Any) { 200 | buffer.writeByte(108) 201 | buffer.writeInt(list.size) 202 | for (element <- list) { 203 | encodeObject(buffer, element) 204 | } 205 | encodeObject(buffer, tail) 206 | } 207 | 208 | def writeJList(buffer : ChannelBuffer, list : JList[Any], tail : Any) { 209 | buffer.writeByte(108) 210 | buffer.writeInt(list.size) 211 | val i = list.iterator() 212 | while(i.hasNext) { 213 | encodeObject(buffer, i.next()) 214 | } 215 | encodeObject(buffer, tail) 216 | } 217 | 218 | def writeAtom(buffer : ChannelBuffer, s : Symbol) { 219 | val bytes = s.name.getBytes 220 | if (bytes.length < 256) { 221 | buffer.writeByte(115) 222 | buffer.writeByte(bytes.length) 223 | } else { 224 | buffer.writeByte(100) 225 | buffer.writeShort(bytes.length) 226 | } 227 | buffer.writeBytes(bytes) 228 | } 229 | 230 | def writeSmallInteger(buffer : ChannelBuffer, i : Int) { 231 | 
buffer.writeByte(97) 232 | buffer.writeByte(i) 233 | } 234 | 235 | def writeInteger(buffer : ChannelBuffer, i : Int) { 236 | buffer.writeByte(98) 237 | buffer.writeInt(i) 238 | } 239 | 240 | def writeFloat(buffer : ChannelBuffer, d : Double) { 241 | if (d.isNaN) { 242 | writeAtom(buffer, 'nan) 243 | } else if (d.isPosInfinity) { 244 | writeAtom(buffer, 'infinity) 245 | } else if (d.isNegInfinity) { 246 | writeAtom(buffer, Symbol("-infinity")) 247 | } else { 248 | buffer.writeByte(70) 249 | buffer.writeLong(java.lang.Double.doubleToLongBits(d)) 250 | } 251 | } 252 | 253 | def writeStringFloat(buffer : ChannelBuffer, d : Double) { 254 | val formatter = new Formatter 255 | formatter.format("%.20e", d.asInstanceOf[AnyRef]) 256 | val str = formatter.toString.getBytes 257 | buffer.writeByte(99) 258 | buffer.writeBytes(str) 259 | } 260 | 261 | def writeProduct(buffer : ChannelBuffer, p : Product) { 262 | val name = p.productPrefix 263 | if (name.startsWith("Tuple")) { 264 | writeTuple(buffer, None, p) 265 | } else { 266 | writeTuple(buffer, Some(name.camelToUnderscore), p) 267 | } 268 | } 269 | 270 | def writeTuple(buffer : ChannelBuffer, tag : Option[String], p : Product) { 271 | val length = tag.size + p.productArity 272 | buffer.writeByte(104) 273 | buffer.writeByte(length) 274 | for (t <- tag) { 275 | writeAtom(buffer, Symbol(t)) 276 | } 277 | for (element <- p.productIterator) { 278 | encodeObject(buffer, element) 279 | } 280 | } 281 | 282 | def writeBigTuple(buffer : ChannelBuffer, tuple : BigTuple) { 283 | val length = tuple.productArity 284 | if (length < 255) { 285 | buffer.writeByte(104) 286 | buffer.writeByte(length) 287 | } else { 288 | buffer.writeByte(105) 289 | buffer.writeInt(length) 290 | } 291 | for (element <- tuple.productIterator) { 292 | encodeObject(buffer, element) 293 | } 294 | } 295 | } 296 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/SendListenable.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
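//
// SendListenable is the subject half of a small observer pattern: a component that originates
// sends keeps a list of SendListener instances (defined in SendListener.scala below) and
// notifies each of them on every outbound message. A hedged sketch of a listener -- the
// LoggingSendListener name is illustrative and not part of this source tree:
//
//   class LoggingSendListener extends SendListener {
//     def handleSend(to : Pid, msg : Any) { println("send to " + to + ": " + msg) }
//     def handleSend(to : Symbol, msg : Any) { println("send to " + to + ": " + msg) }
//     def handleSend(dest : (Symbol,Symbol), from : Pid, msg : Any) { println("send to " + dest + ": " + msg) }
//   }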
15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | trait SendListenable { 21 | @volatile var sendListeners : List[SendListener] = Nil 22 | 23 | def addSendListener(listener : SendListener) { 24 | sendListeners = listener :: sendListeners 25 | } 26 | 27 | def notifySend(pid : Pid, msg : Any) : Any = { 28 | for (l <- sendListeners) { 29 | l.handleSend(pid, msg) 30 | } 31 | } 32 | 33 | def notifySend(name : Symbol, msg : Any) : Any = { 34 | for (l <- sendListeners) { 35 | l.handleSend(name, msg) 36 | } 37 | } 38 | 39 | def notifySend(dest : (Symbol,Symbol), from : Pid, msg : Any) : Any = { 40 | for (l <- sendListeners) { 41 | l.handleSend(dest, from, msg) 42 | } 43 | } 44 | 45 | } 46 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/SendListener.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | 20 | trait SendListener { 21 | def handleSend(to : Pid, msg : Any) 22 | def handleSend(to : Symbol, msg : Any) 23 | def handleSend(to : (Symbol,Symbol), from : Pid, msg : Any) 24 | } 25 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ServerHandshakeHandler.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2011, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
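//
// ServerHandshakeHandler implements the accepting side of the Erlang distribution handshake:
// it waits for the peer's name, replies with a status and a random challenge, verifies the
// challenge reply digest against the shared cookie, and then acks. The transitions are
// declared with the StateMachine DSL from scalang.util (this file doubles as the usage
// example that StateMachine.scala's header comment leaves blank); each state pairs a name
// with a partial function from events to the next state's name, roughly:
//
//   states(
//     state('disconnected, { case ConnectedMessage => 'connected }),
//     state('connected, { case msg : NameMessage => /* send status + challenge */ 'challenge_sent }),
//     ...)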
15 | // 16 | package scalang.node 17 | 18 | import java.net._ 19 | import java.util.concurrent._ 20 | import atomic._ 21 | import org.jboss.{netty => netty} 22 | import netty.bootstrap._ 23 | import netty.channel._ 24 | import netty.handler.codec.frame._ 25 | import scalang._ 26 | import util._ 27 | import java.util.ArrayDeque 28 | import scala.annotation._ 29 | import scala.math._ 30 | import scala.collection.JavaConversions._ 31 | import java.security.{SecureRandom,MessageDigest} 32 | 33 | class ServerHandshakeHandler(name : Symbol, cookie : String, posthandshake : (Symbol,ChannelPipeline) => Unit) extends HandshakeHandler(posthandshake) { 34 | states( 35 | state('disconnected, { 36 | case ConnectedMessage => 'connected 37 | }), 38 | 39 | state('connected, { 40 | case msg : NameMessage => 41 | receiveName(msg) 42 | sendStatus 43 | sendChallenge 44 | 'challenge_sent 45 | }), 46 | 47 | state('challenge_sent, { 48 | case msg : ChallengeReplyMessage => 49 | verifyChallenge(msg) 50 | sendChallengeAck(msg) 51 | drainQueue 52 | handshakeSucceeded 53 | 'verified 54 | }), 55 | 56 | state('verified, { case _ => 'verified})) 57 | 58 | //state machine callbacks 59 | protected def receiveName(msg : NameMessage) { 60 | peer = Symbol(msg.name) 61 | } 62 | 63 | protected def sendStatus { 64 | val channel = ctx.getChannel 65 | val future = Channels.future(channel) 66 | ctx.sendDownstream(new DownstreamMessageEvent(channel,future,StatusMessage("ok"),null)) 67 | } 68 | 69 | protected def sendChallenge { 70 | val channel = ctx.getChannel 71 | val future = Channels.future(channel) 72 | challenge = random.nextInt 73 | val msg = ChallengeMessage(5, DistributionFlags.default, challenge, name.name) 74 | ctx.sendDownstream(new DownstreamMessageEvent(channel,future,msg,null)) 75 | } 76 | 77 | protected def verifyChallenge(msg : ChallengeReplyMessage) { 78 | val ourDigest = digest(challenge, cookie) 79 | if (!digestEquals(ourDigest, msg.digest)) { 80 | throw new ErlangAuthException("Peer authentication error.") 81 | } 82 | } 83 | 84 | protected def sendChallengeAck(msg : ChallengeReplyMessage) { 85 | val channel = ctx.getChannel 86 | val future = Channels.future(channel) 87 | val md5 = digest(msg.challenge, cookie) 88 | val msgOut = ChallengeAckMessage(md5) 89 | ctx.sendDownstream(new DownstreamMessageEvent(channel,future,msgOut,null)) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/main/scala/scalang/node/ServiceLauncher.scala: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2012, Boundary 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
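//
// ServiceLauncher mirrors ProcessLauncher (earlier in this tree): both reflectively look up
// the one-argument constructor of the supplied Process subclass and instantiate it in init;
// the only difference is that a ServiceContext[A] rather than a plain ProcessContext is
// wired in as the constructor argument.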
15 | // 16 | package scalang.node 17 | 18 | import scalang._ 19 | import org.cliffc.high_scale_lib.NonBlockingHashSet 20 | import org.cliffc.high_scale_lib.NonBlockingHashMap 21 | import scala.collection.JavaConversions._ 22 | 23 | class ServiceLauncher[A <: Product, T <: Process](clazz : Class[T], ctx : ServiceContext[A]) extends ProcessHolder(ctx) { 24 | val referenceCounter = ctx.referenceCounter 25 | var process : Process = null 26 | 27 | def init { 28 | val constructor = clazz.getConstructor(classOf[ServiceContext[_]]) 29 | ctx.adapter = this 30 | process = constructor.newInstance(ctx) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /src/main/scala/scalang/util/BatchPoolExecutor.scala: -------------------------------------------------------------------------------- 1 | package scalang.util 2 | 3 | import org.jetlang.core._ 4 | import java.util._ 5 | import java.util.concurrent._ 6 | import overlock.threadpool._ 7 | 8 | class BatchPoolExecutor(path : String, 9 | name : String, 10 | coreSize : Int, 11 | maxSize : Int, 12 | keepAlive : Long, 13 | unit : TimeUnit, 14 | queue : BlockingQueue[Runnable], 15 | factory : ThreadFactory) extends 16 | InstrumentedThreadPoolExecutor(path, name, coreSize, maxSize, keepAlive, unit, queue, factory) with 17 | BatchExecutor { 18 | 19 | override def execute(reader : EventReader) { 20 | 21 | // build a job which will run available work sequentially in a separate thread 22 | 23 | val tasks = new ArrayList[Runnable](reader.size()) 24 | var i = 0 25 | while(i < reader.size()) { 26 | tasks.add(reader.get(i)) 27 | i = i + 1 28 | } 29 | 30 | val job = 31 | new Runnable { 32 | def run() { 33 | val ti = tasks.iterator() 34 | while(ti.hasNext) { 35 | ti.next().run() 36 | } 37 | } 38 | } 39 | 40 | execute(job) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/main/scala/scalang/util/ByteArray.scala: -------------------------------------------------------------------------------- 1 | package scalang.util 2 | 3 | object ByteArray { 4 | def apply(values : Int*) : Array[Byte] = { 5 | Array[Byte](values.map(_.toByte) : _*) 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /src/main/scala/scalang/util/CamelToUnder.scala: -------------------------------------------------------------------------------- 1 | package scalang.util 2 | 3 | import java.lang.Character._ 4 | import scala.collection.mutable.StringBuilder 5 | 6 | object CamelToUnder { 7 | implicit def stringWrap(str : String) = new CamelToUnder(str) 8 | } 9 | 10 | class CamelToUnder(str : String) { 11 | def camelToUnderscore : String = { 12 | val b = new StringBuilder 13 | for (char <- str) { 14 | if (isUpperCase(char) && b.size == 0) { 15 | b += toLowerCase(char) 16 | } else if (isUpperCase(char)) { 17 | b ++= "_" 18 | b += toLowerCase(char) 19 | } else { 20 | b += char 21 | } 22 | } 23 | b.toString 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/main/scala/scalang/util/StateMachine.scala: -------------------------------------------------------------------------------- 1 | package scalang.util 2 | 3 | /** 4 | * State Machine 5 | * example usage: 6 | * 7 | */ 8 | 9 | trait StateMachine { 10 | 11 | def start : Symbol 12 | val mutex = new Object 13 | protected var stateList : List[State] = null 14 | @volatile protected var currentState = start 15 | 16 | def event(evnt : Any) { 17 | mutex.synchronized { 18 | if 
(currentState == null) { 19 | currentState = start 20 | } 21 | val state = stateList.find(_.name == currentState).getOrElse(throw new UndefinedStateException("state " + currentState + " is undefined")) 22 | val nextState = state.event(evnt) 23 | stateList.find(_.name == nextState).getOrElse(throw new UndefinedStateException("state " + nextState + " is undefined")) 24 | currentState = nextState 25 | } 26 | } 27 | 28 | protected def states(states : State*) { 29 | stateList = states.toList 30 | } 31 | 32 | protected def state(name : Symbol, transitions : PartialFunction[Any,Symbol]) = State(name, transitions) 33 | 34 | case class State(name : Symbol, transitions : PartialFunction[Any,Symbol]) { 35 | def event(evnt : Any) : Symbol = { 36 | if (!transitions.isDefinedAt(evnt)) { 37 | throw new UnexpectedEventException("State " + name + " does not have a transition for event " + evnt) 38 | } else { 39 | transitions(evnt) 40 | } 41 | } 42 | } 43 | 44 | class UnexpectedEventException(msg : String) extends Exception(msg) 45 | 46 | class UndefinedStateException(msg : String) extends Exception(msg) 47 | } 48 | -------------------------------------------------------------------------------- /src/main/scala/scalang/util/ThreadPoolFactory.scala: -------------------------------------------------------------------------------- 1 | package scalang.util 2 | 3 | import overlock.threadpool._ 4 | import java.util.concurrent._ 5 | import atomic._ 6 | import org.jetlang.core._ 7 | import org.jboss.netty.handler.execution.{MemoryAwareThreadPoolExecutor, OrderedMemoryAwareThreadPoolExecutor} 8 | import org.jboss.netty.util.ObjectSizeEstimator 9 | 10 | object ThreadPoolFactory { 11 | @volatile var factory : ThreadPoolFactory = new DefaultThreadPoolFactory 12 | } 13 | 14 | trait ThreadPoolFactory { 15 | 16 | /** 17 | * This creates a threadpool intended for use as the "boss" pool in netty connections. 18 | * The boss pool typically will only ever need one thread. It handles bookkeeping for netty. 19 | */ 20 | def createBossPool : Executor 21 | 22 | /** 23 | * The netty worker pool. These thread pools deal with the actual connection handling threads 24 | * in netty. If you want to tightly cap the number of threads that netty uses, only return a 25 | * single instance from this method. 26 | */ 27 | def createWorkerPool : Executor 28 | 29 | def createExecutorPool : Executor 30 | 31 | /** 32 | * The jetlang actor pool. This thread pool will be responsible for executing any actors 33 | * that are launched with an unthreaded batch executor. 34 | */ 35 | def createActorPool : Executor 36 | 37 | /** 38 | * Batch executor for pool-backed actors. If this returns the default unthreaded executor then 39 | * only one thread will be active in an actor's onMessage method at a time. If this returns a 40 | * threadpool backed batch executor then multiple threads will be active in a single actor.
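   *
   * A hedged usage sketch (the executor names are illustrative, not taken from this code base):
   * {{{
   * val factory  = ThreadPoolFactory.factory
   * val serial   = factory.createBatchExecutor("serial-actor", reentrant = false)
   * val parallel = factory.createBatchExecutor("parallel-actor", reentrant = true)
   * }}}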
41 | */ 42 | def createBatchExecutor(name : String, reentrant : Boolean) : BatchExecutor 43 | 44 | def createBatchExecutor(reentrant : Boolean) : BatchExecutor 45 | } 46 | 47 | object StupidObjectSizeEstimator extends ObjectSizeEstimator { 48 | def estimateSize(o : Any) = 1 49 | } 50 | 51 | class DefaultThreadPoolFactory extends ThreadPoolFactory { 52 | val cpus = Runtime.getRuntime.availableProcessors 53 | val max_threads = if ((2 * cpus) < 8) 8 else 2*cpus 54 | val max_memory = Runtime.getRuntime.maxMemory / 2 55 | //100 mb 56 | /*lazy val bossPool = new OrderedMemoryAwareThreadPoolExecutor(max_threads, 104857600, 104857600, 1000, TimeUnit.SECONDS, new NamedThreadFactory("boss")) 57 | //500 mb 58 | lazy val workerPool = new OrderedMemoryAwareThreadPoolExecutor(max_threads, 524288000, 524288000, 1000, TimeUnit.SECONDS, new NamedThreadFactory("worker")) 59 | lazy val actorPool = new MemoryAwareThreadPoolExecutor(max_threads, 100, 100, 1000, TimeUnit.SECONDS, StupidObjectSizeEstimator, new NamedThreadFactory("actor")) 60 | **/ 61 | lazy val bossPool = ThreadPool.instrumentedFixed("scalang", "boss", max_threads) 62 | lazy val workerPool = ThreadPool.instrumentedFixed("scalang", "worker", max_threads) 63 | lazy val executorPool = new OrderedMemoryAwareThreadPoolExecutor(max_threads, max_memory, max_memory, 1000, TimeUnit.SECONDS, new NamedThreadFactory("executor")) 64 | lazy val actorPool = ThreadPool.instrumentedFixed("scalang", "actor", max_threads) 65 | lazy val batchExecutor = new BatchExecutorImpl 66 | 67 | val poolNameCounter = new AtomicInteger(0) 68 | 69 | def createBossPool : Executor = { 70 | bossPool 71 | } 72 | 73 | def createWorkerPool : Executor = { 74 | workerPool 75 | } 76 | 77 | def createExecutorPool : Executor = { 78 | executorPool 79 | } 80 | 81 | def createActorPool : Executor = { 82 | actorPool 83 | } 84 | 85 | def createBatchExecutor(name : String, reentrant : Boolean) : BatchExecutor = { 86 | if (reentrant) { 87 | val queue = new LinkedBlockingQueue[Runnable] 88 | val pool = new BatchPoolExecutor("scalang", name, max_threads, max_threads, 60l, TimeUnit.SECONDS, queue, new NamedThreadFactory(name)) 89 | pool 90 | } else { 91 | batchExecutor 92 | } 93 | } 94 | 95 | def createBatchExecutor(reentrant : Boolean) : BatchExecutor = { 96 | createBatchExecutor("pool-" + poolNameCounter.getAndIncrement, reentrant) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/main/scala/scalang/util/UnderToCamel.scala: -------------------------------------------------------------------------------- 1 | package scalang.util 2 | 3 | import java.lang.Character._ 4 | import scala.collection.mutable.StringBuilder 5 | 6 | object UnderToCamel { 7 | implicit def underStringWrap(str : String) = new UnderToCamel(str) 8 | } 9 | 10 | class UnderToCamel(str : String) { 11 | def underToCamel : String = { 12 | val b = new StringBuilder 13 | var nextUpper = true 14 | for (char <- str) { 15 | if (char == '_') { 16 | nextUpper = true 17 | } else if (nextUpper) { 18 | b += toUpperCase(char) 19 | nextUpper = false 20 | } else { 21 | b += char 22 | } 23 | } 24 | b.toString 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/test/resources/echo.escript: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %%! 
-smp enable -sname test@localhost -setcookie test 3 | 4 | main([]) -> 5 | register(echo, self()), 6 | io:format("ok~n"), 7 | receive 8 | {Pid, Msg} -> Pid ! Msg 9 | end. 10 | -------------------------------------------------------------------------------- /src/test/resources/link_delivery.escript: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %%! -smp enable -sname test@localhost -setcookie test 3 | 4 | main([]) -> 5 | process_flag(trap_exit, true), 6 | Pid = spawn_link(fun() -> 7 | process_flag(trap_exit, true), 8 | {mbox,scala@localhost} ! self(), 9 | receive 10 | {'EXIT', _From, Reason} -> {scala, scala@localhost} ! Reason; 11 | M -> exit(M) 12 | end 13 | end), 14 | receive 15 | {'EXIT', _, _} -> 16 | halt(), 17 | receive after infinity -> 0 end 18 | end. 19 | 20 | -------------------------------------------------------------------------------- /src/test/resources/monitor.escript: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %%! -smp enable -sname test@localhost -setcookie test 3 | 4 | main([]) -> 5 | {mbox,scala@localhost} ! self(), 6 | loop(). 7 | 8 | loop() -> 9 | receive 10 | {monitor, Pid} -> 11 | Ref = monitor(process, Pid), 12 | respond(Ref), 13 | loop(); 14 | {demonitor, Ref} -> 15 | demonitor(Ref), 16 | respond({demonitor, Ref}), 17 | loop(); 18 | {'DOWN', _, _, _, Reason} -> 19 | respond({down, Reason}), 20 | loop(); 21 | {exit, Reason} -> 22 | exit({exit, Reason}) 23 | after 2000 -> 24 | respond(timeout) 25 | end. 26 | 27 | respond(Msg) -> 28 | {scala,scala@localhost} ! Msg. 29 | 30 | 31 | -------------------------------------------------------------------------------- /src/test/resources/receive_connection.escript: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %%! -smp enable -sname test@localhost -setcookie test 3 | 4 | main([]) -> 5 | ok = net_kernel:monitor_nodes(true, [{node_type,all}]), 6 | io:format("ready~n"), 7 | receive 8 | {nodeup, Node, _} -> io:format("~p~n", [Node]) 9 | end, 10 | receive 11 | ok -> ok 12 | end. 13 | -------------------------------------------------------------------------------- /src/test/scala/scalang/EchoProcess.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | class EchoProcess(ctx : ProcessContext) extends Process(ctx) { 4 | 5 | override def onMessage(msg : Any) = msg match { 6 | case (from : Pid, echo : Any) => 7 | from ! echo 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/test/scala/scalang/FailProcess.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | class FailProcess(ctx : ProcessContext) extends Process(ctx) { 4 | 5 | override def onMessage(msg : Any) { 6 | exit(msg) 7 | } 8 | 9 | } 10 | -------------------------------------------------------------------------------- /src/test/scala/scalang/LinkProcess.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | class LinkProcess(ctx : ProcessContext) extends Process(ctx) { 4 | 5 | override def onMessage(msg : Any) = msg match { 6 | case (linkTo : Pid, sendTo : Pid) => 7 | link(linkTo) 8 | sendTo ! 
'ok 9 | } 10 | 11 | } 12 | -------------------------------------------------------------------------------- /src/test/scala/scalang/MonitorProcess.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | class MonitorProcess(ctx : ProcessContext) extends Process(ctx) { 4 | 5 | override def onMessage(msg : Any) = msg match { 6 | case (pid : Pid, sendTo : Pid) => 7 | expectedRef = monitor(pid) 8 | expectedPid = pid 9 | this.sendTo = sendTo 10 | sendTo ! 'ok 11 | case ('exit, msg : Any) => 12 | 'ok 13 | case ('timeout) => 14 | 'ok 15 | } 16 | 17 | override def trapMonitorExit(pid : Any, ref : Reference, reason : Any) { 18 | if (expectedPid == pid && expectedRef == ref) { 19 | sendTo ! 'monitor_exit 20 | } 21 | else 22 | sendTo ! 'mismatch 23 | } 24 | 25 | var sendTo : Pid = null 26 | var expectedPid : Pid = null 27 | var expectedRef : Reference = null 28 | } 29 | -------------------------------------------------------------------------------- /src/test/scala/scalang/NodeSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | import org.specs._ 4 | import org.specs.runner._ 5 | import scalang.node._ 6 | import java.lang.{Process => JProc} 7 | import java.io._ 8 | import scala.collection.JavaConversions._ 9 | 10 | class NodeSpec extends SpecificationWithJUnit { 11 | "Node" should { 12 | var epmd : JProc = null 13 | var erl : JProc = null 14 | var node : ErlangNode = null 15 | doBefore { 16 | epmd = EpmdCmd() 17 | } 18 | 19 | doAfter { 20 | epmd.destroy 21 | epmd.waitFor 22 | if (node != null) { node.shutdown } 23 | if (erl != null) { 24 | erl.destroy 25 | erl.waitFor 26 | } 27 | } 28 | 29 | val cookie = "test" 30 | 31 | "get connections from a remote node" in { 32 | node = Node(Symbol("test@localhost"), cookie) 33 | erl = ErlangVM("tmp@localhost", cookie, Some("io:format(\"~p~n\", [net_kernel:connect_node('test@localhost')]).")) 34 | val read = new BufferedReader(new InputStreamReader(erl.getInputStream)) 35 | read.readLine 36 | node.channels.keySet.toSet must contain(Symbol("tmp@localhost")) 37 | } 38 | 39 | "connect to a remote node" in { 40 | node = Node(Symbol("scala@localhost"), cookie) 41 | erl = Escript("receive_connection.escript") 42 | ReadLine(erl) //ready 43 | val pid = node.createPid 44 | node.connectAndSend(Symbol("test@localhost"), None) 45 | val result = ReadLine(erl) 46 | result must ==("scala@localhost") 47 | node.channels.keySet.toSet must contain(Symbol("test@localhost")) 48 | } 49 | 50 | "accept pings" in { 51 | node = Node(Symbol("scala@localhost"), cookie) 52 | erl = ErlangVM("tmp@localhost", cookie, Some("io:format(\"~p~n\", [net_adm:ping('scala@localhost')]).")) 53 | val result = ReadLine(erl) 54 | result must ==("pong") 55 | node.channels.keySet.toSet must contain(Symbol("tmp@localhost")) 56 | } 57 | 58 | "send pings" in { 59 | node = Node(Symbol("scala@localhost"), cookie) 60 | erl = Escript("receive_connection.escript") 61 | ReadLine(erl) 62 | node.ping(Symbol("test@localhost"), 1000) must ==(true) 63 | } 64 | 65 | "invalid pings should fail" in { 66 | node = Node(Symbol("scala@localhost"), cookie) 67 | node.ping(Symbol("taco_truck@localhost"), 1000) must ==(false) 68 | } 69 | 70 | "send local regname" in { 71 | node = Node(Symbol("scala@localhost"), cookie) 72 | val echoPid = node.spawn[EchoProcess]('echo) 73 | val mbox = node.spawnMbox 74 | node.send('echo, (mbox.self, 'blah)) 75 | mbox.receive must ==('blah) 76 | } 77 | 78 | "send remote 
regname" in { 79 | node = Node(Symbol("scala@localhost"), cookie) 80 | erl = Escript("echo.escript") 81 | ReadLine(erl) 82 | val mbox = node.spawnMbox 83 | node.send(('echo, Symbol("test@localhost")), mbox.self, (mbox.self, 'blah)) 84 | mbox.receive must ==('blah) 85 | } 86 | 87 | "receive remove regname" in { 88 | node = Node(Symbol("scala@localhost"), cookie) 89 | erl = Escript("echo.escript") 90 | ReadLine(erl) 91 | val mbox = node.spawnMbox("mbox") 92 | node.send(('echo, Symbol("test@localhost")), mbox.self, (('mbox, Symbol("scala@localhost")), 'blah)) 93 | mbox.receive must ==('blah) 94 | } 95 | 96 | "remove processes on exit" in { 97 | node = Node(Symbol("scala@localhost"), cookie) 98 | val pid = node.spawn[FailProcess] 99 | node.processes.get(pid) must beLike { case f : ProcessLauncher[_] => true } 100 | node.handleSend(pid, 'bah) 101 | Thread.sleep(100) 102 | Option(node.processes.get(pid)) must beNone 103 | } 104 | 105 | "deliver local breakages" in { 106 | node = Node(Symbol("scala@localhost"), cookie) 107 | val linkProc = node.spawn[LinkProcess] 108 | val failProc = node.spawn[FailProcess] 109 | val mbox = node.spawnMbox 110 | node.send(linkProc, (failProc, mbox.self)) 111 | Thread.sleep(100) 112 | mbox.receive must ==('ok) 113 | node.send(failProc, 'fail) 114 | Thread.sleep(100) 115 | node.isAlive(failProc) must ==(false) 116 | node.isAlive(linkProc) must ==(false) 117 | } 118 | 119 | "deliver remote breakages" in { 120 | node = Node(Symbol("scala@localhost"), cookie) 121 | val mbox = node.spawnMbox('mbox) 122 | val scala = node.spawnMbox('scala) 123 | erl = Escript("link_delivery.escript") 124 | val remotePid = mbox.receive.asInstanceOf[Pid] 125 | mbox.link(remotePid) 126 | mbox.exit('blah) 127 | scala.receive must ==('blah) 128 | } 129 | 130 | "deliver local breakages" in { 131 | node = Node(Symbol("scala@localhost"), cookie) 132 | val mbox = node.spawnMbox('mbox) 133 | erl = Escript("link_delivery.escript") 134 | val remotePid = mbox.receive.asInstanceOf[Pid] 135 | mbox.link(remotePid) 136 | node.send(remotePid, 'blah) 137 | Thread.sleep(200) 138 | node.isAlive(mbox.self) must ==(false) 139 | } 140 | 141 | "deliver breaks on channel disconnect" in { 142 | println("discon") 143 | node = Node(Symbol("scala@localhost"), cookie) 144 | val mbox = node.spawnMbox('mbox) 145 | erl = Escript("link_delivery.escript") 146 | val remotePid = mbox.receive.asInstanceOf[Pid] 147 | mbox.link(remotePid) 148 | erl.destroy 149 | erl.waitFor 150 | Thread.sleep(100) 151 | node.isAlive(mbox.self) must ==(false) 152 | } 153 | 154 | "deliver local monitor exits" in { 155 | node = Node(Symbol("scala@localhost"), cookie) 156 | val monitorProc = node.spawn[MonitorProcess] 157 | val failProc = node.spawn[FailProcess] 158 | val mbox = node.spawnMbox 159 | node.send(monitorProc, (failProc, mbox.self)) 160 | Thread.sleep(100) 161 | mbox.receive must ==('ok) 162 | node.send(failProc, 'fail) 163 | Thread.sleep(100) 164 | mbox.receive must ==('monitor_exit) 165 | node.isAlive(failProc) must ==(false) 166 | node.isAlive(monitorProc) must ==(true) 167 | } 168 | 169 | "deliver remote monitor exits" in { 170 | node = Node(Symbol("scala@localhost"), cookie) 171 | val mbox = node.spawnMbox('mbox) 172 | val scala = node.spawnMbox('scala) 173 | erl = Escript("monitor.escript") 174 | val remotePid = mbox.receive.asInstanceOf[Pid] 175 | 176 | // tell remote node to monitor our mbox. 
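      // (monitor.escript, shown above, sends its own pid to our 'mbox mailbox first; it then
      // monitors whatever pid we hand it and reports the monitor reference -- and any later
      // 'DOWN' notification -- back to the registered 'scala mailbox.)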
177 | node.send(remotePid, ('monitor, mbox.self)) 178 | val remoteRef = scala.receive.asInstanceOf[Reference] 179 | 180 | // kill our mbox and await notification from remote node. 181 | mbox.exit('blah) 182 | scala.receive must ==(('down, 'blah)) 183 | } 184 | 185 | "don't deliver remote monitor exit after demonitor" in { 186 | node = Node(Symbol("scala@localhost"), cookie) 187 | val mbox = node.spawnMbox('mbox) 188 | val scala = node.spawnMbox('scala) 189 | erl = Escript("monitor.escript") 190 | val remotePid = mbox.receive.asInstanceOf[Pid] 191 | 192 | // tell remote node to monitor our mbox. 193 | node.send(remotePid, ('monitor, mbox.self)) 194 | val remoteRef = scala.receive.asInstanceOf[Reference] 195 | 196 | // tell remote node to stop monitoring our mbox. 197 | node.send(remotePid, ('demonitor, remoteRef)) 198 | scala.receive must ==(('demonitor, remoteRef)) 199 | 200 | // kill our mbox and expect no notification from remote node. 201 | mbox.exit('blah) 202 | scala.receive(100) must ==(None) 203 | } 204 | 205 | "receive remote monitor exits" in { 206 | node = Node(Symbol("scala@localhost"), cookie) 207 | val monitorProc = node.spawn[MonitorProcess] 208 | val mbox = node.spawnMbox('mbox) 209 | val scala = node.spawn[MonitorProcess]('scala) 210 | erl = Escript("monitor.escript") 211 | val remotePid = mbox.receive.asInstanceOf[Pid] 212 | 213 | node.send(monitorProc, (remotePid, mbox.self)) 214 | Thread.sleep(100) 215 | mbox.receive must ==('ok) 216 | node.send(monitorProc, ('exit, 'blah)) 217 | Thread.sleep(100) 218 | mbox.receive must ==('monitor_exit) 219 | node.isAlive(monitorProc) must ==(true) 220 | } 221 | 222 | "deliver local monitor exit for unregistered process" in { 223 | node = Node(Symbol("scala@localhost"), cookie) 224 | val mbox = node.spawnMbox 225 | val ref = mbox.monitor('foo) 226 | Thread.sleep(100) 227 | mbox.receive must ==('DOWN, ref, 'process, 'foo, 'noproc) 228 | } 229 | 230 | "deliver remote monitor exit for unregistered process" in { 231 | node = Node(Symbol("scala@localhost"), cookie) 232 | val mbox = node.spawnMbox('mbox) 233 | val scala = node.spawnMbox('scala) 234 | erl = Escript("monitor.escript") 235 | val remotePid = mbox.receive.asInstanceOf[Pid] 236 | node.send(remotePid, ('monitor, 'foo)) 237 | val remoteRef = scala.receive.asInstanceOf[Reference] 238 | scala.receive must ==(('down, 'noproc)) 239 | } 240 | 241 | } 242 | } 243 | -------------------------------------------------------------------------------- /src/test/scala/scalang/ServiceSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | import org.specs._ 4 | import java.lang.{Process => JProc} 5 | 6 | class ServiceSpec extends SpecificationWithJUnit { 7 | "Service" should { 8 | val cookie = "test" 9 | var epmd : JProc = null 10 | var node : ErlangNode = null 11 | doBefore { 12 | epmd = EpmdCmd() 13 | } 14 | 15 | doAfter { 16 | epmd.destroy 17 | epmd.waitFor 18 | node.shutdown 19 | } 20 | 21 | "deliver casts" in { 22 | node = Node(Symbol("test@localhost"), cookie) 23 | val service = node.spawnService[CastNoopService,NoArgs](NoArgs) 24 | node.send(service, (Symbol("$gen_cast"),'blah)) 25 | node.isAlive(service) must ==(true) 26 | } 27 | 28 | "deliver calls" in { 29 | node = Node(Symbol("test@localhost"), cookie) 30 | val service = node.spawnService[CallEchoService,NoArgs](NoArgs) 31 | val mbox = node.spawnMbox 32 | val ref = node.makeRef 33 | node.send(service, (Symbol("$gen_call"), (mbox.self, ref), 'blah)) 34 | mbox.receive must 
==((ref,'blah)) 35 | } 36 | 37 | "respond to pings" in { 38 | node = Node(Symbol("test@localhost"), cookie) 39 | val service = node.spawnService[CastNoopService,NoArgs](NoArgs) 40 | val mbox = node.spawnMbox 41 | val ref = node.makeRef 42 | node.send(service, ('ping, mbox.self, ref)) 43 | mbox.receive must ==(('pong, ref)) 44 | } 45 | 46 | "call and response" in { 47 | node = Node(Symbol("test@localhost"), cookie) 48 | val service = node.spawnService[CallAndReceiveService,NoArgs](NoArgs) 49 | val mbox = node.spawnMbox 50 | node.send(service, mbox.self) 51 | val (Symbol("$gen_call"), (_, ref : Reference), req) = mbox.receive 52 | req must ==("blah") 53 | node.send(service, (ref, "barf")) 54 | mbox.receive must ==("barf") 55 | } 56 | 57 | "trap exits" in { 58 | node = Node(Symbol("test@localhost"), cookie) 59 | val service = node.spawnService[TrapExitService,NoArgs](NoArgs) 60 | val mbox = node.spawnMbox 61 | mbox.link(service) 62 | mbox.exit('terminate) 63 | Thread.sleep(1000) 64 | node.isAlive(service) must ==(true) 65 | } 66 | } 67 | } 68 | 69 | class TrapExitService(ctx : ServiceContext[NoArgs]) extends Service(ctx) { 70 | 71 | override def trapExit(from : Pid, reason : Any) { 72 | println("herp " + reason) 73 | } 74 | 75 | } 76 | 77 | class CallAndReceiveService(ctx : ServiceContext[NoArgs]) extends Service(ctx) { 78 | 79 | override def handleCast(msg : Any) { 80 | throw new Exception 81 | } 82 | 83 | override def handleCall(tag : (Pid, Reference), msg : Any) : Any = { 84 | throw new Exception 85 | } 86 | 87 | override def handleInfo(msg : Any) { 88 | val pid = msg.asInstanceOf[Pid] 89 | val response = call(pid, "blah") 90 | pid ! response 91 | } 92 | } 93 | 94 | class CastNoopService(ctx : ServiceContext[NoArgs]) extends Service(ctx) { 95 | override def handleCast(msg : Any) { 96 | println("cast received " + msg) 97 | } 98 | 99 | override def handleCall(tag : (Pid, Reference), msg : Any) : Any = { 100 | throw new Exception 101 | } 102 | 103 | override def handleInfo(msg : Any) { 104 | throw new Exception 105 | } 106 | } 107 | 108 | class CallEchoService(ctx : ServiceContext[NoArgs]) extends Service(ctx) { 109 | 110 | override def handleCast(msg : Any) { 111 | throw new Exception 112 | } 113 | 114 | override def handleCall(tag : (Pid, Reference), msg : Any) : Any = { 115 | msg 116 | } 117 | 118 | override def handleInfo(msg : Any) { 119 | throw new Exception 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/test/scala/scalang/TestHelper.scala: -------------------------------------------------------------------------------- 1 | package scalang 2 | 3 | import java.lang.ProcessBuilder 4 | import java.lang.{Process => SysProcess} 5 | import java.io._ 6 | import scala.collection.JavaConversions._ 7 | import scala.collection.mutable.StringBuilder 8 | 9 | object ErlangVM { 10 | def apply(name : String, cookie : String, eval : Option[String]) : SysProcess = { 11 | val commands = List("erl", "-sname", name, "-setcookie", cookie, "-noshell", "-smp") ++ 12 | (for (ev <- eval) yield { 13 | List("-eval", ev) 14 | }).getOrElse(Nil) 15 | val builder = new ProcessBuilder(commands) 16 | builder.start 17 | } 18 | } 19 | 20 | object Escript { 21 | def apply(command : String, args : String*) : SysProcess = { 22 | val url = getClass.getClassLoader.getResource(command) 23 | val file = new File(url.getFile) 24 | file.setExecutable(true) 25 | val builder = new ProcessBuilder(List(url.getFile) ++ args.toList) 26 | builder.start 27 | } 28 | } 29 | 30 | 
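// How the specs combine these helpers, as a rough sketch (see NodeSpec for the real usage):
//
//   val epmd = EpmdCmd()               // start the Erlang port-mapper daemon
//   val erl  = Escript("echo.escript") // launch a peer Erlang node from a test resource
//   ReadLine(erl)                      // block until the peer prints its ready line
//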
object EpmdCmd { 31 | def apply() : SysProcess = { 32 | val builder = new ProcessBuilder("epmd") 33 | builder.start 34 | } 35 | } 36 | 37 | object ReadLine { 38 | def apply(proc : SysProcess) : String = { 39 | val read = new BufferedReader(new InputStreamReader(proc.getInputStream)) 40 | val line = read.readLine 41 | if(line == null) { 42 | throw new RuntimeException("error getting result from escript. ensure that erlang is installed and available on the path.") 43 | } 44 | line 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/test/scala/scalang/epmd/EpmdDecoderSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.epmd 2 | 3 | import org.specs._ 4 | import scalang.util._ 5 | import org.jboss.{netty => netty} 6 | import netty.handler.codec.embedder._ 7 | import netty.buffer.ChannelBuffers._ 8 | import EpmdConst._ 9 | 10 | class EpmdDecoderSpec extends SpecificationWithJUnit { 11 | "EpmdDecoder" should { 12 | "decode alive responses" in { 13 | val embedder = new DecoderEmbedder[AliveResp](new EpmdDecoder) 14 | val buffer = copiedBuffer(ByteArray(121, 0, 0, 20)) 15 | embedder.offer(buffer) 16 | val resp = embedder.poll 17 | resp must ==(AliveResp(0,20)) 18 | } 19 | 20 | "decode port please response" in { 21 | val embedder = new DecoderEmbedder[PortPleaseResp](new EpmdDecoder) 22 | val buffer = copiedBuffer(ByteArray(119, 0, 20, 140, ntypeR6, 0, 0, 5, 0, 5, 0, 4, 102, 117, 99, 107, 0, 0)) 23 | embedder.offer(buffer) 24 | val resp = embedder.poll 25 | resp must ==(PortPleaseResp(5260, "fuck")) 26 | } 27 | 28 | "decode port please error" in { 29 | val embedder = new DecoderEmbedder[PortPleaseError](new EpmdDecoder) 30 | val buffer = copiedBuffer(ByteArray(119, 1)) 31 | embedder.offer(buffer) 32 | val resp = embedder.poll 33 | resp must ==(PortPleaseError(1)) 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/test/scala/scalang/epmd/EpmdEncoderSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.epmd 2 | 3 | import org.specs._ 4 | import scalang.util._ 5 | import org.jboss.{netty => netty} 6 | import netty.handler.codec.embedder._ 7 | import netty.buffer._ 8 | 9 | class EpmdEncoderSpec extends SpecificationWithJUnit { 10 | "EpmdEncoder" should { 11 | "encode alive requests" in { 12 | val embedder = new EncoderEmbedder[ChannelBuffer](new EpmdEncoder) 13 | embedder.offer(AliveReq(5430, "fuck")) 14 | val buffer = embedder.poll 15 | val bytes = new Array[Byte](buffer.readableBytes) 16 | buffer.readBytes(bytes) 17 | bytes.deep must ==(ByteArray(0,17,120,21,54,110,0,0,5,0,5,0,4,102,117,99,107,0,0).deep) 18 | } 19 | 20 | "encode a port please request" in { 21 | val embedder = new EncoderEmbedder[ChannelBuffer](new EpmdEncoder) 22 | embedder.offer(PortPleaseReq("fuck")) 23 | val buffer = embedder.poll 24 | val bytes = new Array[Byte](buffer.readableBytes) 25 | buffer.readBytes(bytes) 26 | bytes.deep must ==(ByteArray(0,5,122,102,117,99,107).deep) 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/test/scala/scalang/epmd/EpmdSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.epmd 2 | 3 | import org.specs._ 4 | import org.specs.runner._ 5 | import java.lang.{Process => SysProcess} 6 | import scalang._ 7 | 8 | class EpmdSpec extends SpecificationWithJUnit 
{ 9 | "Epmd" should { 10 | var proc : SysProcess = null 11 | doBefore { 12 | proc = EpmdCmd() 13 | } 14 | 15 | doAfter { 16 | proc.destroy 17 | proc.waitFor 18 | } 19 | 20 | "publish a port to a running epmd instance" in { 21 | val epmd = Epmd("localhost") 22 | val creation = epmd.alive(5480, "fuck@you.com") 23 | creation must beLike { case Some(v : Int) => true } 24 | epmd.close 25 | } 26 | 27 | "retrieve a port" in { 28 | val epmdPublish = Epmd("localhost") 29 | epmdPublish.alive(5480, "fuck@you.com") 30 | 31 | val epmdQuery = Epmd("localhost") 32 | val portPlease = epmdQuery.lookupPort("fuck@you.com") 33 | portPlease must beSome(5480) 34 | 35 | epmdPublish.close 36 | epmdQuery.close 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/test/scala/scalang/node/CaseClasses.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | object CaseClasses 4 | 5 | case class Derp(long : Long, double : Double, gack : String) 6 | case class Foo(balls : String, integer : Int, float : Double) 7 | 8 | -------------------------------------------------------------------------------- /src/test/scala/scalang/node/ClientHandshakeHandlerSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.specs._ 4 | import org.specs.runner._ 5 | import org.jboss.{netty => netty} 6 | import netty.buffer._ 7 | import netty.channel._ 8 | import java.security.MessageDigest 9 | import netty.handler.codec.embedder.TwoWayCodecEmbedder 10 | 11 | class ClientHandshakeHandlerSpec extends SpecificationWithJUnit { 12 | val cookie = "DRSJLFJLGIYPEAVFYFCY" 13 | val node = Symbol("tmp@moonpolysoft.local") 14 | 15 | "ClientHandshakeHandler" should { 16 | "complete a standard handshake" in { 17 | val handshake = new ClientHandshakeHandler(node, cookie, { (peer : Symbol, p : ChannelPipeline) => 18 | 19 | }) 20 | val embedder = new TwoWayCodecEmbedder[Any](handshake) 21 | val nameMsg = embedder.poll 22 | nameMsg must beLike { case NameMessage(5, _, node) => true } 23 | embedder.upstreamMessage(StatusMessage("ok")) 24 | embedder.upstreamMessage(ChallengeMessage(5, 32765, 15000, "tmp@blah")) 25 | val respMsg = embedder.poll 26 | var challenge = 0 27 | respMsg must beLike { case ChallengeReplyMessage(c, digest) => 28 | challenge = c 29 | val md5 = MessageDigest.getInstance("MD5") 30 | md5.update(cookie.getBytes) 31 | md5.update("15000".getBytes) 32 | digest.deep == md5.digest.deep 33 | } 34 | val md5 = MessageDigest.getInstance("MD5") 35 | md5.update(cookie.getBytes) 36 | md5.update(handshake.mask(challenge).toString.getBytes) 37 | embedder.upstreamMessage(ChallengeAckMessage(md5.digest)) 38 | handshake.isVerified must ==(true) 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/test/scala/scalang/node/FailureDetectionHandlerSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.specs._ 4 | import org.specs.runner._ 5 | import scalang.util._ 6 | import org.jboss.{netty => netty} 7 | import netty.handler.codec.embedder._ 8 | import netty.buffer._ 9 | import netty.util._ 10 | import netty.handler.timeout._ 11 | import java.util.{Set => JSet} 12 | import java.util.concurrent._ 13 | 14 | class FailureDetectionHandlerSpec extends SpecificationWithJUnit { 15 | class SeqClock(seq : Long*) extends Clock { 16 | 
val iterator = seq.iterator 17 | 18 | override def currentTimeMillis : Long = { 19 | iterator.next 20 | } 21 | } 22 | 23 | class MockTimer extends Timer { 24 | var t : Timeout = null 25 | val self = this 26 | 27 | def fire { 28 | if (t != null) t.getTask.run(t) 29 | } 30 | 31 | override def newTimeout(task : TimerTask, delay : Long, unit : TimeUnit) : Timeout = { 32 | t = new Timeout { 33 | def cancel { 34 | t = null 35 | } 36 | 37 | def getTask = task 38 | 39 | def getTimer = self 40 | 41 | def isCancelled = false 42 | 43 | def isExpired = false 44 | } 45 | t 46 | } 47 | 48 | override def stop : JSet[Timeout] = { 49 | null 50 | } 51 | } 52 | 53 | "FailureDetectionHandler" should { 54 | "cause an exception after 4 ticks" in { 55 | val timer = new MockTimer 56 | val clock = new SeqClock(1000,2000,3000,4000,5000) 57 | val handler = new FailureDetectionHandler(Symbol("test@localhost.local"), clock, 4, timer) 58 | val embedder = new DecoderEmbedder[Any](handler) 59 | timer.fire 60 | timer.fire 61 | timer.fire 62 | timer.fire must throwA[Exception] 63 | } 64 | 65 | "pass through messages" in { 66 | val timer = new MockTimer 67 | val clock = new SeqClock(1000,2000,3000,4000,5000) 68 | val handler = new FailureDetectionHandler(Symbol("test@localhost.local"), clock, 4, timer) 69 | val embedder = new DecoderEmbedder[Any](handler) 70 | embedder.offer(LinkMessage(null, null)) 71 | embedder.poll must beLike { case LinkMessage(null, null) => true } 72 | } 73 | 74 | "tolerate occasional missed ticks" in { 75 | val timer = new MockTimer 76 | val clock = new SeqClock(1000,2000,3000,4000,5000,6000,7000) 77 | val handler = new FailureDetectionHandler(Symbol("test@localhost.local"), clock, 4, timer) 78 | val embedder = new DecoderEmbedder[Any](handler) 79 | timer.fire 80 | timer.fire 81 | embedder.offer(LinkMessage(null, null)) 82 | embedder.poll must notBeNull 83 | timer.fire 84 | timer.fire 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/test/scala/scalang/node/HandshakeDecoderSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.specs._ 4 | import org.specs.runner._ 5 | import scalang.util._ 6 | import org.jboss.{netty => netty} 7 | import netty.handler.codec.embedder._ 8 | import netty.buffer.ChannelBuffers._ 9 | 10 | class HandshakeDecoderSpec extends SpecificationWithJUnit { 11 | "HandshakeDecoder" should { 12 | "decode name messages" in { 13 | val decoder = new HandshakeDecoder 14 | val embedder = new DecoderEmbedder[NameMessage](decoder) 15 | 16 | val bytes = ByteArray(110, 0,5, 0,0,127,253, 116,109,112,64,98,108,97,104) 17 | val buffer = copiedBuffer(bytes) 18 | embedder.offer(buffer) 19 | val msg = embedder.poll 20 | msg must ==(NameMessage(5, 32765, "tmp@blah")) 21 | 22 | decoder.mode must ==('challenge) //decoding a name message should trigger a state change 23 | } 24 | 25 | "decode status messages" in { 26 | val decoder = new HandshakeDecoder 27 | val embedder = new DecoderEmbedder[StatusMessage](decoder) 28 | 29 | val bytes = ByteArray(115, 111,107) 30 | val buffer = copiedBuffer(bytes) 31 | embedder.offer(buffer) 32 | val msg = embedder.poll 33 | msg must ==(StatusMessage("ok")) 34 | } 35 | 36 | "decode challenge messages" in { 37 | val decoder = new HandshakeDecoder 38 | val embedder = new DecoderEmbedder[ChallengeMessage](decoder) 39 | decoder.mode = 'challenge 40 | val bytes = ByteArray(110, 0,5, 0,0,127,253, 0,1,56,213, 
116,109,112,64,98,108,97,104) 41 | val buffer = copiedBuffer(bytes) 42 | embedder.offer(buffer) 43 | val msg = embedder.poll 44 | msg must ==(ChallengeMessage(5, 32765, 80085, "tmp@blah")) 45 | } 46 | 47 | "decode reply messages" in { 48 | val decoder = new HandshakeDecoder 49 | val embedder = new DecoderEmbedder[ChallengeReplyMessage](decoder) 50 | val bytes = ByteArray(114, 0,1,56,213, 112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112) 51 | val buffer = copiedBuffer(bytes) 52 | embedder.offer(buffer) 53 | val msg = embedder.poll 54 | msg must beLike { case ChallengeReplyMessage(80085, digest) => 55 | digest.deep == ByteArray(112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112).deep 56 | } 57 | } 58 | 59 | "decode ack messages" in { 60 | val decoder = new HandshakeDecoder 61 | val embedder = new DecoderEmbedder[ChallengeAckMessage](decoder) 62 | val bytes = ByteArray(97, 112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112) 63 | val buffer = copiedBuffer(bytes) 64 | embedder.offer(buffer) 65 | val msg = embedder.poll 66 | msg must beLike { case ChallengeAckMessage(digest) => 67 | digest.deep == ByteArray(112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112).deep 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/test/scala/scalang/node/HandshakeEncoderSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.specs._ 4 | import org.specs.runner._ 5 | import scalang.util._ 6 | import org.jboss.{netty => netty} 7 | import netty.handler.codec.embedder._ 8 | import netty.buffer._ 9 | import netty.buffer.ChannelBuffers._ 10 | 11 | class HandshakeEncoderSpec extends SpecificationWithJUnit { 12 | "HandshakeEncoder" should { 13 | "encode name messages" in { 14 | val encoder = new HandshakeEncoder 15 | val embedder = new EncoderEmbedder[ChannelBuffer](encoder) 16 | embedder.offer(NameMessage(5, 32765, "tmp@blah")) 17 | 18 | val buffer = embedder.poll 19 | val bytes = buffer.array 20 | bytes.deep must ==(ByteArray(0,15, 110, 0,5, 0,0,127,253, 116,109,112,64,98,108,97,104).deep) 21 | } 22 | 23 | "encode status messages" in { 24 | val encoder = new HandshakeEncoder 25 | val embedder = new EncoderEmbedder[ChannelBuffer](encoder) 26 | embedder.offer(StatusMessage("ok")) 27 | 28 | val buffer = embedder.poll 29 | val bytes = buffer.array 30 | bytes.deep must ==(ByteArray(0,3, 115, 111,107).deep) 31 | } 32 | 33 | "encode challenge messages" in { 34 | val encoder = new HandshakeEncoder 35 | val embedder = new EncoderEmbedder[ChannelBuffer](encoder) 36 | embedder.offer(ChallengeMessage(5, 32765, 80085, "tmp@blah")) 37 | 38 | val buffer = embedder.poll 39 | val bytes = buffer.array 40 | bytes.deep must ==(ByteArray(0,19, 110, 0,5, 0,0,127,253, 0,1,56,213, 116,109,112,64,98,108,97,104).deep) 41 | } 42 | 43 | "encode challenge reply messages" in { 44 | val encoder = new HandshakeEncoder 45 | val embedder = new EncoderEmbedder[ChannelBuffer](encoder) 46 | embedder.offer(ChallengeReplyMessage(80085, ByteArray(112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112))) 47 | 48 | val buffer = embedder.poll 49 | val bytes = buffer.array 50 | bytes.deep must ==(ByteArray(0,21, 114, 0,1,56,213, 112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112).deep) 51 | } 52 | 53 | "encode ack messages" in { 54 | val encoder = new HandshakeEncoder 55 | val embedder = new EncoderEmbedder[ChannelBuffer](encoder) 56 | 
embedder.offer(ChallengeAckMessage(ByteArray(112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112))) 57 | 58 | val buffer = embedder.poll 59 | val bytes = buffer.array 60 | bytes.deep must ==(ByteArray(0,17, 97, 112,111,111,111,111,111,111,111,111,111,111,111,111,111,111,112).deep) 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/test/scala/scalang/node/ScalaTermDecoderSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.specs.SpecificationWithJUnit 4 | import org.jboss.netty.buffer.ChannelBuffers 5 | import java.nio.charset.Charset 6 | 7 | class ScalaTermDecoderSpec extends SpecificationWithJUnit { 8 | "ScalaTermDecoder" should { 9 | "decode a string" in { 10 | val buffer = ChannelBuffers.copiedBuffer("abc", Charset.forName("utf-8")) 11 | val decoded = ScalaTermDecoder.fastString(buffer, 3) 12 | decoded must ==("abc") 13 | decoded.length must ==(3) 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/test/scala/scalang/node/ServerHandshakeHandlerSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.node 2 | 3 | import org.specs._ 4 | import org.specs.runner._ 5 | import scalang.util._ 6 | import org.jboss.{netty => netty} 7 | import netty.buffer._ 8 | import netty.channel._ 9 | import ChannelBuffers._ 10 | import java.security.MessageDigest 11 | import netty.handler.codec.embedder.TwoWayCodecEmbedder 12 | 13 | class ServerHandshakeHandlerSpec extends SpecificationWithJUnit { 14 | val cookie = "DRSJLFJLGIYPEAVFYFCY" 15 | 16 | "ServerHandshakeHandler" should { 17 | "complete a standard handshake" in { 18 | val handshake = new ServerHandshakeHandler(Symbol("tmp@blah"), cookie, { (peer : Symbol, p : ChannelPipeline) => 19 | 20 | }) 21 | val embedder = new TwoWayCodecEmbedder[Any](handshake) 22 | embedder.upstreamMessage(NameMessage(5, 32765, "tmp@moonpolysoft.local")) 23 | val status = embedder.poll 24 | status must ==(StatusMessage("ok")) 25 | var challenge = 0 26 | val challengeMsg = embedder.poll 27 | challengeMsg must beLike { case ChallengeMessage(5, _, c : Int, _) => 28 | challenge = c 29 | true } 30 | val md5 = MessageDigest.getInstance("MD5") 31 | md5.update(cookie.getBytes) 32 | md5.update(handshake.mask(challenge).toString.getBytes) 33 | val digest = md5.digest 34 | //we reuse the same challenge to make the test easier 35 | embedder.upstreamMessage(ChallengeReplyMessage(challenge, digest)) 36 | val ackMsg = embedder.poll 37 | ackMsg must beLike { case ChallengeAckMessage(d) => 38 | d.deep == digest.deep 39 | } 40 | handshake.isVerified must ==(true) 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/test/scala/scalang/terms/ScalaTermDecoderSpec.scala: -------------------------------------------------------------------------------- 1 | package scalang.terms 2 | 3 | import org.specs._ 4 | import scalang.node.{Foo, Derp, CaseClassFactory, ScalaTermDecoder} 5 | import org.jboss.netty._ 6 | import handler.codec.embedder._ 7 | import java.nio._ 8 | import buffer.ChannelBuffers._ 9 | import scalang.util._ 10 | import scalang._ 11 | 12 | class ScalaTermDecoderSpec extends SpecificationWithJUnit { 13 | "ScalaTermDecoder" should { 14 | "decode regular terms" in { 15 | val decoder = new ScalaTermDecoder('test, NoneTypeFactory, NoneTypeDecoder) 16 | 17 | "read 
small integers" in { 18 | val thing = decoder.readTerm(copiedBuffer(ByteArray(97,2))) 19 | thing must ==(2) 20 | } 21 | 22 | "read 32 bit ints" in { 23 | val thing = decoder.readTerm(copiedBuffer(ByteArray(98,0,0,78,32))) 24 | thing must ==(20000) 25 | } 26 | 27 | "read string floats" in { 28 | val thing = decoder.readTerm(copiedBuffer(ByteArray(99,49,46,49,52,49,53,57,48,48,48,48,48,48,48,48,48,48,49,48,52,54,54,101,43,48,48,0,0,0,0,0))) 29 | thing must ==(1.14159) 30 | } 31 | 32 | "read atoms" in { 33 | val thing = decoder.readTerm(copiedBuffer(ByteArray(100,0,4,98,108,97,104))) 34 | thing must ==('blah) 35 | } 36 | 37 | "read strings" in { 38 | val thing = decoder.readTerm(copiedBuffer(ByteArray(107,0,4,98,108,97,104))) 39 | thing must ==("blah") 40 | } 41 | 42 | "read pids" in { 43 | val thing = decoder.readTerm(copiedBuffer(ByteArray(103,100,0,13,110,111,110,111,100,101,64,110,111,104, 44 | 111,115,116,0,0,0,31,0,0,0,0,0))) 45 | thing must ==(Pid(Symbol("nonode@nohost"), 31,0,0)) 46 | } 47 | 48 | "read small tuples" in { 49 | val thing = decoder.readTerm(copiedBuffer(ByteArray(104,2,97,1,97,2))) 50 | thing must ==((1,2)) 51 | } 52 | 53 | 54 | "read large tuples" in { 55 | val thing = decoder.readTerm(copiedBuffer(ByteArray(104,31,97,0,97,1,97,2,97,3,97,4,97,5,97,6,97,7,97,8, 56 | 97,9,97,10,97,11,97,12,97,13,97,14,97,15,97,16,97,17,97,18,97,19,97,20,97,21,97,22,97,23, 57 | 97,24,97,25,97,26,97,27,97,28,97,29,97,30))) 58 | thing must ==(new BigTuple((0 to 30).toSeq)) 59 | } 60 | 61 | "read nils" in { 62 | val thing = decoder.readTerm(copiedBuffer(ByteArray(106))) 63 | thing must ==(Nil) 64 | } 65 | 66 | "read lists" in { 67 | val thing = decoder.readTerm(copiedBuffer(ByteArray(108,0,0,0,3,100,0,1,97,100,0,1,98,100,0,1,99,106))) 68 | thing must ==(List('a,'b,'c)) 69 | } 70 | 71 | "read improper lists" in { 72 | val thing = decoder.readTerm(copiedBuffer(ByteArray(108,0,0,0,3,100,0,1,97,100,0,1,98,100,0,1,99,100,0, 73 | 1,100))) 74 | thing must ==(ImproperList(List('a,'b,'c), 'd)) 75 | } 76 | 77 | "read binaries" in { 78 | val thing = decoder.readTerm(copiedBuffer(ByteArray(109,0,0,0,4,98,108,97,104))) 79 | thing must ==(ByteBuffer.wrap(ByteArray(98,108,97,104))) 80 | } 81 | 82 | "read longs" in { 83 | val thing = decoder.readTerm(copiedBuffer(ByteArray(110,8,0,0,0,0,0,0,0,0,10))) 84 | thing must ==(720575940379279360L) 85 | } 86 | 87 | "read references" in { 88 | val thing = decoder.readTerm(copiedBuffer(ByteArray(114,0,3,100,0,13,110,111,110,111,100,101,64,110,111,104,111,115,116,0,0,0,0,99,0,0,0,0,0,0,0,0))) 89 | thing must ==(Reference(Symbol("nonode@nohost"), Seq(99,0,0), 0)) 90 | } 91 | 92 | "small atoms" in { 93 | val thing = decoder.readTerm(copiedBuffer(ByteArray(115,1,97))) 94 | thing must ==('a) 95 | } 96 | 97 | "bit binaries" in { 98 | val thing = decoder.readTerm(copiedBuffer(ByteArray(77,0,0,0,1,7,120))) 99 | thing must ==(BitString(ByteBuffer.wrap(ByteArray(120)), 7)) 100 | } 101 | 102 | "read case objects" in { 103 | val dec = new ScalaTermDecoder('test, new CaseClassFactory(Seq("scalang.node"), Map[String,Class[_]]())) 104 | //{foo, "balls", 1245, 60.0} 105 | val foo = dec.readTerm(copiedBuffer(ByteArray(104,4,100,0,3,102,111,111,107,0,5,98,97,108,108,115,98,0,0,4,221,99,54, 106 | 46,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,101,43,48,49, 107 | 0,0,0,0,0))) 108 | foo must haveClass[Foo] 109 | val realFoo = foo.asInstanceOf[Foo] 110 | realFoo.balls must ==("balls") 111 | realFoo.integer must ==(1245) 112 | realFoo.float must ==(60.0) 113 | } 114 | 115 | 
"read typeMapped objects" in { 116 | val dec = new ScalaTermDecoder('test, new CaseClassFactory(Nil, Map("herp" -> classOf[Derp]))) 117 | //{herp, 6234234234234234234, 1260.0, "gack"} 118 | val derp = dec.readTerm(copiedBuffer(ByteArray(104,4,100,0,4,104,101,114,112,110,8,0,122,101,28,114,1,115,132,86,99,49, 119 | 46,50,54,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,101,43,48,51, 120 | 0,0,0,0,0,107,0,4,103,97,99,107))) 121 | derp must haveClass[Derp] 122 | val realDerp = derp.asInstanceOf[Derp] 123 | realDerp.long must ==(6234234234234234234L) 124 | realDerp.double must ==(1260.0) 125 | realDerp.gack must ==("gack") 126 | } 127 | } 128 | 129 | "decode full distribution packets" in { 130 | new DecoderEmbedder[Any](new ScalaTermDecoder('test, new CaseClassFactory(Nil, Map[String,Class[_]]()))) 131 | } 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/test/scala/scalang/util/TwoWayCodecEmbedder.scala: -------------------------------------------------------------------------------- 1 | package org.jboss.netty.handler.codec.embedder 2 | 3 | import org.jboss.{netty => netty} 4 | import netty.channel._ 5 | import netty.handler.codec.embedder._ 6 | 7 | class TwoWayCodecEmbedder[A](handlers : ChannelHandler*) extends AbstractCodecEmbedder[A](handlers : _*) { 8 | 9 | //derp 10 | def offer(input : Any) : Boolean = { 11 | true 12 | } 13 | 14 | def upstreamMessage(input : Any) { 15 | Channels.fireMessageReceived(getChannel, input) 16 | } 17 | 18 | def downstreamMessage(input : Any) { 19 | Channels.write(getChannel, input).setSuccess 20 | } 21 | } 22 | --------------------------------------------------------------------------------