├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── RELEASE.md ├── apps ├── coop │ ├── docs │ │ ├── README.md │ │ ├── coopbody.dot │ │ ├── coopbody.pdf │ │ ├── coophead.dot │ │ ├── coophead.pdf │ │ ├── coops_poster.pages │ │ └── coops_poster.pdf │ ├── include │ │ ├── coop.hrl │ │ ├── coop_dag.hrl │ │ ├── coop_head.hrl │ │ └── coop_node.hrl │ └── src │ │ ├── coop.app.src │ │ ├── coop.erl │ │ ├── coop_flow.erl │ │ ├── coop_head.erl │ │ ├── coop_head_ctl_rcv.erl │ │ ├── coop_head_data_rcv.erl │ │ ├── coop_head_root_rcv.erl │ │ ├── coop_kill_link_rcv.erl │ │ ├── coop_node.erl │ │ ├── coop_node_ctl_rcv.erl │ │ ├── coop_node_data_rcv.erl │ │ └── coop_node_util.erl ├── ctest │ ├── coop.coverspec │ ├── coop.spec │ ├── coop │ │ ├── coop_SUITE.erl │ │ ├── coop_head_SUITE.erl │ │ └── coop_node_SUITE.erl │ ├── examples.coverspec │ ├── examples.spec │ ├── examples │ │ └── esp_cache_SUITE.erl │ └── logs │ │ └── README ├── erlangsp │ ├── include │ │ └── license_and_copyright.hrl │ └── src │ │ ├── erlangsp.app.src │ │ ├── erlangsp_app.erl │ │ └── erlangsp_sup.erl └── examples │ └── esp_cache │ ├── include │ └── esp_cache.hrl │ └── src │ ├── esp_cache.app.src │ └── esp_cache.erl ├── future_ideas.txt ├── notes.txt ├── rebar ├── rebar.config └── rel ├── files ├── app.config ├── erl ├── erlangsp ├── erlangsp.cmd ├── nodetool ├── start_erl.cmd └── vm.args └── reltool.config /.gitignore: -------------------------------------------------------------------------------- 1 | apps/ctest/logs 2 | rel/erlangsp 3 | .eunit 4 | *.beam 5 | deps 6 | ebin 7 | *~ 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: erlang 2 | 3 | otp_release: 4 | - R15B 5 | 6 | script: "make test" 7 | 8 | notifications: 9 | recipients: 10 | - jay@duomark.com 11 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | Copyright (c) 2012, DuoMark International, Inc. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in the 12 | documentation and/or other materials provided with the distribution. 13 | 14 | * Neither the name of DuoMark International, Inc. nor the 15 | names of its contributors may be used to endorse or promote products 16 | derived from this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL DUOMARK INTERNATIONAL, INC. BE LIABLE FOR ANY 22 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 25 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | REBAR=./rebar 2 | ALL_APPS_DIRS=apps/* 3 | ALL_EXAMPLE_DIRS=apps/examples/* 4 | CT_LOG_DIRS=apps/ctest/logs 5 | 6 | all: deps compile 7 | 8 | deps: deps/erlangsp 9 | 10 | deps/erlangsp: 11 | @${REBAR} get-deps 12 | 13 | compile: 14 | @${REBAR} compile 15 | 16 | dialyze: all 17 | @dialyzer -Wrace_conditions ${ALL_APPS_DIRS}/ebin ${ALL_EXAMPLE_DIRS}/ebin 18 | 19 | gc: crash 20 | @echo 'Removing all emacs backup files' 21 | @find . -name "*~" -exec rm -f {} \; 22 | @find . -name "erl_crash.dump" -exec rm -f {} \; 23 | @echo 'Removing all compile artifacts' 24 | @rm -f ${ALL_APPS_DIRS}/src/*.P 25 | @rm -f ${ALL_APPS_DIRS}/src/*/*.P 26 | @rm -f ${ALL_APPS_DIRS}/src/*.beam 27 | @rm -f ${ALL_APPS_DIRS}/src/*/*.beam 28 | @echo 'Removing all example compile artifacts' 29 | @rm -f ${ALL_EXAMPLE_DIRS}/src/*.P 30 | @rm -f ${ALL_EXAMPLE_DIRS}/src/*/*.P 31 | @rm -f ${ALL_EXAMPLE_DIRS}/src/*.beam 32 | @rm -f ${ALL_EXAMPLE_DIRS}/src/*/*.beam 33 | @echo 'Removing all common_test logs' 34 | @rm -rf ${CT_LOG_DIRS}/*.* 35 | @rm -f ${CT_LOG_DIRS}/variables-ct* 36 | 37 | rel: all 38 | @echo 'Generating erlangsp release' 39 | @(cd rel; .${REBAR} generate) 40 | 41 | clean: gc 42 | @${REBAR} clean 43 | 44 | crash: 45 | @find . 
-name "erl_crash.dump" -exec rm -f {} \; 46 | 47 | relclean: crash 48 | @rm -rf rel/erlangsp 49 | 50 | realclean: clean relclean 51 | @${REBAR} del-deps 52 | @rm -rf deps/* 53 | 54 | test: all coop_test examples_test 55 | 56 | coop_test: all 57 | @(cd apps/ctest; ct_run -spec coop.spec -pa ../coop/ebin -pa ../../deps/*/ebin) 58 | 59 | examples_test: all 60 | @(cd apps/ctest; ct_run -spec examples.spec -pa ../coop/ebin -pa ../examples/*/ebin -pa ../../deps/*/ebin) 61 | 62 | ct: coop_test 63 | 64 | et: examples_test 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Erlang Services Platform (Erlang/SP) 2 | ================================== 3 | 4 | Erlang/SP is a adjunct to Erlang/OTP that is designed to scale to 10Ks of processor cores. OTP allows a designer to architect distributed systems out of single process behaviours to create scaffoldings in the 100s-1000s of process range geared around a single permanent hierarchical application or a set of single hierarchical applications. SP eschews applications in favor of peer services that replicate and fade as needed, constructing solutions on an architecture functionally composed with patterns of Cooperating Processes (Co-ops). The fundamental unit is a Co-op rather than a Pid. 5 | 6 | The ideas behind Erlang/SP are evolving rapidly and constantly in flux. I welcome feedback, but expect any and all interfaces to change without notice. This is an experiment that can run comfortably alongside OTP but revisits the tradeoffs and assumptions underlying the philosphy of OTP. 7 | 8 | The basic components of an Erlang/SP solution are: 9 | 10 | * digraph: An instance of the erlang:digraph module to describe a co-op structure 11 | * co-op: A task-specific (pipeline, round-robin, broadcast) collection of cooperating processes 12 | 13 | Users of the library will primarily deal with Co-op instances. 
The digraphs are generally hidden inside of Co-ops and used to guide the behavior for replication and visualization / reflection. 14 | 15 | All components offer both Control and Data channels for more efficient management of services. Supervisor behaviour will not resemble the current incarnation of OTP, and allocation of processes will be more of a batch/bulk-oriented operation (probably eventually requiring VM tweaks to enhance process spawning speed). Some tests of parallel spawning will be done soon. 16 | 17 | Goals 18 | ===== 19 | 20 | The ultimate goal of this project is to provide the average erlang programmer a huge boost in productivity and performance by providing easy access to complex concurrent computation models. In addition the following are considered intermediate goals: 21 | 22 | * Enable dataflow algorithms 23 | * Promote graph computation and dynamic data storage/access 24 | * Provide a toolbox with much higher scalability patterns than OTP offers 25 | * Simplify the code to implement common concurrent architectural patterns 26 | * Display algorithms in action through the browser 27 | 28 | Related Work 29 | ============ 30 | 31 | As of April 12, 2012, I discovered the approach I have been designing is a reinvention of Algorithmic Skeletons (Murray Cole, "Algorithmic Skeletons: structured management of parallel computation" MIT Press, Cambdridge, MA, USA, 1989). I only discovered these references after implementing the first proof-of-concept. Workflow patterns as implemented in Business Processing Execution Languages (BPEL) also serve as a model for common concurrency patterns. 32 | 33 | There are also similarities to http://github.com/vladdu/erl-pipes although I expect this library to go far beyond Hartmann-style pipelines. 34 | 35 | On July 13, 2012 I came across http://github.com/bergie/noflo which is a Node.js implementation of Flow-Based Programming. This seems to have many similarities due to the basis on digraphs. 
They seem to be concentrating on integration with existing flow-based tools and declarative languages like dot (used by graphviz). Erlang/SP intends to be more comfortable to an erlang programmer, and hopefully more graphical with browser-based interactions, however internally adopting a language like dot might be an option. 36 | 37 | Travis CI 38 | ========= 39 | 40 | [![Build Status](https://travis-ci.org/duomark/erlangsp.png?branch=master)](https://travis-ci.org/duomark/erlangsp) 41 | 42 | 43 | [Travis-CI](http://about.travis-ci.org/) provides Continuous Integration. Travis automatically builds and runs the unit tests whenever the code is modified. The status of the current build is shown in the image badge directly above this paragraph. 44 | 45 | Integration with Travis is provided by the [.travis.yml file](https://raw.github.com/duomark/erlangsp/master/.travis.yml). The automated build is run on R15B. 46 | 47 | Included software 48 | ================= 49 | 50 | This project is built with dependencies on other open source software projects. 51 | 52 | The following software is used to build, test or validate the application during development: 53 | 54 | * erlang R14B or later (should work on any version with digraph) 55 | * meck (git://github.com/eproxus/meck.git) 56 | 57 | 58 | Compiling and testing erlangsp 59 | ============================== 60 | 61 | Download and install the source code, then perform the following at the command line: 62 | 63 | ``` 64 | % make realclean all test dialyze rel 65 | % rel/erlangsp/bin/erlangsp console 66 | ``` 67 | 68 | You will now be at a shell prompt with erlangsp, coop and any example projects loaded. Try the following erlang commands to ensure that everything compiled and loaded properly: 69 | 70 | ``` 71 | 1> coop:module_info(). 72 | 2> erlangsp:module_info(). 73 | 3> esp_cache:module_info(). 
74 | ``` 75 | 76 | You must write erlang code that uses the erlangsp library for your application to take advantage of the services provided. The best way to do that is to use rebar and name erlangsp as an included application in your .app.src application file. 77 | 78 | Documentation 79 | ============= 80 | 81 | The current documentation is rudimentary because the underlying software is changing quickly. There should be enough information to try out the libraries and examples, but it is not formatted in a nice way. There is a separate 'docs' directory in each of the apps or examples directories written in markdown so that it can be easily browsed from github with a browser: 82 | 83 | 1. [Co-op docs](erlangsp/tree/master/apps/coop/docs/README.md) 84 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | Version Releases 2 | ================ 3 | 4 | 0.1.0 Planned 5 | 6 | * Browser-based visualization of Co-op graphs and data flow 7 | 8 | 0.0.2 Under development 9 | 10 | * coop_node:new/5,6 option 'raw_pid' 11 | 12 | 0.0.1 Released Sept 16, 2012 13 | 14 | * esp_cache Co-op example: Directory, Workers and Cached data 15 | * coop_node:new/5,6 option 'access_coop_head' 16 | * Initial creation of Co-op modules 17 | * Initial creation of Erlang/SP -------------------------------------------------------------------------------- /apps/coop/docs/README.md: -------------------------------------------------------------------------------- 1 | Co-op Library (Cooperating Processes) 2 | ===================================== 3 | 4 | A Co-op is a collection of processes arranged in a Directed Acyclic Graph (using Erlang's digraph module). Nodes of the graph are processes; edges represent messaging from an upstream process to a downstream process. 
The structure is maintained as a static digraph template when initially specified, a digraph containing live erlang processes at each graph node, and the same erlang processes connected via internal state references to downstream pids such that messages are sent directly to known pids without name lookup in the normal dataflow case. 5 | 6 | ``` 7 | Here is a simple example of a Pipeline Co-op calculating 3*(x+2) - Number_of_Data_Items_Seen: 8 | 9 | -module(example). 10 | -export([plus2/2, times3/2, minus_seen/2]). 11 | 12 | plus2 (Ignored_State, X) -> {Ignored_State, X+2}. 13 | times3(Ignored_State, X) -> {Ignored_State, X*3}. 14 | minus_seen(Num_Seen = _State, X) -> {Num_Seen+1, X-Num_Seen}. 15 | 16 | %% Pipeline_Fns is equivalent to [ {example, plus2}, {example, times3}, {example, minus5} ] 17 | %% Corresponding digraph is: plus2 => times3 => minus_seen => Receiver 18 | 19 | Coop_Head = coop:new_pipeline(Pipeline_Fns, self()), 20 | coop:relay_data(Coop_Head, 4), 21 | receive Any -> Any end. 22 | ``` 23 | 24 | Setting up the constituent pipeline functions involves filling in records with an init function for the initial State (which is consistently ignored in this example) plus setting the function task to execute. The call to coop:new_pipeline/2 creates a graph and a process for each stage of the pipeline, with each one executing the corresponding exported function. The data relayed in (4 in this case) is passed through the first stage (4+2 => 6) and then to the next stage (6*3 => 18) and finally to the last stage (18-0 => 18, while the State is incremented by one so that on the next pass it will be 1) with the result sent as a message to the Receiver which was set to self(), where we can safely fetch it from the erlang message queue. If the calculation were change to be 3*(x-Num_Seen) + 2 we could just rearrange the order of the functions in the pipeline and make a new instance of the Co-op to perform the new calculation. 
The focus is on representing the computation as the flow of data from function to function, allowing the VM to execute the functions concurrently on different problem instances simultaneously. 25 | 26 | In a pipeline the only choice is to serially pass the data from stage to stage. The current version also allows a fanout rather than a pipeline, in which case the data can be sent to one of the children nodes, or all of the children nodes. 27 | 28 | Co-op Structure 29 | =============== 30 | 31 | A Co-op is a single computation entity which can only accurately compute if all its constituent process components are present and operating normally. If any process fails, the entire Co-op network fails and is removed from memory in the erlang node. This is accomplished via a Kill_Switch process per Co-op which is connected to all processes used within a single Co-op Body (a second Kill_Switch can be maintained for just the Co-op Head so that the loss of a Body does not eliminate the Co-op Head and all pending messages for the Co-op). 32 | 33 | The Co-op Head serves as the publicly visible access point for delivering data to the Co-op (and unfortunately a potetially fatal choke point for certain traffic patterns) but it allows the passing of both control and data to the Co-op via dedicated processes, one for each type of message. Internally, both the data queue and the control queue are delivered to the Root process, which then forwards them to the Co-op Body's Root Co-op Node. Data messages are sent synchronously so that only one at a time may enter the Co-op Body, but control messages are sent asynchronously, so they get delivered more quickly by bypassing all pending data messages. Control messages should be infrequent and data messages should be very nearly 100% of the traffic to a Co-op. It is possible to send "high priority" data messages to a Co-op. Internally, the Data is sent via the control process so it bypasses all pending Data messages. 
Overuse of this option will flood the Root Pid with many messages, when its Erlang Mailbox should generally have just one Data message and one Control message at any given time. The Control Pid should generally have an empty queue, and the Data Pid may have many messages under load. 34 | 35 | 36 | ``` 37 | +------------+ 38 | | Control |-----------| 39 | +------------+ V 40 | +--------------+ +------------+ 41 | | Root Pid |------>| Root Node |---> 42 | +--------------+ +------------+ 43 | +------------+ ^ 44 | | Data |-----------| 45 | +------------+ 46 | ``` 47 | 48 | 49 | The Co-op Body is where computation takes place. The primary goal of the library is to encourage the use of more processes, but to do so using a principled and structured approach. If the VM is running on a chip with 10K cores, the program should have 50K or more processes to take full advantage of the hardware's capabilitites. The general strategy is to push towards one process per function (although internally the Coop Node will use 6 or more processes to fully represent the computation including realtime tap and trace, GUI reflection and other advanced options). The architect should use functional decomposition with the goals of: 50 | 51 | 1. Isolating subsystems and computational elements from failure 52 | 1. Encouraging overlapped computation (concurrent calculations) as much as possible 53 | 1. Expressing the algorithm as a traceable network of elements 54 | 1. Allowing variable data traffic and adaptive subsystems 55 | 56 | Many problems or transactions should be flowing through the system simultaneously, each one following an algorithmic path that touches on a sequence of processes and functions. Concurrency is not used to speed up a single task, but to allow many similar tasks to follow different solution paths at the same time, and also to encourage algorithmic decomposition for better understanding, tracing and debugging of logic. 
When performance is an issue, graphs can be transformed and processes can be merged in real time, to produce a more efficient computation at the expense of clarity of intermediate results and traceability of logic. 57 | 58 | Any Coop Node, and quite often the final tier of a graph, can emit results as one or more messages. Coop Nodes may deliver their messages to normal Erlang pids, other Coop Nodes or to Coops via their Coop Heads. A complete system snaps together graphs in a way similar to Lego(tm) bricks. Since Coops are implemented on the assumption that they are directed and acyclic, intermediary processes are normally used to introduce loops in the system. Loops can cause non-termination or just a multiplicative flood of message traffic, so care must be taken to use loops appropriately. There is currently a shortcut that allows access to the Head of the currently executing Coop, so it is prossible to emit messages without an intermediary process but it must be done programmatically rather than declaratively through the graph structure. 59 | 60 | Version 0.0.1 Code Layout 61 | ========================= 62 | 63 | The directory erlangsp/apps/coop/ contains the source and include code for the Co-op library. The coop.app.src specification does not start any application, it just bundles the modules into a library that can be included in another application. 
The src code is organized into the following modules: 64 | 65 | ``` 66 | coop: Create Co-ops (currently pipeline or fan out/fan in patterns only) 67 | coop_flow: Construct and return digraph instances 68 | coop_head: Interact with the head of a Co-op 69 | coop_head_ctl_rcv_erl: The message receive loop for the Co-op Head Control process 70 | coop_head_data_rcv_erl: The message receive loop for the Co-op Head Data process 71 | coop_head_root_rcv_erl: The message receive loop for the Co-op Head Root process 72 | coop_node: Interact with a body node of a Co-op 73 | coop_node_ctl_rcv_erl: The message receive loop for the Co-op Node Control process 74 | coop_node_data_rcv_erl: The message receive loop for the Co-op Node Data process 75 | coop_node_util: Miscellaneous Co-op Node functions 76 | ``` 77 | 78 | The erlangsp/apps/coop/include directory contains: 79 | 80 | ``` 81 | coop_dag.hrl: Type definitions and records for Co-ops 82 | coop_head.hrl: The internal state record definition for a coop_head instance 83 | coop_node.hrl: The internal state record definition for a coop_node instance 84 | ``` 85 | 86 | Version 0.0.1 Data Structures 87 | ============================= 88 | 89 | A Co-op consists of a Co-op Head and a Co-op Body (which is made up of one or more Co-op Nodes with message channels connected according to the computational graph structure from which it was constructed). Data is delivered to the Co-op Head, it flows to the Root Co-op Body Node (all co-op graphs emanate from a single root node). The Co-op module is a convenient wrapper and the primary external API, but Co-op Head and Co-op Node may need to be contacted directly. 90 | 91 | Co-op API 92 | ========= 93 | 94 | The Co-op external interface mainly allows creating and sending data to a Co-op: 95 | 96 | ``` 97 | new_pipeline(Pipe_Stage_Fns, Receiver) 98 | 99 | Creates a pipeline graph and populates it with one process per function specified 100 | in the Pipe_Stage_Fns (see #coop_dag_node{} below). 
The Receiver can be a pid(), 101 | a coop_head(), a coop_node(), or 'none'. 102 | 103 | new_fanout(Router_Fn, Worker_Fns, Receiver) 104 | 105 | Creates a fanout graph with an optional fan in to the Receiver and populates it 106 | with one process per function specified in the Worker_Fns (see #coop_dag_node{} 107 | below). The Receiver is the same fomrat as in new_pipeline, while the Router_Fn 108 | is also a #coop_dag_node{}. The Router_Fn determines the distribution method for 109 | the fanout: either round_robin, random or broadcast. 110 | 111 | get_kill_switch(Coop_Head): Gets the Kill_Switch pid for the Co-op Head 112 | relay_data(Receiver, Data): Sends a Data message to pid(), coop_head() or coop_node() 113 | relay_high_priority_data(Coop_Head, Data): Sends a Data message via Coop_Head_Ctl pid 114 | ``` 115 | 116 | coop_dag.hrl defines the records used in a Co-op. The #coop_dag_node{} is used to define a graph node. The generated graph could have a Name for each Node, but must have a Label which contains an executable function reference which will execute inside the process corresponding to the graph node. 117 | 118 | The record structure is as follows: 119 | 120 | ``` 121 | name: The name of the graph node, mainly for documentation purposes. 122 | label: A #coop_node_fn{} to identify the functionality and dataflow of the node. 123 | ``` 124 | 125 | The #coop_node_fn{} structure is: 126 | 127 | ``` 128 | init: {Mod, Fun, Args} for the initial internal state of a Coop_Node, called as Mod:Fun(Args) 129 | task: {Mod, Fun} for the computation performed in a Coop_Node, called as Mod:Fun(State, Data) 130 | flow: round_robin | random | broadcast to identify fanout policy 131 | ``` 132 | 133 | When a Co-op Node is created, it is initialized with a State which is passed to the receive loop. 
When a data message arrives, the receive loop performs: 134 | 135 | ``` 136 | Task_Module:Task_Function(State, Data) -> {New_State, Result} 137 | ``` 138 | 139 | The receive loop is called with New_State and the result is delivered to one or more downstream receivers based on the dataflow policy: 140 | 141 | ``` 142 | round_robin: the next fanout node when cycling through all listed 143 | random: a random fanout node of all listed 144 | broadcast: all fanout nodes listed 145 | ``` 146 | 147 | In the case of a pipeline, the dataflow policy defaults to broadcast and there is only one downstream node. 148 | 149 | Co-op Head 150 | ========== 151 | 152 | A Co-op Head is a single entity with 2 externally visible processes and 1 internally hidden process (the number of internal processes will increase when tracing, logging and display are enabled). 153 | 154 | ``` 155 | Identifier: {coop_head, Head_Ctl_Pid, Head_Data_Pid} 156 | Head_Ctl_Pid: normal Erlang pid, only messages the Coop Head Root asynchronously 157 | Head_Data_Pid: normal Erlang pid, only messages the Coop Head Root synchronously 158 | Coop Head Root: normal Erlang pid, only messages the Coop Root Node asynchronously 159 | ``` 160 | 161 | The following coop_head exported functions are the primary ones to use: 162 | 163 | ``` 164 | new(Kill_Switch, Coop_Root_Node): create a new Coop Head hooked to Coop_Root_Node 165 | get_kill_switch(Coop_Head): return the pid of the Coop Head Kill_Switch 166 | ``` 167 | 168 | The following coop_head exported functions are used for sys style debugging / tracing: 169 | 170 | ``` 171 | stop(Coop_Head): Terminates the Coop_Head_Root pid, which will kill the Co-op 172 | suspend_root(Coop_Head): Issues a sys:suspend to the Coop_Head_Root pid 173 | resume_root(Coop_Head): Issues a sys:resume to the Coop_Head_Root pid 174 | format_status(Coop_Head): Returns the status of the Coop_Head_Root pid's internal state 175 | ctl_stats(Coop_Head, Flag, From): Issues sys:statistics to be 
delivered to From 176 | ctl_log(Coop_Head, Flag, From): Issues sys:log to be delivered to From 177 | ctl_log_to_file(Coop_Head, Flag, From): Issues sys:log_to_file to be delivered to From 178 | ``` 179 | 180 | Co-op Node 181 | ========== 182 | 183 | A Co-op Node is a single entity with 2 externally visible processes and multiple internally hidden process. 184 | 185 | ``` 186 | Identifier: {coop_node, Node_Ctl_Pid, Node_Task_Pid} 187 | Node_Ctl_Pid: normal Erlang pid, handles control messages to the Co-op Node 188 | Node_Data_Pid: normal Erlang pid, executes task function on Co-op Node Data arriving 189 | ``` 190 | 191 | Normally (99.99% of the time) there is no need to access a Co-op Node directly. The built-in data delivery and execution mechanisms allow data to flow from the Co-op Head through the relevant nodes of the Co-op Body executing the task function on each data element arriving. 192 | 193 | The following coop_node exported functions are the primary ones to use if you are debugging erlangsp or a specific Co-op Node function that is broken: 194 | 195 | ``` 196 | new(Kill_Switch, Node_Fn, Init_Fn): create a coop_node instance with 'broadcast' dataflow 197 | new(Kill_Switch, Node_Fn, Init_Fn, Data_Flow_Method): create a coop_node instance 198 | ``` 199 | 200 | To diagnose messaging issues, access to the downstream receivers may be useful: 201 | 202 | ``` 203 | node_task_get_downstream_pids(Coop_Node): list of pid(), coop_head(), coop_node() or 'none' 204 | node_task_add_downstream_pids(Coop_Node, Receivers): add to downstream list 205 | ``` 206 | 207 | The following coop_node exported functions are used for sys style debugging / tracing: 208 | 209 | ``` 210 | node_ctl_stop(Coop_Node): Terminate this Coop Node and take down the whole Coop 211 | node_ctl_suspend(Coop_Node): Issue sys:suspend to Node_Task_Pid 212 | node_ctl_resume(Coop_Node): Issue sys:resume to Node_Task_Pid 213 | node_ctl_trace(Coop_Node): Issue sys:trace to Node_Task_Pid (in/out messages) 
214 | node_ctl_untrace(Coop_Node): Issue sys:untrace to Node_Task_Pid 215 | node_ctl_stats(Coop_Node, Flag, From): Issue sys:stats to Node_Task_Pid 216 | node_ctl_log(Coop_Node, Flag, From): Issue sys:log to Node_Task_Pid 217 | node_ctl_log_to_file(Coop_Node, Flag, From): Issue sys:log_to_file to Node_Task_Pid 218 | node_ctl_install_trace_fn(Coop_Node, {Func, Func_State), From): Issue sys:install to Node_Task_Pid 219 | node_ctl_remove_trace_Fn(Coop_Node, Func, From): Issue sys:remove to Node_Task_Pid 220 | ``` 221 | 222 | Version 0.0.1 Graph Structures 223 | ============================== 224 | 225 | The following graph structures are supported: 226 | 227 | ``` 228 | 1. Pipeline - multiple functions executed serially, one process per function 229 | 1. Fanout - a single router leading to multiple worker processes 230 | ``` 231 | 232 | The fanout pattern can optional fan in to a single receiver. The dataflow style for a Pipeline defaults to 'broadcast' with a special optimization strategy for deliver data downstream when there is only one downstream receiver. The dataflow styles for Fanout can be round_robin, random or broadcast. On fan in, the dataflow parameter is ignored and the data is delivered to the single receiver. 233 | 234 | In all cases, the task functions must accept M:F(State, Data) and must return {New_State, Result}. 
235 | -------------------------------------------------------------------------------- /apps/coop/docs/coopbody.dot: -------------------------------------------------------------------------------- 1 | digraph G { 2 | style=filled, shape=box, color=black, style=solid 3 | rankdir=TB 4 | 5 | subgraph cluster_0 { 6 | label = "Co-op Head", labeljust = c 7 | 8 | subgraph { 9 | rank=same; 10 | DAG [shape=record, label="{DAG Template | Internal Skeleton}"] 11 | Channel [shape=record, style=filled, fillcolor=lightgrey, label=" Monitor | Root"]; 12 | } 13 | } 14 | 15 | subgraph cluster_1 { 16 | label = "Co-op Body", labeljust = r; 17 | node [shape=box, style=filled, fillcolor=lightgrey]; 18 | anchor1 [style=invisible], split, anchor2 [style=invisible] 19 | 20 | DAG:skel:se -> anchor2 [style=dashed]; 21 | Channel:Root -> split; 22 | anchor1 -> Channel:Monitor; 23 | 24 | split -> i1; 25 | split -> i2; 26 | split -> i3; 27 | i1 -> j1 -> merge; 28 | i2 -> j2 -> merge; 29 | i3 -> j3 -> merge; 30 | } 31 | 32 | node [label="External Co-op", shape=box, style=filled, fillcolor=lightgrey]; 33 | subgraph {rank=same; out1, out2 out3 } 34 | 35 | node [label="", style=invisible]; 36 | i2 -> out1 37 | j3 -> out2 38 | merge -> out3 39 | } -------------------------------------------------------------------------------- /apps/coop/docs/coopbody.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/duomark/erlangsp/a0a47d4ef3b74c1138e264424e36c754a3024e0a/apps/coop/docs/coopbody.pdf -------------------------------------------------------------------------------- /apps/coop/docs/coophead.dot: -------------------------------------------------------------------------------- 1 | digraph G { 2 | style=filled, shape=box, color=black, style=solid 3 | rankdir=LR 4 | 5 | subgraph cluster_0 { 6 | label = "Co-op Head", labeljust = c 7 | 8 | subgraph { 9 | rank=same; 10 | MonProcs [shape=record, style=filled, fillcolor=lightgrey, label=" 
Log | Reflection | Trace"] 11 | CtlProcs [shape=record, style=filled, fillcolor=lightgrey, label=" Control | Data"] 12 | // CtlProcs [shape=record, style=filled, fillcolor=lightgrey, label="{ Control | Data} | Co-op Recever"] 13 | } 14 | subgraph { 15 | rank=same; 16 | DAG [shape=record, label="DAG Template | Internal Skeleton"] 17 | Channel [shape=record, style=filled, fillcolor=lightgrey, label=" Monitor | Root"]; 18 | DataProcs [shape=record,label=" Data Item 1 | Data Item 2 | ... | Data Item N"] 19 | } 20 | 21 | CtlProcs:Control -> Channel:Root; 22 | CtlProcs:Data -> DataProcs:datatop 23 | DataProcs:datatop -> Channel:Root; 24 | Channel:Monitor -> MonProcs:Log; 25 | Channel:Monitor -> MonProcs:Reflection; 26 | Channel:Monitor -> MonProcs:Trace; 27 | } 28 | 29 | 30 | subgraph { 31 | rank=same, 32 | Body [label="Co-op Body", shape=box, style=filled, fillcolor=lightgrey] 33 | } 34 | Channel:Root -> Body:sw; 35 | Body:w -> Channel:Monitor; 36 | DAG:skel -> Body:n [style=dashed]; 37 | 38 | node [label="", style=invisible]; 39 | 40 | out [label=" | | ", shape=record] 41 | out:logout -> MonProcs:Log [dir=back] 42 | out:refout -> MonProcs:Reflection [dir=back] 43 | out:traceout -> MonProcs:Trace [dir=back] 44 | 45 | in [label=" | ", shape=record] 46 | in:control -> CtlProcs:Control 47 | in:data -> CtlProcs:Data 48 | } -------------------------------------------------------------------------------- /apps/coop/docs/coophead.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/duomark/erlangsp/a0a47d4ef3b74c1138e264424e36c754a3024e0a/apps/coop/docs/coophead.pdf -------------------------------------------------------------------------------- /apps/coop/docs/coops_poster.pages: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/duomark/erlangsp/a0a47d4ef3b74c1138e264424e36c754a3024e0a/apps/coop/docs/coops_poster.pages 
-------------------------------------------------------------------------------- /apps/coop/docs/coops_poster.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/duomark/erlangsp/a0a47d4ef3b74c1138e264424e36c754a3024e0a/apps/coop/docs/coops_poster.pdf -------------------------------------------------------------------------------- /apps/coop/include/coop.hrl: -------------------------------------------------------------------------------- 1 | 2 | %% Cooperating Processes (Co-op) data structure definitions 3 | 4 | %% A Co-op is a graph of process clusters, split into a Co-op Head and Co-op Body 5 | %% A Co-op Head receives incoming data and feeds it to the Co-op Body 6 | %% Co-op Nodes within the Co-op Body are linked into a directed graph upon which the data flows 7 | %% Each Co-op Node executes a task function on the data that flows in, passing the result onward 8 | -record(coop_head, { 9 | ctl_pid :: pid(), 10 | data_pid :: pid() 11 | }). 12 | 13 | -record(coop_node, { 14 | ctl_pid :: pid(), 15 | task_pid :: pid() 16 | }). 17 | 18 | -type coop_head() :: #coop_head{} | none. 19 | -type coop_node() :: #coop_node{} | none. 20 | -type coop_body() :: coop_node(). 21 | 22 | %% A Co-op Instance is a Co-op Head and Co-op Body with live processes populating the graph nodes 23 | -record(coop_instance, { 24 | id :: integer(), 25 | name :: atom(), 26 | head = none :: coop_head(), 27 | body = none :: coop_body(), 28 | dag :: digraph() 29 | }). 30 | 31 | -type coop_instance() :: #coop_instance{}. 32 | 33 | %% Dataflow methods determine how data is distributed to Co-op instances and 34 | %% within a Fanout Co-op Body to the downstream Co-op Nodes. 35 | -type single_data_flow_method() :: random | round_robin. 36 | -type multiple_data_flow_method() :: broadcast. 37 | -type data_flow_method() :: single_data_flow_method() | multiple_data_flow_method().
38 | 39 | -define(DATAFLOW_TYPES, [random, round_robin, broadcast]). 40 | 41 | %% TODO: Add 'select' to execute M:F(A) to determine one or more destinations 42 | %% TODO: Add 'shuffle' to randomly sample without duplicates 43 | %% TODO: Add 'replace' to use replacement policy to select process 44 | %% TODO: Add 'consume' to use round_robin but expire at end of task 45 | %% TODO: consume requires {Init_Count, Threshold_Count, Replenish_Count} 46 | 47 | %% A Co-op is a static Directed Acyclic Graph (DAG) template and a collection of Co-op Instances 48 | %% Each Co-op Instance is a clone of the DAG template, populated with active processes. 49 | %% An ets table is used to allow distributed access to the Co-op Collection (round_robin/random), 50 | %% unless there is only one Coop Instance. 51 | -type coop_collection() :: ets:tid(). 52 | 53 | %% The attributes of the Coop Collection ets table are standardized. 54 | -define(NEW_COOP_ETS, ets:new(coop, [set, public, {write_concurrency, true}, 55 | {keypos, #coop_instance.id}])). 56 | 57 | -record(coop, { 58 | instances :: coop_instance() | coop_collection(), 59 | dataflow :: data_flow_method(), 60 | dag_template :: digraph() 61 | }). 62 | 63 | -type coop() :: #coop{}. 64 | 65 | %% Output from a Co-op or internal Co-op Nodes can flow to Pids, other Co-ops, or internal Co-op Nodes 66 | -type coop_receiver() :: pid() | coop() | coop_node() | none. 67 | 68 | %% A given Co-op Node can have 0, 1 or N direct receivers of its results. 69 | -type downstream_workers() :: queue() | {coop_receiver()} | {}. 70 | 71 | %% A proplist of options (currently only 'access_coop_head') can be specified on Co-op Nodes. 72 | %% If true, access_coop_head exposes the Co-op Head in the arglist of the Node Task Function. 73 | -type coop_data_options() :: [proplists:property()]. 74 | -record(coop_node_options, { 75 | access_coop_head = false :: boolean() 76 | }). 
77 | 78 | %% Co-op Nodes are initialized as M:F(Arg) or M:F({Coop_Head, Arg}) 79 | %% Generally, use a tuple for the arg structure. 80 | -type coop_init_fn() :: {module(), atom(), any()}. 81 | %% Task functions are Module + Function, with args of (State, Data) or (Coop_Head, State, Data) 82 | -type coop_task_fn() :: {module(), atom()}. 83 | 84 | -record(coop_node_fn, { 85 | init :: coop_init_fn(), 86 | task :: coop_task_fn(), 87 | options = [] :: coop_data_options(), 88 | flow = broadcast :: data_flow_method() 89 | }). 90 | 91 | -record(coop_dag_node, { 92 | name :: string() | atom(), 93 | label :: #coop_node_fn{} 94 | }). 95 | 96 | -type coop_dag_node() :: #coop_dag_node{}. 97 | -------------------------------------------------------------------------------- /apps/coop/include/coop_dag.hrl: -------------------------------------------------------------------------------- 1 | 2 | %% Coop command tokens are uppercase prefixed with '$$_'. 3 | %% Applications should avoid using this prefix for atoms. 4 | -define(DAG_TOKEN, '$$_DAG'). 5 | -define(DATA_TOKEN, '$$_DATA'). 6 | -define(CTL_TOKEN, '$$_CTL'). 7 | -define(ROOT_TOKEN, '$$_ROOT'). 8 | 9 | %% Coop data tokens are uppercase prefixed with '##_'. 10 | %% Applications should avoid using this prefix for atoms. 11 | -define(COOP_NOOP, '##_NOOP'). 12 | 13 | -define(CTL_MSG(__Msg), {?DAG_TOKEN, ?CTL_TOKEN, __Msg}). 14 | -define(DATA_MSG(__Msg), {?DAG_TOKEN, ?DATA_TOKEN, __Msg}). 15 | 16 | -define(COOP_INIT_FN(__Fun, __Args), {?MODULE, __Fun, __Args}). 17 | -define(COOP_TASK_FN(__Fun), {?MODULE, __Fun}). 18 | 19 | %% TODO: Convert these to a function call instead of a macro. 20 | -define(SEND_CTL_MSG(__Coop_Node, __Ctl_Msg), 21 | {coop_node, __Ctl_Pid, __Task_Pid} = __Coop_Node, 22 | __Ctl_Pid ! {?DAG_TOKEN, ?CTL_TOKEN, __Ctl_Msg}). 23 | 24 | -define(SEND_CTL_MSG(__Coop_Node, __Ctl_Msg, __Flag, __Caller), 25 | {coop_node, __Ctl_Pid, __Task_Pid} = __Coop_Node, 26 | __Ctl_Pid ! 
{?DAG_TOKEN, ?CTL_TOKEN, __Ctl_Msg, __Flag, __Caller}). 27 | -------------------------------------------------------------------------------- /apps/coop/include/coop_head.hrl: -------------------------------------------------------------------------------- 1 | 2 | %%---------------------------------------------------------------------- 3 | %% A Coop Head is the external interface of a coop graph. 4 | %% It receives control and data requests and passes them on to 5 | %% the Coop Node components of the coop graph. 6 | %% 7 | %% There are separate pids internal to a Coop Head used to: 8 | %% 1) terminate the entire coop (kill_switch) 9 | %% 2) receive control requests (ctl) 10 | %% 3) forward data requests (data) 11 | %% 4) one_at_a_time gateway to Coop Body (root) 12 | %% 5) relay trace information (trace) 13 | %% 6) record log and telemetry data (log) 14 | %% 7) reflect data flow for user display and analysis (reflect) 15 | %%---------------------------------------------------------------------- 16 | 17 | -record(coop_head_state, { 18 | kill_switch :: pid(), 19 | ctl :: pid(), 20 | data :: pid(), 21 | root :: pid(), 22 | log :: pid(), 23 | trace :: pid(), 24 | reflect :: pid(), 25 | coop_root_node :: coop_body() 26 | }). 27 | 28 | -------------------------------------------------------------------------------- /apps/coop/include/coop_node.hrl: -------------------------------------------------------------------------------- 1 | 2 | %%---------------------------------------------------------------------- 3 | %% A Co-op Node is a single worker element of a Co-op. Every worker 4 | %% element exists to accept data, transform it and pass it on.
5 | %% 6 | %% There are separate pids internal to a Co-op Node used to: 7 | %% 1) terminate the entire co-op (kill_switch) 8 | %% 2) receive control requests (ctl) 9 | %% 3) execute the transform function (task execs task_fn) 10 | %% 4) relay trace information (trace) 11 | %% 5) record log and telemetry data (log) 12 | %% 6) reflect data flow for user display and analysis (reflect) 13 | %%---------------------------------------------------------------------- 14 | 15 | -record(coop_node_state, { 16 | kill_switch :: pid(), 17 | ctl :: pid(), 18 | task :: pid(), 19 | init_fn :: coop_init_fn(), 20 | task_fn :: coop_task_fn(), 21 | trace :: pid(), 22 | log :: pid(), 23 | reflect :: pid() 24 | }). 25 | -------------------------------------------------------------------------------- /apps/coop/src/coop.app.src: -------------------------------------------------------------------------------- 1 | {application, coop, 2 | [ 3 | {id, "Co-op"}, 4 | {vsn, "0.0.1"}, 5 | {description, "Erlang Co-op Library"}, 6 | {modules, []}, 7 | {registered, []}, 8 | {applications, [kernel, stdlib, sasl, gs, appmon]}, 9 | {included_applications, []}, 10 | {env, []} 11 | ]}. 12 | -------------------------------------------------------------------------------- /apps/coop/src/coop.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Co-operating Process instances modeled on coop_flow graphs. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 
15 | 16 | %% External API 17 | -export([ 18 | new_pipeline/2, new_fanout/3, 19 | make_dag_node/4, make_dag_node/5, 20 | get_head_kill_switch/1, get_body_kill_switch/1, 21 | is_live/1, relay_data/2, relay_high_priority_data/2 22 | ]). 23 | 24 | %% For testing purposes only. 25 | -export([pipeline/4, fanout/5]). 26 | 27 | -include("coop.hrl"). 28 | 29 | 30 | %%---------------------------------------------------------------------- 31 | %% Create a Co-op (with a single Co-op Instance) 32 | %% 33 | %% PIPELINE: 34 | %% A chain of Co-op Nodes, ordered left to right. Data is 35 | %% received at the left Node, transformed at each Node and emerges 36 | %% normally at the right Node ending in transfer to the Co-op 37 | %% Receiver. 38 | %% 39 | %% FANOUT: 40 | %% A single Co-op Node which has 2 or more immediate downstream 41 | %% Nodes. Data must flow through the single entry point, but may 42 | %% then be directed to any one or more of the children Nodes. 43 | %%---------------------------------------------------------------------- 44 | -spec new_pipeline([#coop_dag_node{}], coop_receiver()) -> coop() | false. 45 | -spec new_fanout(#coop_dag_node{}, [#coop_dag_node{}], coop_receiver()) -> coop() | false. 46 | 47 | new_pipeline([#coop_dag_node{} | _More] = Node_Fns, Receiver) -> 48 | Head_Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 49 | Coop_Head = coop_head:new(Head_Kill_Switch, none), 50 | Body_Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 51 | {Coop_Root_Node, Template_Graph, Pipeline_Graph} 52 | = pipeline(Coop_Head, Body_Kill_Switch, Node_Fns, Receiver), 53 | Coop_Instance = make_coop_instance(1, Coop_Head, Coop_Root_Node, Pipeline_Graph), 54 | finish_new_coop(Coop_Instance, Head_Kill_Switch, Body_Kill_Switch, Template_Graph). 
55 | 56 | new_fanout(#coop_dag_node{} = Router_Fn, [#coop_dag_node{} | _More] = Workers, Receiver) -> 57 | Head_Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 58 | Coop_Head = coop_head:new(Head_Kill_Switch, none), 59 | Body_Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 60 | {Coop_Root_Node, Template_Graph, Fanout_Graph} 61 | = fanout(Coop_Head, Body_Kill_Switch, Router_Fn, Workers, Receiver), 62 | Coop_Instance = make_coop_instance(1, Coop_Head, Coop_Root_Node, Fanout_Graph), 63 | finish_new_coop(Coop_Instance, Head_Kill_Switch, Body_Kill_Switch, Template_Graph). 64 | 65 | finish_new_coop(#coop_instance{head=Coop_Head, body=Coop_Root_Node} = Coop_Instance, 66 | Head_Kill_Switch, Body_Kill_Switch, Template_Graph) -> 67 | case coop_head:set_root_node(Coop_Head, Coop_Root_Node) of 68 | true -> #coop{instances=Coop_Instance, dataflow=broadcast, dag_template=Template_Graph}; 69 | false -> 70 | exit(Body_Kill_Switch, kill), 71 | exit(Head_Kill_Switch, kill), 72 | false 73 | end. 74 | 75 | 76 | %%---------------------------------------------------------------------- 77 | %% Functions for making Co-op records 78 | %%---------------------------------------------------------------------- 79 | -spec make_coop_instance(integer(), coop_head(), coop_body(), digraph()) -> coop_instance() | {error, invalid_head_or_body}. 80 | -spec make_dag_node(string() | atom(), coop_init_fn(), coop_task_fn(), coop_data_options()) -> coop_dag_node(). 81 | -spec make_dag_node(string() | atom(), coop_init_fn(), coop_task_fn(), coop_data_options(), data_flow_method()) -> coop_dag_node() | {error, {invalid_data_flow_method, any()}}. 
82 | %% Build a Co-op Instance record; Head and Body must each be 'none' or the matching record type. 83 | make_coop_instance(Id, Head, Body, Dag) 84 | when is_integer(Id), Id > 0 -> 85 | case {Head, Body} of 86 | {none, none} -> #coop_instance{id=Id, head=Head, body=Body, dag=Dag}; 87 | {none, #coop_node{}} -> #coop_instance{id=Id, head=Head, body=Body, dag=Dag}; 88 | {#coop_head{}, none} -> #coop_instance{id=Id, head=Head, body=Body, dag=Dag}; 89 | {#coop_head{}, #coop_node{}} -> #coop_instance{id=Id, head=Head, body=Body, dag=Dag}; 90 | _Other -> {error, invalid_head_or_body} 91 | end. 92 | %% Wrap Name + Init/Task fns + Opts into a #coop_dag_node{}; dataflow defaults to broadcast. 93 | make_dag_node(Name, Init_Fn, Task_Fn, Opts) -> 94 | make_dag_node(Name, Init_Fn, Task_Fn, Opts, broadcast). 95 | 96 | make_dag_node(Name, {_Imod, _Ifun, _Iargs} = Init_Fn, {_Mod, _Fun} = Task_Fn, Opts, Data_Flow) 97 | when is_atom(_Imod), is_atom(_Ifun), is_atom(_Mod), is_atom(_Fun), is_list(Opts) -> 98 | case lists:member(Data_Flow, ?DATAFLOW_TYPES) of % direct membership test replaces counting comprehension 99 | true -> #coop_dag_node{name=Name, label=#coop_node_fn{init=Init_Fn, task=Task_Fn, 100 | options=Opts, flow=Data_Flow}}; 101 | false -> {error, {invalid_data_flow_method, Data_Flow}} 102 | end. 103 | 104 | 105 | %%---------------------------------------------------------------------- 106 | %% Get a reference to the Co-op Head or Co-op Body kill switch Pid. 107 | %%---------------------------------------------------------------------- 108 | get_head_kill_switch(Coop_Head) -> 109 | coop_head:get_kill_switch(Coop_Head). 110 | 111 | get_body_kill_switch(Coop_Node) -> 112 | coop_node:get_kill_switch(Coop_Node). 113 | 114 | 115 | %%---------------------------------------------------------------------- 116 | %% Check if a Coop_Head, Coop_Node or raw Pid is alive.
117 | %%---------------------------------------------------------------------- 118 | is_live(none) -> false; 119 | is_live(Pid) when is_pid(Pid) -> is_process_alive(Pid); 120 | is_live(#coop_head{ctl_pid=Ctl_Pid, data_pid=Data_Pid}) -> 121 | is_process_alive(Ctl_Pid) andalso is_process_alive(Data_Pid); 122 | is_live(#coop_node{ctl_pid=Ctl_Pid, task_pid=Task_Pid}) -> 123 | is_process_alive(Ctl_Pid) andalso is_process_alive(Task_Pid); 124 | is_live(#coop_instance{head=Coop_Head}) -> 125 | is_live(Coop_Head); 126 | is_live(#coop{instances=#coop_instance{head=Coop_Head}}) -> 127 | is_live(Coop_Head); 128 | is_live(#coop{instances=Ets_Table}) -> 129 | lists:all(fun is_live/1, ets:tab2list(Ets_Table)). % fix: lists:all/1 does not exist; pass the predicate to lists:all/2 130 | 131 | 132 | 133 | %%---------------------------------------------------------------------- 134 | %% Data messaging to Co-op Instances 135 | %%---------------------------------------------------------------------- 136 | 137 | %% Relay data is used to deliver Node output to Co-op, Co-op Instance, 138 | %% Co-op Head, Co-op Node or raw Pid. 139 | relay_data(#coop{instances=Instances}, Data) -> 140 | case Instances of 141 | #coop_instance{} -> relay_data(Instances, Data); 142 | _Ets_Table -> not_implemented_yet 143 | end, 144 | ok; 145 | relay_data(#coop_instance{head=none}, _Data) -> 146 | ok; 147 | relay_data(#coop_instance{head=Coop_Head}, Data) -> 148 | coop_head:send_data_msg(Coop_Head, Data); 149 | relay_data(#coop_head{} = Coop_Head, Data) -> 150 | coop_head:send_data_msg(Coop_Head, Data), 151 | ok; 152 | relay_data(#coop_node{} = Coop_Node, Data) -> 153 | coop_node:node_task_deliver_data(Coop_Node, Data), 154 | ok; 155 | relay_data(Pid, Data) when is_pid(Pid) -> 156 | Pid ! Data, 157 | ok; 158 | relay_data(none, _Data) -> 159 | ok. 160 | 161 | 162 | %% High priority only works for a Coop_Head, bypassing all pending Data requests.
163 | relay_high_priority_data(#coop_head{} = Coop_Head, Data) -> 164 | coop_head:send_priority_data_msg(Coop_Head, Data), 165 | ok; 166 | relay_high_priority_data(Dest, Data) -> 167 | relay_data(Dest, Data), 168 | ok. 169 | 170 | 171 | %%---------------------------------------------------------------------- 172 | %% Pipeline patterns (can only use serial broadcast dataflow method) 173 | %%---------------------------------------------------------------------- 174 | pipeline(Coop_Head, Kill_Switch, [#coop_dag_node{} | _More] = Node_Fns, Receiver) -> 175 | Pipeline_Graph = coop_flow:pipeline(Node_Fns), 176 | Vertex_List = [digraph:vertex(Pipeline_Graph, Name) || #coop_dag_node{name=Name} <- Node_Fns], 177 | pipeline(Coop_Head, Kill_Switch, Pipeline_Graph, Vertex_List, Receiver). 178 | 179 | pipeline(Coop_Head, Kill_Switch, Pipeline_Template_Graph, Left_To_Right_Stages, Receiver) -> 180 | Coops_Graph = digraph:new([acyclic]), 181 | digraph:add_vertex(Coops_Graph, outbound, Receiver), 182 | {First_Stage_Coop_Node, _Second_Stage_Vertex_Name} = 183 | lists:foldr(fun(Node_Name_Fn_Pair, {_NextStage, _Downstream_Vertex_Name} = Acc) -> 184 | spawn_pipeline_stage(Coop_Head, Kill_Switch, Coops_Graph, Node_Name_Fn_Pair, Acc) 185 | end, {Receiver, outbound}, Left_To_Right_Stages), 186 | 187 | %% Return the first coop_node, template graph and live coop_node graph. 188 | {First_Stage_Coop_Node, Pipeline_Template_Graph, Coops_Graph}. 189 | 190 | spawn_pipeline_stage(Coop_Head, Kill_Switch, Graph, 191 | {Name, #coop_node_fn{init=Init_Fn, task=Task_Fn, options=Opts}}, 192 | {Receiver, Downstream_Vertex_Name}) -> 193 | Coop_Node = coop_node:new(Coop_Head, Kill_Switch, Task_Fn, Init_Fn, Opts), % Defaults to broadcast out 194 | coop_node:node_task_add_downstream_pids(Coop_Node, [Receiver]), % And just 1 receiver 195 | digraph:add_vertex(Graph, Name, Coop_Node), 196 | digraph:add_edge(Graph, Name, Downstream_Vertex_Name), 197 | {Coop_Node, Name}. 
198 | 199 | 200 | %%---------------------------------------------------------------------- 201 | %% Fanout patterns 202 | %%---------------------------------------------------------------------- 203 | fanout(Coop_Head, Kill_Switch, #coop_dag_node{name=Inbound} = Router_Fn, 204 | [#coop_dag_node{} | _More] = Workers, Receiver) -> 205 | Fanout_Graph = coop_flow:fanout(Router_Fn, Workers, Receiver), 206 | fanout(Inbound, Coop_Head, Kill_Switch, Fanout_Graph). 207 | 208 | fanout(Inbound, Coop_Head ,Kill_Switch, Fanout_Template_Graph) -> 209 | Coops_Graph = digraph:new([acyclic]), 210 | {Inbound, #coop_node_fn{init=Inbound_Init_Fn, task=Inbound_Task_Fn, options=Opts, flow=Inbound_Dataflow}} 211 | = digraph:vertex(Fanout_Template_Graph, Inbound), 212 | Inbound_Node = coop_node:new(Coop_Head, Kill_Switch, Inbound_Task_Fn, Inbound_Init_Fn, Opts, Inbound_Dataflow), 213 | digraph:add_vertex(Coops_Graph, Inbound, Inbound_Node), 214 | {Has_Fan_In, Rcvr} = case digraph:vertex(Fanout_Template_Graph, outbound) of 215 | false -> {false, none}; 216 | {outbound, Receiver} -> 217 | digraph:add_vertex(Coops_Graph, outbound, Receiver), 218 | {true, Receiver} 219 | end, 220 | Worker_Nodes = [add_fanout_worker_node(Coop_Head, Kill_Switch, Inbound, Has_Fan_In, Rcvr, Fanout_Template_Graph, Vertex_Name, Coops_Graph) 221 | || Vertex_Name <- digraph:out_neighbours(Fanout_Template_Graph, Inbound)], 222 | coop_node:node_task_add_downstream_pids(Inbound_Node, Worker_Nodes), 223 | {Inbound_Node, Fanout_Template_Graph, Coops_Graph}. 
224 | 225 | add_fanout_worker_node(Coop_Head, Kill_Switch, Inbound, Has_Fan_In, Receiver, Template_Graph, Vertex_Name, Coops_Graph) -> 226 | {Vertex_Name, #coop_node_fn{init=Init_Fn, task=Task_Fn, options=Opts}} 227 | = digraph:vertex(Template_Graph, Vertex_Name), 228 | Coop_Node = coop_node:new(Coop_Head, Kill_Switch, Task_Fn, Init_Fn, Opts), % Defaults to broadcast 229 | digraph:add_vertex(Coops_Graph, Vertex_Name, Coop_Node), 230 | digraph:add_edge(Coops_Graph, Inbound, Vertex_Name), 231 | Has_Fan_In andalso begin 232 | digraph:add_edge(Coops_Graph, Vertex_Name, outbound), 233 | coop_node:node_task_add_downstream_pids(Coop_Node, [Receiver]) 234 | end, 235 | Coop_Node. 236 | 237 | -------------------------------------------------------------------------------- /apps/coop/src/coop_flow.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Flow graphs for cooperating processes. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop_flow). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 15 | 16 | %% Friendly API 17 | -export([pipeline/1, chain_vertices/2, fanout/3]). 18 | 19 | -include("coop.hrl"). 20 | -include("coop_dag.hrl"). 21 | 22 | 23 | %%---------------------------------------------------------------------- 24 | %% Pipeline patterns 25 | %% pipeline flow is Graph<{Name, Fn}, ...> 26 | %%---------------------------------------------------------------------- 27 | -spec pipeline([#coop_dag_node{}]) -> digraph(). 28 | -spec chain_vertices(digraph(), [digraph:vertex()]) -> digraph(). 
29 | 30 | pipeline([#coop_dag_node{} | _More] = Node_Fns) -> 31 | Graph = digraph:new([acyclic]), 32 | Vertices = [digraph:add_vertex(Graph, Name, Fn) 33 | || #coop_dag_node{name=Name, label=Fn} <- Node_Fns], 34 | chain_vertices(Graph, Vertices). 35 | 36 | chain_vertices(Graph, []) -> Graph; 37 | chain_vertices(Graph, [_H]) -> Graph; 38 | chain_vertices(Graph, [H1,H2 | T]) -> 39 | digraph:add_edge(Graph, H1, H2), 40 | chain_vertices(Graph, [H2 | T]). 41 | 42 | 43 | %%---------------------------------------------------------------------- 44 | %% Fanout patterns 45 | %% fanout flow is Graph<{Name, Fn} => [... {Name, Fn} ...] => {Name, Fn}> 46 | %%---------------------------------------------------------------------- 47 | -spec fanout(#coop_dag_node{}, [#coop_dag_node{}], coop_receiver()) -> digraph(). 48 | 49 | fanout(#coop_dag_node{name=Name, label=Node_Fn} = _Router_Fn, 50 | [#coop_dag_node{}|_More] = Workers, Fan_In_Receiver) -> 51 | Graph = digraph:new([acyclic]), 52 | Inbound = make_named_vertex(Graph, Name, Node_Fn, inbound), 53 | Outbound = case Fan_In_Receiver of 54 | none -> none; 55 | _Node -> digraph:add_vertex(Graph, outbound, Fan_In_Receiver) 56 | end, 57 | _Frontier = [begin 58 | V = make_named_vertex(Graph, FName, FNode_Fn, worker), 59 | digraph:add_edge(Graph, Inbound, V), 60 | Outbound =:= none orelse digraph:add_edge(Graph, V, Outbound), 61 | V 62 | end || #coop_dag_node{name=FName, label=FNode_Fn} <- Workers], 63 | Graph. 64 | 65 | 66 | make_named_vertex(Graph, Name, Fn, Default_Name) -> 67 | Vertex_Name = case Name of undefined -> Default_Name; Name -> Name end, 68 | digraph:add_vertex(Graph, Vertex_Name, Fn). 69 | -------------------------------------------------------------------------------- /apps/coop/src/coop_head.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. 
All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Coop Head construct, manages data flow to the Coop Body. 8 | %%% 9 | %%% Coop Head is built from a Ctl process and a Data process that 10 | %%% are used to prioritize control commands over data processing. 11 | %%% Messages in the data queue are sent synchronously to the 12 | %%% Root Pid which then relays them to the Coop Body. Control 13 | %%% messages are relayed without any synchronous flow restrictions. 14 | %%% By acking each data request, after it relays it to the body, 15 | %%% the Root Pid ensures that all Control messages can be seen ahead 16 | %%% of queued Data messages. 17 | %%% 18 | %%% It is possible to send a data message on the control channel. 19 | %%% This serves as a high-priority bypass, but its use should be 20 | %%% rare. 21 | %%% 22 | %%% Excessive use of control messages will cause queueing at the 23 | %%% Root Pid rather than in another area of the system, resulting 24 | %%% in delayed responsiveness to command and control or OTP System 25 | %%% messages, as well as a lack of data throughput. 26 | %%% 27 | %%% The Root Pid also responds to OTP System messages so it can 28 | %%% be suspended, resumed, debugged, traced and managed using OTP 29 | %%% tools. These are primarily used to restrict data flow when code 30 | %%% changes require it, or data in transit is too heavy. 31 | %%% 32 | %%% @since v0.0.1 33 | %%% @end 34 | %%%------------------------------------------------------------------------------ 35 | -module(coop_head). 36 | -author('Jay Nelson '). 37 | 38 | -include("../erlangsp/include/license_and_copyright.hrl"). 39 | 40 | %% Graph API 41 | -export([ 42 | %% Create coop_head instances... 43 | new/2, get_kill_switch/1, 44 | 45 | %% Send commands to coop_head control process... 
46 | %% ctl_clone/1, 47 | stop/1, suspend_root/1, resume_root/1, format_status/1, 48 | %% ctl_trace/1, ctl_untrace/1, 49 | ctl_stats/3, ctl_log/3, ctl_log_to_file/3 50 | %% ctl_install_trace_fn/3, ctl_remove_trace_fn/3, 51 | ]). 52 | 53 | %% Internal functions that are exported (not part of the external API) 54 | -export([ 55 | get_root_pid/1, set_root_node/2, 56 | 57 | %% Send commands to coop_head data task process... 58 | send_ctl_msg/2, send_ctl_change_timeout/2, 59 | send_data_msg/2, send_priority_data_msg/2, 60 | send_data_change_timeout/2 61 | ]). 62 | 63 | %% Internal functions for spawned processes 64 | -export([echo_loop/1]). 65 | 66 | -include("coop.hrl"). 67 | -include("coop_dag.hrl"). 68 | -include("coop_head.hrl"). 69 | 70 | -define(CTL_MSG_TIMEOUT, 500). 71 | -define(SYNC_MSG_TIMEOUT, none). 72 | -define(SYNC_RCV_TIMEOUT, 2000). 73 | 74 | 75 | %%---------------------------------------------------------------------- 76 | %% External interface for sending ctl/data messages 77 | %%---------------------------------------------------------------------- 78 | -spec send_ctl_msg(coop_head(), any()) -> ok. 79 | -spec send_ctl_change_timeout(coop_head(), none | pos_integer()) -> ok. 80 | -spec send_data_msg(coop_head(), any()) -> ok. 81 | -spec send_priority_data_msg(coop_head(), any()) -> ok. 82 | -spec send_data_change_timeout(coop_head(), none | pos_integer()) -> ok. 83 | 84 | -spec get_kill_switch(coop_head()) -> pid(). 85 | -spec get_root_pid(coop_head()) -> pid() | none. 86 | 87 | -spec set_root_node(coop_head(), coop_node()) -> boolean(). 88 | 89 | -spec stop(coop_head()) -> ok. 90 | -spec suspend_root(coop_head()) -> ok. 91 | -spec resume_root(coop_head()) -> ok. 92 | -spec format_status(coop_head()) -> ok. 93 | 94 | -spec ctl_stats(coop_head(), boolean() | get, pid()) -> ok | {ok, list()}. 95 | 96 | send_ctl_msg_internal (#coop_head{ctl_pid=Head_Ctl_Pid}, Msg) -> Head_Ctl_Pid ! {?DAG_TOKEN, ?CTL_TOKEN, Msg}, ok. 
97 | send_data_msg_internal(#coop_head{data_pid=Head_Data_Pid}, Msg) -> Head_Data_Pid ! {?DAG_TOKEN, ?DATA_TOKEN, Msg}, ok. 98 | 99 | send_ctl_msg(Coop_Head, Msg) -> send_ctl_msg_internal(Coop_Head, Msg). 100 | send_ctl_msg(Coop_Head, Msg, Flag, From) -> send_ctl_msg_internal(Coop_Head, {Msg, Flag, From}). 101 | send_ctl_change_timeout(Coop_Head, New_Timeout) -> send_ctl_msg_internal(Coop_Head, {change_timeout, New_Timeout}). 102 | 103 | send_data_msg(Coop_Head, Msg) -> send_data_msg_internal(Coop_Head, Msg). 104 | send_priority_data_msg(#coop_head{ctl_pid=Head_Ctl_Pid}, Msg) -> Head_Ctl_Pid ! {?DAG_TOKEN, ?DATA_TOKEN, Msg}, ok. 105 | send_data_change_timeout(#coop_head{data_pid=Head_Data_Pid}, New_Timeout) -> 106 | Head_Data_Pid ! {?DAG_TOKEN, ?CTL_TOKEN, {change_timeout, New_Timeout}}, 107 | ok. 108 | 109 | stop(Coop_Head) -> send_ctl_msg(Coop_Head, {stop}). 110 | suspend_root(Coop_Head) -> send_ctl_msg(Coop_Head, {suspend}). 111 | resume_root(Coop_Head) -> send_ctl_msg(Coop_Head, {resume}). 112 | format_status(Coop_Head) -> send_ctl_msg(Coop_Head, {format_status}). 113 | 114 | ctl_stats(Coop_Head, Flag, From) -> 115 | Ref = make_ref(), 116 | send_ctl_msg(Coop_Head, stats, Flag, {Ref, From}), 117 | wait_ctl_response(stats, Ref). 118 | 119 | ctl_log(Coop_Head, Flag, From) -> 120 | Ref = make_ref(), 121 | send_ctl_msg(Coop_Head, log, Flag, {Ref, From}), 122 | wait_ctl_response(log, Ref). 123 | 124 | ctl_log_to_file(Coop_Head, Flag, From) -> 125 | Ref = make_ref(), 126 | send_ctl_msg(Coop_Head, log_to_file, Flag, {Ref, From}), 127 | wait_ctl_response(log_to_file, Ref). 128 | 129 | get_root_pid(Coop_Head) -> 130 | Ref = make_ref(), 131 | send_ctl_msg(Coop_Head, {get_root_pid, {Ref, self()}}), 132 | wait_ctl_response(get_root_pid, Ref). 133 | 134 | get_kill_switch(Coop_Head) -> 135 | Ref = make_ref(), 136 | send_ctl_msg(Coop_Head, {get_kill_switch, {Ref, self()}}), 137 | wait_ctl_response(get_kill_switch, Ref). 
138 | 139 | wait_ctl_response(Type, Ref) -> 140 | receive {Type, Ref, Info} -> Info 141 | after ?SYNC_RCV_TIMEOUT -> timeout 142 | end. 143 | 144 | set_root_node({coop_head, _Head_Ctl_Pid, _Head_Data_Pid} = Coop_Head, 145 | {coop_node, _Node_Ctl_Pid, _Node_Task_Pid} = Coop_Node) -> 146 | Ref = make_ref(), 147 | send_ctl_msg(Coop_Head, {set_root_node, Coop_Node, {Ref, self()}}), 148 | wait_ctl_response(set_root_node, Ref). 149 | 150 | 151 | %%---------------------------------------------------------------------- 152 | %% Create a new coop_head. A coop_head is represented by a pair of 153 | %% pids: a control process and a data process. 154 | %%---------------------------------------------------------------------- 155 | -spec new(pid(), pid()) -> coop_head(). 156 | 157 | new(Kill_Switch, Coop_Node) 158 | when is_pid(Kill_Switch) -> 159 | 160 | %% Start the root and data processes... 161 | Root_Pid = make_root_pid(Coop_Node), 162 | Ctl_Pid = make_ctl_pid (Root_Pid, ?CTL_MSG_TIMEOUT), 163 | Data_Pid = make_data_pid(Root_Pid, ?SYNC_MSG_TIMEOUT), 164 | 165 | %% Start support processes and initialize the control process internal state... 166 | {Trace_Pid, Log_Pid, Reflect_Pid} = make_support_pids(), 167 | Ctl_State = #coop_head_state{kill_switch=Kill_Switch, ctl=Ctl_Pid, data=Data_Pid, 168 | root=Root_Pid, log=Log_Pid, trace=Trace_Pid, 169 | reflect=Reflect_Pid, coop_root_node=Coop_Node}, 170 | Ctl_Pid ! {?DAG_TOKEN, ?CTL_TOKEN, {init_state, Ctl_State}}, 171 | 172 | %% Link all pids to the Kill_Switch and return the coop_head. 173 | Kill_Link_Args = [Ctl_Pid, Data_Pid, Root_Pid, Trace_Pid, Log_Pid, Reflect_Pid], 174 | coop_kill_link_rcv:link_to_kill_switch(Kill_Switch, Kill_Link_Args), 175 | #coop_head{ctl_pid=Ctl_Pid, data_pid=Data_Pid}. 176 | 177 | 178 | make_root_pid(none) -> 179 | proc_lib:spawn(coop_head_root_rcv, sync_pass_thru_loop, [none]); 180 | make_root_pid(#coop_node{} = Coop_Node) -> 181 | proc_lib:spawn(coop_head_root_rcv, sync_pass_thru_loop, [Coop_Node]). 
182 | 183 | make_data_pid(Root_Pid, Timeout) when is_pid(Root_Pid) -> 184 | proc_lib:spawn(coop_head_data_rcv, one_at_a_time_loop, [Root_Pid, Timeout]). 185 | 186 | make_ctl_pid(Root_Pid, Timeout) when is_pid(Root_Pid) -> 187 | proc_lib:spawn(coop_head_ctl_rcv, msg_loop, [{}, Root_Pid, Timeout]). 188 | 189 | make_support_pids() -> 190 | Trace_Pid = proc_lib:spawn(?MODULE, echo_loop, ["HTRC"]), 191 | [Log_Pid, Reflect_Pid] 192 | = [proc_lib:spawn(?MODULE, echo_loop, [Type]) || Type <- ["HLOG", "HRFL"]], 193 | {Trace_Pid, Log_Pid, Reflect_Pid}. 194 | 195 | 196 | %%---------------------------------------------------------------------- 197 | %% Coop Head receive loops for support pids. 198 | %%---------------------------------------------------------------------- 199 | -spec echo_loop(string()) -> no_return(). 200 | 201 | %% Trace, Log and Reflect process receive loop 202 | echo_loop(Type) -> 203 | receive 204 | {stop} -> exit(stopped); 205 | Any -> error_logger:info_msg("~p ~p ~p: ~p~n", [?MODULE, Type, self(), Any]) 206 | end, 207 | echo_loop(Type). 208 | -------------------------------------------------------------------------------- /apps/coop/src/coop_head_ctl_rcv.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Coop Head control process receive loop. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop_head_ctl_rcv). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 15 | 16 | %% Receive loop methods 17 | -export([msg_loop/3]). 
18 | 19 | %% System message API functions 20 | -export([ 21 | system_continue/3, system_terminate/4, system_code_change/4, 22 | format_status/2, debug_coop/3 23 | ]). 24 | 25 | -include("coop.hrl"). 26 | -include("coop_dag.hrl"). 27 | -include("coop_head.hrl"). 28 | 29 | %% Exit, initialize, timeout changes and getting the root_pid don't need root_pid involvement... 30 | msg_loop(State, Root_Pid, Timeout) -> 31 | msg_loop(State, Root_Pid, Timeout, sys:debug_options([])). 32 | 33 | %% Until init finished, respond only to OTP and receiving an initial state record. 34 | msg_loop({} = State, Root_Pid, Timeout, Debug_Opts) -> 35 | receive 36 | %% System messages for compatibility with OTP... 37 | {'EXIT', _Parent, Reason} -> exit(Reason); 38 | {system, From, System_Msg} -> 39 | Sys_Args = {State, Root_Pid, Timeout, Debug_Opts}, 40 | handle_sys(Sys_Args, From, System_Msg); 41 | {get_modules, From} -> 42 | From ! {modules, [?MODULE]}, 43 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 44 | {?DAG_TOKEN, ?CTL_TOKEN, {init_state, #coop_head_state{} = New_State}} -> 45 | msg_loop(New_State, Root_Pid, Timeout, Debug_Opts) 46 | end; 47 | 48 | %% Normal message loop after initial state is received. 49 | msg_loop(#coop_head_state{} = State, Root_Pid, Timeout, Debug_Opts) -> 50 | receive 51 | %% System messages for compatibility with OTP... 52 | {'EXIT', _Parent, Reason} -> exit(Reason); 53 | {system, From, System_Msg} -> 54 | Sys_Args = {State, Root_Pid, Timeout, Debug_Opts}, 55 | handle_sys(Sys_Args, From, System_Msg); 56 | {get_modules, From} -> 57 | From ! {modules, [?MODULE]}, 58 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 59 | 60 | %% Coop System message control messages... 
61 | {?DAG_TOKEN, ?CTL_TOKEN, {stop}} -> 62 | exit(stopped); 63 | {?DAG_TOKEN, ?CTL_TOKEN, {suspend}} -> 64 | sys:suspend(Root_Pid), 65 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 66 | {?DAG_TOKEN, ?CTL_TOKEN, {resume}} -> 67 | sys:resume(Root_Pid), 68 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 69 | {?DAG_TOKEN, ?CTL_TOKEN, {stats, Flag, {Ref, From}}} -> 70 | From ! {stats, Ref, sys:statistics(Root_Pid, Flag)}, 71 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 72 | {?DAG_TOKEN, ?CTL_TOKEN, {format_status}} -> 73 | State#coop_head_state.log ! sys:get_status(Root_Pid), 74 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 75 | {?DAG_TOKEN, ?CTL_TOKEN, {log, Flag, {Ref, From}}} -> 76 | From ! {log, Ref, sys:log(Root_Pid, Flag)}, 77 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 78 | {?DAG_TOKEN, ?CTL_TOKEN, log_to_file, File, {Ref, From}} -> 79 | From ! {log_to_file, Ref, sys:log_to_file(Root_Pid, File)}, 80 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 81 | 82 | %% State management and access control messages... NOTE(review): the log_to_file clause above matches a flat 5-tuple {?DAG_TOKEN, ?CTL_TOKEN, log_to_file, File, {Ref, From}}, unlike every sibling ctl clause in this loop which matches the nested {?DAG_TOKEN, ?CTL_TOKEN, {Cmd, ...}} form produced by coop_head's send_ctl_msg wrapper; if the sender uses the nested form this clause never matches and the request falls through to the generic Ctl_Msg forwarder -- confirm against the coop_head sender before relying on it. 83 | {?DAG_TOKEN, ?CTL_TOKEN, {change_timeout, New_Timeout}} -> 84 | msg_loop(State, Root_Pid, New_Timeout, Debug_Opts); 85 | {?DAG_TOKEN, ?CTL_TOKEN, {get_kill_switch, {Ref, From}}} -> 86 | From ! {get_kill_switch, Ref, State#coop_head_state.kill_switch}, 87 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 88 | {?DAG_TOKEN, ?CTL_TOKEN, {get_root_pid, {Ref, From}}} -> 89 | From ! {get_root_pid, Ref, Root_Pid}, 90 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 91 | {?DAG_TOKEN, ?CTL_TOKEN, {set_root_node, {coop_node, _, _} = Coop_Node, {Ref, From}} = Msg} -> 92 | case State#coop_head_state.coop_root_node of 93 | none -> 94 | Root_Pid ! {?CTL_TOKEN, Msg}, 95 | New_State = State#coop_head_state{coop_root_node=Coop_Node}, 96 | msg_loop(New_State, Root_Pid, Timeout, Debug_Opts); 97 | _Already_Set -> 98 | From ! 
{set_root_node, Ref, false}, 99 | msg_loop(State, Root_Pid, Timeout, Debug_Opts) 100 | end; 101 | 102 | %% Priority data messages bypass data queue via control channel, 103 | %% but can clog control processing waiting for ACKs. The timeout 104 | %% used is relatively short, and backlog can cause the Coop to 105 | %% crash, so high priority data should be sent sparingly. 106 | {?DAG_TOKEN, ?DATA_TOKEN, Data_Msg} -> 107 | ack = coop_head_data_rcv:relay_msg_to_root_pid(Data_Msg, Root_Pid, Timeout), 108 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 109 | 110 | %% Unrecognized control msgs are forwarded to Root Pid, without regard to how 111 | %% many messages are currently pending on the Root Pid queue. Sending too many 112 | %% can cause a high priority data message to crash the entire Coop. 113 | {?DAG_TOKEN, ?CTL_TOKEN, Ctl_Msg} -> 114 | Root_Pid ! {?CTL_TOKEN, Ctl_Msg}, 115 | msg_loop(State, Root_Pid, Timeout, Debug_Opts); 116 | 117 | %% Quit if random data shows up. 118 | _Unexpected -> 119 | exit(coop_head_bad_ctl) 120 | end. 121 | 122 | %%---------------------------------------------------------------------- 123 | %% System, debug and control messages for OTP compatibility 124 | %%---------------------------------------------------------------------- 125 | -spec system_continue(pid(), [sys:dbg_opt()], term()) -> no_return(). 126 | -spec system_terminate(atom(), pid(), [sys:dbg_opt()], term()) -> no_return(). 127 | -spec system_code_change(term(), module(), atom(), term()) -> {ok, term()}. 128 | -spec format_status(normal | terminate, list()) -> [proplists:property()]. 129 | 130 | handle_sys({_State, _Root_Pid, _Timeout, Debug_Opts} = Ctl_Internals, From, System_Msg) -> 131 | [Parent | _] = get('$ancestors'), 132 | sys:handle_system_msg(System_Msg, From, Parent, ?MODULE, Debug_Opts, Ctl_Internals). 133 | 134 | debug_coop(Dev, Event, State) -> 135 | io:format(Dev, "DBG: ~p event = ~p~n", [State, Event]). 
136 | 137 | system_continue(_Parent, New_Debug_Opts, {State, Root_Pid, Timeout, _Old_Debug_Opts} = _Misc) -> 138 | msg_loop(State, Root_Pid, Timeout, New_Debug_Opts). 139 | 140 | system_terminate(Reason, _Parent, _Debug_Opts, _Misc) -> exit(Reason). 141 | system_code_change(Misc, _Module, _OldVsn, _Extra) -> {ok, Misc}. 142 | 143 | format_status(normal, [_PDict, Sys_State, Parent, New_Debug_Opts, 144 | {_State, _Root_Pid, _Timeout, _Old_Debug_Opts}]) -> 145 | Hdr = "Status for " ++ atom_to_list(?MODULE), 146 | Log = sys:get_debug(log, New_Debug_Opts, []), 147 | [{header, Hdr}, 148 | {data, [{"Status", Sys_State}, 149 | {"Parent", Parent}, 150 | {"Logged events", Log}, 151 | {"Debug", New_Debug_Opts}] 152 | }]; 153 | 154 | format_status(terminate, Status_Data) -> [{terminate, Status_Data}]. 155 | -------------------------------------------------------------------------------- /apps/coop/src/coop_head_data_rcv.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Coop Head data process receive loop. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop_head_data_rcv). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 15 | 16 | %% Receive loop methods 17 | -export([one_at_a_time_loop/2, relay_msg_to_root_pid/3]). 18 | 19 | -include("coop.hrl"). 20 | -include("coop_dag.hrl"). 21 | 22 | 23 | -spec one_at_a_time_loop(pid(), pos_integer() | none) -> no_return(). 24 | 25 | %% One-at-a-time sends one synchronous message (waits for the ack) before the next. 
26 | one_at_a_time_loop(Root_Pid, Timeout) -> 27 | receive 28 | 29 | %% Ctl tag is used for meta-data about the data process... 30 | {?DAG_TOKEN, ?CTL_TOKEN, {change_timeout, New_Timeout}} -> 31 | one_at_a_time_loop(Root_Pid, New_Timeout); 32 | 33 | %% Data must be tagged as such to be processed... 34 | {?DAG_TOKEN, ?DATA_TOKEN, Data_Msg} -> 35 | ack = relay_msg_to_root_pid(Data_Msg, Root_Pid, Timeout), 36 | one_at_a_time_loop(Root_Pid, Timeout); 37 | 38 | %% Quit if random data shows up. 39 | _Unexpected -> 40 | exit(coop_head_bad_data) 41 | end. 42 | 43 | relay_msg_to_root_pid(Msg, Root_Pid, Timeout) -> 44 | 45 | %% New ref causes selective receive optimization when looking for ACK. 46 | Ref = make_ref(), 47 | Root_Pid ! {?DATA_TOKEN, {Ref, self()}, Msg}, 48 | 49 | %% Wait for Root_Pid to ack the receipt of data. 50 | case Timeout of 51 | none -> receive {?ROOT_TOKEN, Ref, Root_Pid} -> ack end; 52 | Milliseconds when is_integer(Milliseconds), Milliseconds > 0 -> 53 | receive {?ROOT_TOKEN, Ref, Root_Pid} -> ack 54 | after Timeout -> exit(root_ack_timeout) 55 | end 56 | end. 57 | -------------------------------------------------------------------------------- /apps/coop/src/coop_head_root_rcv.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Receive loop for the Root_Pid in the Coop Head. 8 | %%% 9 | %%% All messages are handled synchronously so that a single message 10 | %%% source should only send a single message onto the root_pid queue. 11 | %%% This allows control messages to interleave with data messages, 12 | %%% effectively bypassing all pending data requests except the 13 | %%% currently executing data request. 
14 | %%% 15 | %%% @since v0.0.1 16 | %%% @end 17 | %%%------------------------------------------------------------------------------ 18 | -module(coop_head_root_rcv). 19 | -author('Jay Nelson '). 20 | 21 | -include("../erlangsp/include/license_and_copyright.hrl"). 22 | 23 | %% Receive loop methods 24 | -export([sync_pass_thru_loop/1]). 25 | 26 | %% System message API functions 27 | -export([ 28 | system_continue/3, system_terminate/4, system_code_change/4, 29 | format_status/2, debug_coop/3 30 | ]). 31 | 32 | -include("coop.hrl"). 33 | -include("coop_dag.hrl"). 34 | 35 | 36 | -spec sync_pass_thru_loop(coop_node() | none) -> no_return(). 37 | 38 | %% Synchronous pass-thru just relays messages, but does so with ack to sender. 39 | sync_pass_thru_loop(Coop_Root_Node) -> 40 | sync_pass_thru_loop(Coop_Root_Node, sys:debug_options([])). 41 | 42 | 43 | %% Initially non-responsive Coop_Head when Root Coop_Node is 'none'... 44 | sync_pass_thru_loop(none, Debug_Opts) -> 45 | receive 46 | %% System messages for compatibility with OTP... 47 | {'EXIT', _Parent, Reason} -> exit(Reason); 48 | {system, From, System_Msg} -> 49 | Sys_Args = {pass_thru, none, Debug_Opts}, 50 | handle_sys(Sys_Args, From, System_Msg); 51 | {get_modules, From} -> 52 | From ! {modules, [?MODULE]}, 53 | sync_pass_thru_loop(none, Debug_Opts); 54 | 55 | %% Connect a valid Root Node. 56 | {?CTL_TOKEN, {set_root_node, {coop_node, _Node_Ctl_Pid, _Node_Task_Pid} = Coop_Node, {Ref, From}}} -> 57 | From ! {set_root_node, Ref, true}, 58 | sync_pass_thru_loop(Coop_Node, Debug_Opts); 59 | 60 | %% Refuse to connect an invalid Root Node. 61 | {?CTL_TOKEN, {set_root_node, _Not_A_Coop_Node, {Ref, From}}} -> 62 | From ! {set_root_node, Ref, false}, 63 | sync_pass_thru_loop(none, Debug_Opts) 64 | end; 65 | 66 | %% Normally functioning node, when a Root Coop_Node is connected. 
67 | sync_pass_thru_loop(#coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid} = Coop_Root_Node, Debug_Opts) -> 68 | receive 69 | %% System messages for compatibility with OTP... 70 | {'EXIT', _Parent, Reason} -> exit(Reason); 71 | {system, From, System_Msg} -> 72 | Sys_Args = {pass_thru, Coop_Root_Node, Debug_Opts}, 73 | handle_sys(Sys_Args, From, System_Msg); 74 | {get_modules, From} -> 75 | From ! {modules, [?MODULE]}, 76 | sync_pass_thru_loop(Coop_Root_Node, Debug_Opts); 77 | 78 | %% Control messages are not acked... 79 | {?CTL_TOKEN, Msg} -> 80 | In_Opts = sys:handle_debug(Debug_Opts, fun debug_coop/3, {}, {in, Msg}), 81 | Node_Ctl_Pid ! Msg, 82 | Out_Opts = sys:handle_debug(In_Opts, fun debug_coop/3, {}, {out, Msg, Node_Ctl_Pid}), 83 | sync_pass_thru_loop(Coop_Root_Node, Out_Opts); 84 | 85 | %% Data messages are acked for flow control. 86 | {?DATA_TOKEN, {Ref, From}, Msg} -> 87 | In_Opts = sys:handle_debug(Debug_Opts, fun debug_coop/3, {}, {in, Msg}), 88 | Node_Task_Pid ! Msg, 89 | Out_Opts = sys:handle_debug(In_Opts, fun debug_coop/3, {}, {out, Msg, Node_Task_Pid}), 90 | From ! {?ROOT_TOKEN, Ref, self()}, 91 | sync_pass_thru_loop(Coop_Root_Node, Out_Opts); 92 | 93 | %% Crash the process if unexpected data is received. 94 | _Unexpected -> exit(coop_root_bad_data) 95 | end. 96 | 97 | %%---------------------------------------------------------------------- 98 | %% System, debug and control messages for OTP compatibility 99 | %%---------------------------------------------------------------------- 100 | -spec system_continue(pid(), [sys:dbg_opt()], term()) -> no_return(). 101 | -spec system_terminate(atom(), pid(), [sys:dbg_opt()], term()) -> no_return(). 102 | -spec system_code_change(term(), module(), atom(), term()) -> {ok, term()}. 103 | -spec format_status(normal | terminate, list()) -> [proplists:property()]. 
104 | 105 | handle_sys({_Rcv_Loop_Type, _Coop_Root_Node, Debug_Opts} = Coop_Internals, From, System_Msg) -> 106 | [Parent | _] = get('$ancestors'), 107 | sys:handle_system_msg(System_Msg, From, Parent, ?MODULE, Debug_Opts, Coop_Internals). 108 | 109 | debug_coop(Dev, Event, State) -> 110 | io:format(Dev, "DBG: ~p event = ~p~n", [State, Event]). 111 | 112 | system_continue(_Parent, New_Debug_Opts, {pass_thru, Coop_Root_Node, _Old_Debug_Opts} = _Misc) -> 113 | sync_pass_thru_loop(Coop_Root_Node, New_Debug_Opts). 114 | 115 | system_terminate(Reason, _Parent, _Debug_Opts, _Misc) -> exit(Reason). 116 | system_code_change(Misc, _Module, _OldVsn, _Extra) -> {ok, Misc}. 117 | 118 | format_status(normal, [_PDict, SysState, Parent, New_Debug_Opts, 119 | {Rcv_Loop_Type, Coop_Root_Node, _Old_Debug_Opts}]) -> 120 | Hdr = "Status for " ++ atom_to_list(?MODULE), 121 | Log = sys:get_debug(log, New_Debug_Opts, []), 122 | Msgs = erlang:process_info(self(), [messages]), 123 | [{header, Hdr}, 124 | {data, [{"Status", SysState}, 125 | {"Loop", Rcv_Loop_Type}, 126 | {"Root", Coop_Root_Node}, 127 | {"Messages", Msgs}, 128 | {"Parent", Parent}, 129 | {"Logged events", Log}, 130 | {"Debug", New_Debug_Opts}] 131 | }]; 132 | 133 | format_status(terminate, StatusData) -> [{terminate, StatusData}]. 134 | -------------------------------------------------------------------------------- /apps/coop/src/coop_kill_link_rcv.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Receive loop for the Coop Kill Switch process. 
8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop_kill_link_rcv). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 15 | 16 | %% Graph API 17 | -export([make_kill_switch/0, link_to_kill_switch/2, link_loop/0]). 18 | 19 | -include("coop_dag.hrl"). 20 | 21 | -spec make_kill_switch() -> pid(). 22 | make_kill_switch() -> proc_lib:spawn(?MODULE, link_loop, []). 23 | 24 | link_to_kill_switch(Kill_Switch, Procs) when is_list(Procs) -> 25 | Kill_Switch ! {?DAG_TOKEN, ?CTL_TOKEN, {link, Procs}}. 26 | 27 | link_loop() -> 28 | receive 29 | 30 | %%------------------------------------------------------------ 31 | %% TODO: This code needs to be improved to handle remote 32 | %% support processes. Right now all are assumed to be local 33 | %% to the coop_head and coop_node's erlang VM node. 34 | {?DAG_TOKEN, ?CTL_TOKEN, {link, Procs}} -> 35 | [case is_process_alive(P) of 36 | 37 | %% Crash if process to link is already dead 38 | false -> [exit(Pid, kill) || Pid <- Procs], exit(kill); 39 | 40 | %% Otherwise link and continue 41 | true -> link(P) 42 | 43 | end || P <- Procs], 44 | link_loop(); 45 | %%------------------------------------------------------------ 46 | 47 | _Unknown -> 48 | error_logger:error_msg("~p ~p: Ignoring ~p~n", [?MODULE, self(), _Unknown]), 49 | link_loop() 50 | end. 51 | -------------------------------------------------------------------------------- /apps/coop/src/coop_node.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Single graph node process. 
8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop_node). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 15 | 16 | %% Graph API 17 | -export([ 18 | %% Create coop_node instances... 19 | new/5, new/6, 20 | 21 | %% Send commands to coop_node control process... 22 | node_ctl_clone/1, node_ctl_stop/1, 23 | node_ctl_suspend/1, node_ctl_resume/1, node_ctl_trace/1, node_ctl_untrace/1, 24 | node_ctl_stats/3, node_ctl_log/3, node_ctl_log_to_file/3, 25 | node_ctl_install_trace_fn/3, node_ctl_remove_trace_fn/3 26 | ]). 27 | 28 | %% Internal functions that are exported (not part of the external API) 29 | -export([ 30 | %% Send data to a node... 31 | node_task_deliver_data/2, 32 | 33 | %% Inspect and add to downstream receivers. 34 | node_task_get_downstream_pids/1, node_task_add_downstream_pids/2 35 | ]). 36 | 37 | %% Internal functions for spawned processes 38 | -export([echo_loop/1, link_loop/0]). 39 | 40 | -include("coop.hrl"). 41 | -include("coop_dag.hrl"). 42 | -include("coop_node.hrl"). 43 | 44 | %%---------------------------------------------------------------------- 45 | %% Create a new coop_node. A coop_node is represented by a pair of 46 | %% pids: a control process and a data task process. 47 | %%---------------------------------------------------------------------- 48 | -spec new(coop_head(), pid(), coop_task_fn(), coop_init_fn(), coop_data_options()) -> coop_node(). 49 | -spec new(coop_head(), pid(), coop_task_fn(), coop_init_fn(), coop_data_options(), data_flow_method()) -> coop_node(). 50 | 51 | %% Broadcast is default for downstream data distribution. 52 | %% Optimized for special case of 1 downstream pid. 53 | new(Coop_Head, Kill_Switch, Node_Fn, Init_Fn, Data_Opts) -> 54 | new(Coop_Head, Kill_Switch, Node_Fn, Init_Fn, Data_Opts, broadcast). 55 | 56 | %% Override downstream data distribution. 
57 | new(#coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = Coop_Head, Kill_Switch, 58 | {_Task_Mod, _Task_Fn} = Node_Fn, {_Mod, _Fun, _Args} = Init_Fn, 59 | Data_Opts, Data_Flow_Method) 60 | 61 | when is_pid(Head_Ctl_Pid), is_pid(Head_Data_Pid), is_pid(Kill_Switch), 62 | is_atom(_Task_Mod), is_atom(_Task_Fn), is_atom(_Mod), is_atom(_Fun), 63 | is_list(Data_Opts), 64 | ( Data_Flow_Method =:= random 65 | orelse Data_Flow_Method =:= round_robin 66 | orelse Data_Flow_Method =:= broadcast ) -> 67 | 68 | %% Start the data task process... 69 | Task_Pid = make_data_task_pid(Coop_Head, Node_Fn, Init_Fn, Data_Opts, Data_Flow_Method), 70 | 71 | %% Start support function processes... 72 | {Trace_Pid, Log_Pid, Reflect_Pid} = make_support_pids(), 73 | 74 | %% Start the control process... 75 | Ctl_Args = [Kill_Switch, Task_Pid, Init_Fn, Node_Fn, Trace_Pid, Log_Pid, Reflect_Pid], 76 | Ctl_Pid = proc_lib:spawn(coop_node_ctl_rcv, node_ctl_loop, Ctl_Args), 77 | 78 | %% Link all component pids to the Kill_Switch pid and return the Ctl and Data pids. 79 | coop_kill_link_rcv:link_to_kill_switch(Kill_Switch, [Ctl_Pid, Task_Pid, Trace_Pid, Log_Pid, Reflect_Pid]), 80 | #coop_node{ctl_pid=Ctl_Pid, task_pid=Task_Pid}. 81 | 82 | make_data_task_pid(Coop_Head, Node_Fn, Init_Fn, Data_Opts, Data_Flow_Method) -> 83 | Worker_Set = case Data_Flow_Method of random -> {}; _Other -> queue:new() end, 84 | Task_Args = [Coop_Head, Node_Fn, Init_Fn, Worker_Set, Data_Opts, Data_Flow_Method], 85 | proc_lib:spawn(coop_node_data_rcv, start_node_data_loop, Task_Args). 86 | 87 | make_support_pids() -> 88 | Trace_Pid = proc_lib:spawn(?MODULE, echo_loop, ["NTRC"]), 89 | [Log_Pid, Reflect_Pid] 90 | = [proc_lib:spawn(?MODULE, echo_loop, [Type]) || Type <- ["NLOG", "NRFL"]], 91 | {Trace_Pid, Log_Pid, Reflect_Pid}. 92 | 93 | 94 | %%---------------------------------------------------------------------- 95 | %% Control process interface... 
96 | %%---------------------------------------------------------------------- 97 | -define(SYNC_RECEIVE_TIME, 2000). 98 | 99 | node_ctl_clone (Coop_Node) -> ?SEND_CTL_MSG(Coop_Node, clone). 100 | node_ctl_stop (Coop_Node) -> ?SEND_CTL_MSG(Coop_Node, stop). 101 | node_ctl_suspend(Coop_Node) -> ?SEND_CTL_MSG(Coop_Node, suspend). 102 | node_ctl_resume (Coop_Node) -> ?SEND_CTL_MSG(Coop_Node, resume). 103 | node_ctl_trace (Coop_Node) -> ?SEND_CTL_MSG(Coop_Node, trace). 104 | node_ctl_untrace(Coop_Node) -> ?SEND_CTL_MSG(Coop_Node, untrace). 105 | 106 | wait_ctl_response(Type, Ref) -> 107 | receive {Type, Ref, Info} -> Info 108 | after ?SYNC_RECEIVE_TIME -> timeout 109 | end. 110 | 111 | node_ctl_log(Coop_Node, Flag, From) -> 112 | Ref = make_ref(), 113 | ?SEND_CTL_MSG(Coop_Node, log, Flag, {Ref, From}), 114 | wait_ctl_response(node_ctl_log, Ref). 115 | 116 | node_ctl_log_to_file(Coop_Node, File, From) -> 117 | Ref = make_ref(), 118 | ?SEND_CTL_MSG(Coop_Node, log_to_file, File, {Ref, From}), 119 | wait_ctl_response(node_ctl_log_to_file, Ref). 120 | 121 | node_ctl_stats(Coop_Node, Flag, From) -> 122 | Ref = make_ref(), 123 | ?SEND_CTL_MSG(Coop_Node, stats, Flag, {Ref, From}), 124 | wait_ctl_response(node_ctl_stats, Ref). 125 | 126 | node_ctl_install_trace_fn(Coop_Node, {Func, Func_State}, From) -> 127 | Ref = make_ref(), 128 | ?SEND_CTL_MSG(Coop_Node, install_trace_fn, {Func, Func_State}, {Ref, From}), 129 | wait_ctl_response(node_ctl_install_trace_fn, Ref). 130 | 131 | node_ctl_remove_trace_fn(Coop_Node, Func, From) -> 132 | Ref = make_ref(), 133 | ?SEND_CTL_MSG(Coop_Node, remove_trace_fn, Func, {Ref, From}), 134 | wait_ctl_response(node_ctl_remove_trace_fn, Ref). 135 | 136 | %%---------------------------------------------------------------------- 137 | %% Task process interface... 
138 | %%---------------------------------------------------------------------- 139 | node_task_get_downstream_pids(#coop_node{task_pid=Node_Task_Pid}) -> 140 | Ref = make_ref(), 141 | Node_Task_Pid ! {?DAG_TOKEN, ?CTL_TOKEN, {get_downstream, {Ref, self()}}}, 142 | receive 143 | {get_downstream, Ref, Pids} -> Pids 144 | after ?SYNC_RECEIVE_TIME -> timeout 145 | end. 146 | 147 | node_task_add_downstream_pids(#coop_node{task_pid=Node_Task_Pid}, Pids) when is_list(Pids) -> 148 | Node_Task_Pid ! {?DAG_TOKEN, ?CTL_TOKEN, {add_downstream, Pids}}, 149 | ok. 150 | 151 | %% Deliver data to a downstream Pid or Coop_Node. 152 | node_task_deliver_data(#coop_node{task_pid=Node_Task_Pid}, Data) -> 153 | Node_Task_Pid ! Data, 154 | ok; 155 | node_task_deliver_data(Pid, Data) when is_pid(Pid) -> 156 | Pid ! Data, 157 | ok. 158 | 159 | 160 | %%---------------------------------------------------------------------- 161 | %% Co-op Node receive loops for support pids. 162 | %%---------------------------------------------------------------------- 163 | 164 | -spec echo_loop(string()) -> no_return(). 165 | -spec link_loop() -> no_return(). 166 | 167 | %% Trace, Log and Reflect process receive loop. A {stop} message terminates the process (previously it returned ok and looped forever, unlike coop_head:echo_loop/1 which exits stopped). 168 | echo_loop(Type) -> 169 | receive 170 | {stop} -> exit(stopped); 171 | Any -> error_logger:info_msg("~p ~p ~p: ~p~n", [?MODULE, Type, self(), Any]) 172 | end, 173 | echo_loop(Type). 174 | 175 | %% Kill_switch process receive loop 176 | link_loop() -> 177 | receive 178 | 179 | %%------------------------------------------------------------ 180 | %% TODO: This code needs to be improved to handle remote 181 | %% support processes. Right now all are assumed to be local 182 | %% to the coop_node's erlang VM node.
183 | {?DAG_TOKEN, ?CTL_TOKEN, {link, Procs}} -> 184 | [case is_process_alive(P) of 185 | 186 | %% Crash if process to link is already dead 187 | false -> 188 | [exit(Pid, kill) || Pid <- Procs], 189 | exit(kill); 190 | 191 | %% Otherwise link and continue 192 | true -> link(P) 193 | 194 | end || P <- Procs], 195 | link_loop(); 196 | %%------------------------------------------------------------ 197 | 198 | _Unknown -> 199 | error_logger:error_msg("~p ~p: Ignoring ~p~n", [?MODULE, self(), _Unknown]), 200 | link_loop() 201 | end. 202 | -------------------------------------------------------------------------------- /apps/coop/src/coop_node_ctl_rcv.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Default receive loop for coop_node data. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop_node_ctl_rcv). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 15 | 16 | %% Graph API 17 | -export([node_ctl_loop/7]). 18 | 19 | -include("coop.hrl"). 20 | -include("coop_dag.hrl"). 21 | -include("coop_node.hrl"). 22 | 23 | %%---------------------------------------------------------------------- 24 | %% Coop Node data is executed using Node_Fn and the results are 25 | %% passed to one or more of the downstream workers. 26 | %%---------------------------------------------------------------------- 27 | -spec node_ctl_loop(pid(), pid(), coop_init_fn(), coop_task_fn(), pid(), pid(), pid()) -> no_return(). 
28 | 29 | node_ctl_loop(Kill_Switch, Task_Pid, Init_Fn, Node_Fn, Trace_Pid, Log_Pid, Reflect_Pid) -> 30 | node_ctl_loop(#coop_node_state{kill_switch=Kill_Switch, ctl=self(), task=Task_Pid, 31 | init_fn=Init_Fn, task_fn=Node_Fn, 32 | trace=Trace_Pid, log=Log_Pid, reflect=Reflect_Pid}). 33 | 34 | node_ctl_loop(#coop_node_state{task=Task_Pid, trace=Trace_Pid} = Coop_Node_State) -> 35 | receive 36 | %% Commands for controlling the entire Coop_Node element... 37 | {?DAG_TOKEN, ?CTL_TOKEN, stop} -> exit(stopped); 38 | {?DAG_TOKEN, ?CTL_TOKEN, clone} -> node_clone(Coop_Node_State); 39 | 40 | %% Commands for controlling/monitoring the Task_Pid... 41 | {?DAG_TOKEN, ?CTL_TOKEN, suspend } -> sys:suspend(Task_Pid); 42 | {?DAG_TOKEN, ?CTL_TOKEN, resume } -> sys:resume(Task_Pid); 43 | {?DAG_TOKEN, ?CTL_TOKEN, trace } -> erlang:trace(Task_Pid, true, trace_options(Trace_Pid)); 44 | {?DAG_TOKEN, ?CTL_TOKEN, untrace } -> erlang:trace(Task_Pid, false, trace_options(Trace_Pid)); 45 | 46 | {?DAG_TOKEN, ?CTL_TOKEN, log, Flag, {Ref, From}} -> From ! {node_ctl_log, Ref, sys:log(Task_Pid, Flag)}; 47 | {?DAG_TOKEN, ?CTL_TOKEN, log_to_file, File, {Ref, From}} -> From ! {node_ctl_log_to_file, Ref, sys:log_to_file(Task_Pid, File)}; 48 | {?DAG_TOKEN, ?CTL_TOKEN, stats, Flag, {Ref, From}} -> From ! {node_ctl_stats, Ref, sys:statistics(Task_Pid, Flag)}; 49 | 50 | {?DAG_TOKEN, ?CTL_TOKEN, install_trace_fn, FInfo, {Ref, From}} -> From ! {node_ctl_install_trace_fn, Ref, sys:install(Task_Pid, FInfo)}; 51 | {?DAG_TOKEN, ?CTL_TOKEN, remove_trace_fn, FInfo, {Ref, From}} -> From ! {node_ctl_remove_trace_fn, Ref, sys:remove(Task_Pid, FInfo)}; 52 | 53 | %% All others are unknown commands, just unqueue them. 54 | _Skip_Unknown_Msgs -> do_nothing 55 | end, 56 | node_ctl_loop(Coop_Node_State). 57 | 58 | node_clone(#coop_node_state{} = _Coop_Node_State) -> ok. 59 | trace_options(Tracer_Pid) -> [{tracer, Tracer_Pid}, send, 'receive', procs, timestamp]. 
60 | -------------------------------------------------------------------------------- /apps/coop/src/coop_node_data_rcv.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Default receive loop for coop_node data. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(coop_node_data_rcv). 12 | -author('Jay Nelson '). 13 | 14 | -include("../erlangsp/include/license_and_copyright.hrl"). 15 | 16 | %% Graph API 17 | -export([start_node_data_loop/6]). 18 | 19 | %% System message API functions 20 | -export([ 21 | system_continue/3, system_terminate/4, system_code_change/4, 22 | format_status/2, debug_coop/3 23 | ]). 24 | 25 | -include("coop.hrl"). 26 | -include("coop_dag.hrl"). 27 | 28 | 29 | %%---------------------------------------------------------------------- 30 | %% Co-op Node data is executed using Node_Fn and the results are 31 | %% passed to one or more of the downstream workers. 32 | %%---------------------------------------------------------------------- 33 | -spec start_node_data_loop(coop_head(), coop_task_fn(), coop_init_fn(), 34 | downstream_workers(), coop_data_options(), data_flow_method()) -> no_return(). 35 | -spec node_data_loop(coop_head(), coop_task_fn(), any(), downstream_workers(), 36 | #coop_node_options{}, data_flow_method(), [sys:dbg_opt()]) -> no_return(). 37 | 38 | init_data_options(Options) -> 39 | #coop_node_options{access_coop_head = proplists:get_bool(access_coop_head, Options)}. 
40 | 41 | start_node_data_loop(Coop_Head, Node_Fn, {Mod, Fun, Args} = _Init_Fn, Downstream_Pids, Options, Data_Flow_Method) -> 42 | Data_Opts = init_data_options(Options), 43 | Init_State = case Data_Opts#coop_node_options.access_coop_head of 44 | true -> Mod:Fun({Coop_Head, Args}); 45 | false -> Mod:Fun(Args) 46 | end, 47 | node_data_loop(Coop_Head, Node_Fn, Init_State, Downstream_Pids, Data_Opts, Data_Flow_Method, sys:debug_options([])). 48 | 49 | node_data_loop(Coop_Head, Node_Fn, Node_State, Downstream_Pids, Data_Opts, Data_Flow_Method, Debug_Opts) -> 50 | receive 51 | %% System messages 52 | {'EXIT', _Parent, Reason} -> exit(Reason); 53 | {system, From, System_Msg} -> 54 | Sys_Args = {Coop_Head, Node_Fn, Node_State, Downstream_Pids, Data_Opts, Data_Flow_Method, Debug_Opts}, 55 | handle_sys(Sys_Args, From, System_Msg); 56 | {get_modules, From} -> 57 | {Task_Module, _Task_Fn} = Node_Fn, 58 | From ! {modules, [?MODULE, Task_Module]}, 59 | node_data_loop(Coop_Head, Node_Fn, Node_State, Downstream_Pids, Data_Opts, Data_Flow_Method, Debug_Opts); 60 | 61 | %% Node control messages affecting Node_Fn, Pids or Data_Flow_Method... 62 | {?DAG_TOKEN, ?CTL_TOKEN, Dag_Ctl_Msg} -> 63 | New_Debug_Opts = sys:handle_debug(Debug_Opts, fun debug_coop/3, {Data_Flow_Method, Data_Opts, Node_State}, {in, Dag_Ctl_Msg}), 64 | New_Downstream_Pids = handle_ctl(Coop_Head, Downstream_Pids, Data_Opts, Data_Flow_Method, Dag_Ctl_Msg), 65 | node_data_loop(Coop_Head, Node_Fn, Node_State, New_Downstream_Pids, Data_Opts, Data_Flow_Method, New_Debug_Opts); 66 | 67 | %% All data is passed as is and untagged for processing. 
68 | Data -> 69 | New_Debug_Opts = sys:handle_debug(Debug_Opts, fun debug_coop/3, {Data_Flow_Method, Data_Opts, Node_State}, {in, Data}), 70 | {Final_Debug_Opts, Maybe_Reordered_Pids, New_Node_State} 71 | = relay_data(Coop_Head, New_Debug_Opts, Node_Fn, Node_State, Data_Opts, Data_Flow_Method, Data, Downstream_Pids), 72 | node_data_loop(Coop_Head, Node_Fn, New_Node_State, Maybe_Reordered_Pids, Data_Opts, Data_Flow_Method, Final_Debug_Opts) 73 | end. 74 | 75 | call_task_fn(Mod, Fn, Node_State, Data, true, Coop_Head) -> Mod:Fn(Coop_Head, Node_State, Data); 76 | call_task_fn(Mod, Fn, Node_State, Data, false, _Coop_Head) -> Mod:Fn(Node_State, Data). 77 | 78 | %% No Downstream_Pids... 79 | relay_data(Coop_Head, Debug_Opts, {Module, Function} = _Node_Fn, Node_State, 80 | #coop_node_options{access_coop_head=ACH}, _Any_Data_Flow_Method, Data, Worker_Set) 81 | when Worker_Set =:= {}; Worker_Set =:= {[],[]} -> 82 | {New_Node_State, _Fn_Result} = call_task_fn(Module, Function, Node_State, Data, ACH, Coop_Head), %% For side effects only. 83 | {Debug_Opts, Worker_Set, New_Node_State}; 84 | 85 | %% Faster routing if only one Downstream_Pid... 86 | relay_data(Coop_Head, Debug_Opts, Node_Fn, Node_State, Data_Opts, Any_Data_Flow_Method, Data, {Pid} = Worker_Set) -> 87 | notify_debug_and_return(Coop_Head, Debug_Opts, Node_Fn, Node_State, Data_Opts, Any_Data_Flow_Method, Data, Worker_Set, Pid); 88 | 89 | %% Relay data to all Downstream_Pids... 
90 | relay_data(Coop_Head, Debug_Opts, {Module, Function} = _Node_Fn, Node_State, 91 | #coop_node_options{access_coop_head=ACH} = Data_Opts, broadcast, Data, Worker_Set) -> 92 | {New_Node_State, Fn_Result} = call_task_fn(Module, Function, Node_State, Data, ACH, Coop_Head), 93 | New_Opts = case Fn_Result of 94 | ?COOP_NOOP -> Debug_Opts; 95 | Live_Data -> lists:foldl(fun(To, Opts) -> 96 | coop:relay_data(To, Live_Data), 97 | Debug_Args = {broadcast, Data_Opts, New_Node_State}, 98 | sys:handle_debug(Opts, fun debug_coop/3, Debug_Args, {out, Live_Data, To}) 99 | end, Debug_Opts, queue:to_list(Worker_Set)) %% TODO: is this expensive?! 100 | end, 101 | {New_Opts, Worker_Set, New_Node_State}; 102 | 103 | %% Relay data with random or round_robin has to choose a single destination. 104 | relay_data(Coop_Head, Debug_Opts, Node_Fn, Node_State, Data_Opts, Single_Data_Flow_Method, Data, Worker_Set) -> 105 | {Worker, New_Worker_Set} = choose_worker(Worker_Set, Single_Data_Flow_Method), 106 | notify_debug_and_return(Coop_Head, Debug_Opts, Node_Fn, Node_State, Data_Opts, Single_Data_Flow_Method, Data, New_Worker_Set, Worker). 107 | 108 | %% Used only for single downstream pid delivery methods. 109 | notify_debug_and_return(Coop_Head, Debug_Opts, {Module, Function}, Node_State, 110 | #coop_node_options{access_coop_head=ACH} = Data_Opts, Data_Flow_Method, Data, Worker_Set, Pid) -> 111 | {New_Node_State, Fn_Result} = call_task_fn(Module, Function, Node_State, Data, ACH, Coop_Head), 112 | New_Opts = case Fn_Result of 113 | ?COOP_NOOP -> Debug_Opts; 114 | Live_Data -> coop:relay_data(Pid, Live_Data), 115 | Debug_Args = {Data_Flow_Method, Data_Opts, New_Node_State}, 116 | sys:handle_debug(Debug_Opts, fun debug_coop/3, Debug_Args, {out, Live_Data, Pid}) 117 | end, 118 | {New_Opts, Worker_Set, New_Node_State}. 119 | 120 | %% Choose a worker randomly without changing the Worker_Set... 
%% choose_worker(Worker_Set, Flow_Method) -> {Chosen_Worker, New_Worker_Set}.
%% For 'random' flow Worker_Set is a tuple of pids: pick one slot via
%% coop_node_util:random_worker/1 and return the tuple unchanged.
121 | choose_worker(Worker_Set, random) ->
122 |     N = coop_node_util:random_worker(Worker_Set),
123 |     {element(N, Worker_Set), Worker_Set};
124 | %% Grab first worker, then rotate worker list for round_robin.
%% For 'round_robin' flow Worker_Set is a queue: dequeue the head and
%% re-enqueue it at the tail so the next call picks the next worker.
125 | choose_worker(Worker_Set, round_robin) ->
126 |     {{value, Worker}, Set_Minus_Worker} = queue:out(Worker_Set),
127 |     {Worker, queue:in(Worker, Set_Minus_Worker)}.
128 | 
129 | 
130 | %%----------------------------------------------------------------------
131 | %% Control message requests affecting data receive loop
132 | %%----------------------------------------------------------------------
%% handle_ctl/5 executes one DAG control command and returns the (possibly
%% updated) Downstream_Pids set; every other piece of loop state is left
%% unchanged by design (the caller threads the result back into the loop).
%% Reply to a get_coop_head request with the Coop_Head held by this node.
133 | handle_ctl( Coop_Head, Downstream_Pids, _Data_Opts, _Data_Flow_Method, {get_coop_head, {Ref, From}}) ->
134 |     From ! {get_coop_head, Ref, Coop_Head},
135 |     Downstream_Pids;
%% Adding an empty pid list is a no-op.
136 | handle_ctl(_Coop_Head, Downstream_Pids, _Data_Opts, _Data_Flow_Method, {add_downstream, []}) ->
137 |     Downstream_Pids;
%% Append new receivers; the resulting representation depends on the flow
%% method (tuple for 'random', queue otherwise — see do_add_downstream/3).
138 | handle_ctl(_Coop_Head, Downstream_Pids, _Data_Opts, Data_Flow_Method, {add_downstream, New_Pids})
139 |   when is_list(New_Pids) ->
140 |     do_add_downstream(Data_Flow_Method, Downstream_Pids, New_Pids);
%% Reply with the downstream pids flattened to a plain list regardless of
%% the internal representation.
141 | handle_ctl(_Coop_Head, Downstream_Pids, _Data_Opts, Data_Flow_Method, {get_downstream, {Ref, From}}) ->
142 |     reply_downstream_pids_as_list(Data_Flow_Method, Downstream_Pids, Ref, From),
143 |     Downstream_Pids;
%% Unknown commands are logged and ignored so a bad ctl message cannot
%% crash the data loop.
144 | handle_ctl(_Coop_Head, Downstream_Pids, _Data_Opts, _Data_Flow_Method, _Unknown_Cmd) ->
145 |     error_logger:info_msg("~p Unknown DAG Cmd: ~p~n", [?MODULE, _Unknown_Cmd]),
146 |     Downstream_Pids.
147 | 148 | do_add_downstream(random, Downstream_Pids, New_Pids) -> 149 | list_to_tuple(tuple_to_list(Downstream_Pids) ++ New_Pids); 150 | 151 | do_add_downstream(_Not_Random, {}, [Pid]) -> {Pid}; 152 | do_add_downstream(_Not_Random, {}, New_Pids) -> queue:from_list(New_Pids); 153 | do_add_downstream(_Not_Random, {Pid}, New_Pids) -> queue:from_list([Pid | New_Pids]); 154 | do_add_downstream(_Not_Random, Downstream_Pids, New_Pids) -> 155 | queue:join(Downstream_Pids, queue:from_list(New_Pids)). 156 | 157 | reply_downstream_pids_as_list(random, Downstream_Pids, Ref, From) -> 158 | From ! {get_downstream, Ref, tuple_to_list(Downstream_Pids)}; 159 | reply_downstream_pids_as_list(_Not_Random, Downstream_Pids, Ref, From) -> 160 | case Downstream_Pids of 161 | {} -> From ! {get_downstream, Ref, []}; 162 | {Pid} -> From ! {get_downstream, Ref, [Pid]}; 163 | Queue -> From ! {get_downstream, Ref, queue:to_list(Queue)} 164 | end. 165 | 166 | 167 | %%---------------------------------------------------------------------- 168 | %% System, debug and control messages for OTP compatibility 169 | %%---------------------------------------------------------------------- 170 | -spec system_continue(pid(), [sys:dbg_opt()], term()) -> no_return(). 171 | -spec system_terminate(atom(), pid(), [sys:dbg_opt()], term()) -> no_return(). 172 | -spec system_code_change(term(), module(), atom(), term()) -> {ok, term()}. 173 | -spec format_status(normal | terminate, list()) -> [proplists:property()]. 174 | 175 | handle_sys({_Coop_Head, _Node_Fn, _Node_State, _Downstream_Pids, _Data_Opts, _Data_Flow_Method, Debug_Opts} = Coop_Internals, 176 | From, System_Msg) -> 177 | [Parent | _] = get('$ancestors'), 178 | sys:handle_system_msg(System_Msg, From, Parent, ?MODULE, Debug_Opts, Coop_Internals). 179 | 180 | debug_coop(Dev, Event, State) -> 181 | io:format(Dev, "~p DBG: ~p event = ~p~n", [self(), State, Event]). 
182 | 183 | system_continue(_Parent, New_Debug_Opts, 184 | {Coop_Head, Node_Fn, Node_State, Downstream_Pids, Data_Opts, Data_Flow_Method, _Old_Debug_Opts} = _Misc) -> 185 | node_data_loop(Coop_Head, Node_Fn, Node_State, Downstream_Pids, Data_Opts, Data_Flow_Method, New_Debug_Opts). 186 | 187 | system_terminate(Reason, _Parent, _Debug_Opts, _Misc) -> exit(Reason). 188 | system_code_change(Misc, _Module, _OldVsn, _Extra) -> {ok, Misc}. 189 | 190 | format_status(normal, [_PDict, Sys_State, Parent, New_Debug_Opts, 191 | {_Coop_Head, Node_Fn, Node_State, Downstream_Pids, Data_Opts, Data_Flow_Method, _Old_Debug_Opts}]) -> 192 | Pid_Count = case Data_Flow_Method of 193 | random -> tuple_size(Downstream_Pids); 194 | _Not_Random -> 195 | case Downstream_Pids of 196 | {} -> 0; 197 | {_Pid} -> 1; 198 | Queue -> queue:len(Queue) 199 | end 200 | end, 201 | Hdr = "Status for coop_node", 202 | Log = sys:get_debug(log, New_Debug_Opts, []), 203 | [{header, Hdr}, 204 | {data, [{"Status", Sys_State}, 205 | {"Node_Fn", Node_Fn}, 206 | {"Node_State", Node_State}, 207 | {"Downstream_Pid_Count", Pid_Count}, 208 | {"Data_Options", format_data_options(Data_Opts)}, 209 | {"Data_Flow_Method", Data_Flow_Method}, 210 | {"Parent", Parent}, 211 | {"Logged events", Log}, 212 | {"Debug", New_Debug_Opts}] 213 | }]; 214 | 215 | format_status(terminate, Status_Data) -> [{terminate, Status_Data}]. 216 | 217 | format_data_options(#coop_node_options{access_coop_head=ACH}) -> 218 | "{access_coop_head: " ++ atom_to_list(ACH) ++ "}". 219 | -------------------------------------------------------------------------------- /apps/coop/src/coop_node_util.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. 
All rights reserved
3 | %%% @author Jay Nelson
4 | %%% @reference The license is based on the template for Modified BSD from
5 | %%% OSI
6 | %%% @doc
7 | %%% Utilities to ease testing
8 | %%% @since v0.0.1
9 | %%% @end
10 | %%%------------------------------------------------------------------------------
11 | -module(coop_node_util).
12 | -author('Jay Nelson ').
13 | 
14 | -include("../erlangsp/include/license_and_copyright.hrl").
15 | 
16 | -export([random_worker/1]).
17 | 
%% Pick a worker slot (1..tuple_size) uniformly at random without changing
%% Worker_Set. crypto:rand_uniform(Lo, Hi) returns Lo =< N < Hi — the upper
%% bound is EXCLUSIVE — so it must be tuple_size + 1; otherwise the last
%% worker is never selected and a single-worker tuple crashes (Lo >= Hi).
18 | random_worker(Worker_Set) -> crypto:rand_uniform(1, tuple_size(Worker_Set) + 1).
19 | --------------------------------------------------------------------------------
/apps/ctest/coop.coverspec:
--------------------------------------------------------------------------------
1 | {export, "./coop/logs/cover"}.
2 | {incl_mods, [coop, coop_flow, coop_head, coop_node,
3 |              coop_head_ctl_rcv, coop_head_data_rcv, coop_head_root_rcv,
4 |              coop_node_ctl_rcv, coop_node_data_rcv,
5 |              coop_kill_link_rcv, coop_node_util
6 |             ]}.
7 | --------------------------------------------------------------------------------
/apps/ctest/coop.spec:
--------------------------------------------------------------------------------
1 | {alias, coop, "./coop/"}.
2 | {logdir, "./logs/"}.
3 | {cover, "./coop.coverspec"}.
4 | {suites, coop, all}.
5 | --------------------------------------------------------------------------------
/apps/ctest/coop/coop_SUITE.erl:
--------------------------------------------------------------------------------
1 | -module(coop_SUITE).
2 | 
3 | -include("../../erlangsp/include/license_and_copyright.hrl").
4 | -include_lib("common_test/include/ct.hrl").
5 | -include("../../coop/include/coop.hrl").
6 | -include("../../coop/include/coop_dag.hrl").
7 | 
8 | %% Suite functions
9 | -export([all/0, init_per_suite/1, end_per_suite/1]).
10 | 11 | %% Pipeline and fanout tests 12 | -export([pipeline_flow/1, pipeline_failure/1, pipeline/1, 13 | fanout_flow/1, fanout_failure/1, 14 | fanout_round_robin/1, fanout_broadcast/1 15 | ]). 16 | 17 | %% Node task and init functions 18 | -export([init/1, plus2/2, times3/2, minus5/2, rr_init/1, rr_inc/2]). 19 | 20 | %% Test procs for validating process message output 21 | -export([receive_pipe_results/0, receive_round_robin_results/2]). 22 | 23 | all() -> [pipeline_flow, pipeline_failure, pipeline, 24 | fanout_flow, fanout_failure, 25 | fanout_round_robin, fanout_broadcast 26 | ]. 27 | 28 | init_per_suite(Config) -> Config. 29 | end_per_suite(_Config) -> ok. 30 | 31 | 32 | %%---------------------------------------------------------------------- 33 | %% Pipeline patterns 34 | %%---------------------------------------------------------------------- 35 | pipeline_failure(_Config) -> 36 | try coop_flow:pipeline(a) 37 | catch error:function_clause -> ok 38 | end, 39 | 40 | try coop_flow:pipeline([a]) 41 | catch error:function_clause -> ok 42 | end. 43 | 44 | init([f1]) -> f1; 45 | init([f2]) -> f2; 46 | init([f3]) -> f3. 47 | 48 | make_fake_head() -> 49 | Head_Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 50 | coop_head:new(Head_Kill_Switch, none). 51 | 52 | %% Init state and looping state are unused, but checked placeholders. 53 | plus2(f1, Num) -> {f1, Num+2}. 54 | times3(f2, Num) -> {f2, Num*3}. 55 | minus5(f3, Num) -> {f3, Num-5}. 
56 | 57 | example_pipeline_fns() -> 58 | %% Pipeline => 3 * (X+2) - 5 59 | F1_Init = {?MODULE, init, [f1]}, 60 | F2_Init = {?MODULE, init, [f2]}, 61 | F3_Init = {?MODULE, init, [f3]}, 62 | 63 | F1_Task = {?MODULE, plus2}, 64 | F2_Task = {?MODULE, times3}, 65 | F3_Task = {?MODULE, minus5}, 66 | 67 | F1_Node_Fn = #coop_node_fn{init=F1_Init, task=F1_Task}, 68 | F2_Node_Fn = #coop_node_fn{init=F2_Init, task=F2_Task}, 69 | F3_Node_Fn = #coop_node_fn{init=F3_Init, task=F3_Task}, 70 | 71 | [ 72 | #coop_dag_node{name=a, label=F1_Node_Fn}, 73 | #coop_dag_node{name=b, label=F2_Node_Fn}, 74 | #coop_dag_node{name=c, label=F3_Node_Fn} 75 | ]. 76 | 77 | pipeline_flow(_Config) -> 78 | Pipe_Stages = example_pipeline_fns(), 79 | Pipeline = coop_flow:pipeline(Pipe_Stages), 80 | Pipe_Stats = digraph:info(Pipeline), 81 | acyclic = proplists:get_value(cyclicity, Pipe_Stats), 82 | 83 | %% Check a -> b -> c... 84 | 3 = digraph:no_vertices(Pipeline), 85 | 2 = digraph:no_edges(Pipeline), 86 | 87 | %% Unidirectional flow... 88 | [a,b,c] = digraph:get_path(Pipeline, a, c), 89 | false = digraph:get_path(Pipeline, c, a), 90 | 91 | %% Check graph vertices. 92 | 3 = length(digraph:vertices(Pipeline)), 93 | [A, B, C] = [{N, L} || #coop_dag_node{name=N, label=L} <- Pipe_Stages], 94 | A = digraph:vertex(Pipeline, a), 95 | B = digraph:vertex(Pipeline, b), 96 | C = digraph:vertex(Pipeline, c). 97 | 98 | pipeline(_Config) -> 99 | Pid = spawn_link(?MODULE, receive_pipe_results, []), 100 | Pipe_Stages = example_pipeline_fns(), 101 | Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 102 | {First_Stage_Node, _Template_Graph, Coops_Graph} 103 | = coop:pipeline(make_fake_head(), Kill_Switch, Pipe_Stages, Pid), 104 | Pipe_Stats = digraph:info(Coops_Graph), 105 | acyclic = proplists:get_value(cyclicity, Pipe_Stats), 106 | coop:relay_data(First_Stage_Node, 7), 107 | timer:sleep(100), 108 | ok = fetch_results(Pid). 
109 | 110 | 111 | receive_pipe_results() -> 112 | receive 113 | 3 * (7+2) - 5 -> hold_results(ok); 114 | Other -> hold_results({fail, Other}) 115 | after 3000 -> hold_results(timeout) 116 | end. 117 | 118 | 119 | %%---------------------------------------------------------------------- 120 | %% Fanout patterns 121 | %%---------------------------------------------------------------------- 122 | fanout_failure(_Config) -> 123 | try coop_flow:fanout(a, 8, self()) 124 | catch error:function_clause -> ok 125 | end, 126 | 127 | try coop_flow:fanout(#coop_dag_node{}, a, self()) 128 | catch error:function_clause -> ok 129 | end. 130 | 131 | check_fanout_vertex(Graph, #coop_dag_node{label=Label}, Inbound, InDegree, OutDegree) -> 132 | {Inbound, Label} = digraph:vertex(Graph, Inbound), 133 | InDegree = digraph:in_degree(Graph, Inbound), 134 | OutDegree = digraph:out_degree(Graph, Inbound), 135 | InDegree = length(digraph:in_neighbours(Graph, Inbound)), 136 | OutDegree = length([V || V <- digraph:out_neighbours(Graph, Inbound)]); 137 | check_fanout_vertex(Graph, Pid, outbound = Outbound, InDegree, OutDegree) -> 138 | {Outbound, Pid} = digraph:vertex(Graph, Outbound), 139 | InDegree = digraph:in_degree(Graph, Outbound), 140 | OutDegree = digraph:out_degree(Graph, Outbound), 141 | InDegree = length([V || V <- digraph:in_neighbours(Graph, Outbound)]), 142 | OutDegree = length(digraph:out_neighbours(Graph, Outbound)); 143 | check_fanout_vertex(Graph, _N, {Name, Inbound, _Fn}, 1, 1) -> 144 | {Name, #coop_node_fn{}} = digraph:vertex(Graph, Name), 145 | [Inbound] = digraph:in_neighbours(Graph, Name), 146 | [outbound] = digraph:out_neighbours(Graph, Name). 
147 | 148 | fanout_flow(_Config) -> 149 | Self = self(), 150 | Router_Fn = #coop_dag_node{ 151 | name = funnel, 152 | label = #coop_node_fn{init={?MODULE, init, [f2]}, task={?MODULE, times3}} 153 | }, 154 | Worker_Node_Fns = [#coop_dag_node{ 155 | name = N, 156 | label = #coop_node_fn{init={?MODULE, init, [f3]}, task={?MODULE, minus5}}} 157 | || N <- lists:seq(1,8)], 158 | Coop_Flow = coop_flow:fanout(Router_Fn, Worker_Node_Fns, Self), 159 | 10 = digraph:no_vertices(Coop_Flow), 160 | 16 = digraph:no_edges(Coop_Flow), 161 | check_fanout_vertex(Coop_Flow, Router_Fn, funnel, 0, 8), 162 | check_fanout_vertex(Coop_Flow, Self, outbound, 8, 0), 163 | [check_fanout_vertex(Coop_Flow, 8, {N,funnel,#coop_node_fn{}}, 1, 1) || N <- lists:seq(1,8)]. 164 | 165 | make_fanout_coop(Dataflow_Type, Num_Workers, Receiver_Pid) -> 166 | Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 167 | Router_Fn = #coop_dag_node{ 168 | name = inbound, 169 | label = #coop_node_fn{init={?MODULE, rr_init, [0]}, task={?MODULE, rr_inc}, flow=Dataflow_Type} 170 | }, 171 | Worker_Node_Fns = [#coop_dag_node{ 172 | name = "inc_by_" ++ integer_to_list(N), 173 | label = #coop_node_fn{init={?MODULE, rr_init, [N]}, task={?MODULE, rr_inc}}} 174 | || N <- lists:seq(1, Num_Workers)], 175 | coop:fanout(make_fake_head(), Kill_Switch, Router_Fn, Worker_Node_Fns, Receiver_Pid). 
176 | 177 | fanout_round_robin(_Config) -> 178 | Num_Results = 6, 179 | Num_Workers = 3, 180 | Receiver_Pid = spawn_link(?MODULE, receive_round_robin_results, [Num_Results, []]), 181 | {Root_Coop_Node, _Template_Graph, Coops_Graph} = make_fanout_coop(round_robin, Num_Workers, Receiver_Pid), 182 | Fanout_Stats = digraph:info(Coops_Graph), 183 | acyclic = proplists:get_value(cyclicity, Fanout_Stats), 184 | 5 = digraph:no_vertices(Coops_Graph), 185 | 6 = digraph:no_edges(Coops_Graph), 186 | [coop:relay_data(Root_Coop_Node, 5) || _N <- lists:seq(1, Num_Results)], 187 | timer:sleep(100), 188 | Results6 = fetch_results(Receiver_Pid), 189 | 6 = length(Results6), 190 | Results4 = Results6 -- [6,6], 191 | 4 = length(Results4), 192 | Results2 = Results4 -- [7,7], 193 | 2 = length(Results2), 194 | Results0 = Results2 -- [8,8], 195 | 0 = length(Results0). 196 | 197 | fanout_broadcast(_Config) -> 198 | Num_Results = 12, 199 | Num_Workers = 4, 200 | Receiver_Pid = spawn_link(?MODULE, receive_round_robin_results, [Num_Results, []]), 201 | {Root_Coop_Node, _Template_Graph, Coops_Graph} = make_fanout_coop(broadcast, Num_Workers, Receiver_Pid), 202 | Fanout_Stats = digraph:info(Coops_Graph), 203 | acyclic = proplists:get_value(cyclicity, Fanout_Stats), 204 | 6 = digraph:no_vertices(Coops_Graph), 205 | 8 = digraph:no_edges(Coops_Graph), 206 | [coop:relay_data(Root_Coop_Node, 7) || _N <- lists:seq(1, Num_Results div Num_Workers)], 207 | timer:sleep(100), 208 | Results12 = fetch_results(Receiver_Pid), 209 | 12 = length(Results12), 210 | Results9 = Results12 -- [8,8,8], 211 | 9 = length(Results9), 212 | Results6 = Results9 -- [9,9,9], 213 | 6 = length(Results6), 214 | Results3 = Results6 -- [10,10,10], 215 | 3 = length(Results3), 216 | Results0 = Results3 -- [11,11,11], 217 | 0 = length(Results0). 218 | 219 | rr_init([Inc_Amt]) -> Inc_Amt. 220 | rr_inc(Inc_Amt, Value) -> {Inc_Amt, Value + Inc_Amt}. 
221 | 222 | receive_round_robin_results(0, Acc) -> hold_results(lists:reverse(Acc)); 223 | receive_round_robin_results(N, Acc) -> 224 | receive Any -> receive_round_robin_results(N-1, [Any | Acc]) 225 | after 3000 -> hold_results([timeout | Acc]) 226 | end. 227 | 228 | 229 | %%---------------------------------------------------------------------- 230 | %% Utilities for receiving coop results 231 | %%---------------------------------------------------------------------- 232 | fetch_results(Pid) -> 233 | Pid ! {fetch, self()}, 234 | receive Any -> Any 235 | after 3000 -> timeout_fetching 236 | end. 237 | 238 | hold_results(Results) -> 239 | receive 240 | {fetch, From} -> From ! Results 241 | after 3000 -> timeout 242 | end. 243 | -------------------------------------------------------------------------------- /apps/ctest/coop/coop_head_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(coop_head_SUITE). 2 | 3 | -include_lib("../../erlangsp/include/license_and_copyright.hrl"). 4 | -include_lib("common_test/include/ct.hrl"). 5 | 6 | %% Suite functions 7 | -export([ 8 | all/0, groups/0, 9 | init_per_suite/1, end_per_suite/1, 10 | init_per_group/2, end_per_group/2 11 | ]). 12 | 13 | %% Control process loop. 14 | -export([ 15 | head_ctl_kill_one_proc/1, head_ctl_kill_two_proc/1, 16 | head_ctl_stop_one_proc/1, 17 | 18 | send_ctl_msgs/1, send_data_msgs/1, 19 | 20 | sys_suspend/1, sys_format/1, sys_statistics/1, sys_log/1 21 | %% sys_install/1 22 | ]). 23 | 24 | %% Spawned functions 25 | -export([report_result/0]). 26 | -export([ 27 | fake_node_ctl/0, fake_node_data/0, fake_coop_node/0, 28 | result_node_ctl/0, result_node_task/0, result_coop_node/0 29 | ]). 30 | 31 | -include("../../coop/include/coop.hrl"). 
32 | 33 | groups() -> [{ctl_tests, [sequence], 34 | [ 35 | {kill, [sequence], [head_ctl_kill_one_proc, head_ctl_kill_two_proc]}, 36 | {stop, [sequence], [head_ctl_stop_one_proc]} 37 | ]}, 38 | {send_msgs, [sequence], 39 | [ 40 | {msgs, [sequence], [send_ctl_msgs, send_data_msgs]} 41 | ]}, 42 | {sys_tests, [sequence], 43 | [ 44 | {suspend, [sequence], [sys_suspend]}, 45 | {format, [sequence], [sys_format]}, 46 | {stats, [sequence], [sys_statistics]}, 47 | {log, [sequence], [sys_log]} 48 | %% {install, [sequence], [sys_install]} 49 | ]} 50 | ]. 51 | 52 | all() -> [{group, ctl_tests}, {group, send_msgs}, {group, sys_tests}]. 53 | 54 | init_per_suite(Config) -> Config. 55 | end_per_suite(_Config) -> ok. 56 | 57 | init_per_group(_Group, Config) -> Config. 58 | end_per_group(_Group, _Config) -> ok. 59 | 60 | %% Test module 61 | -define(TM, coop_head). 62 | 63 | %%---------------------------------------------------------------------- 64 | %% Head Control 65 | %%---------------------------------------------------------------------- 66 | %% fake_node_ctl() -> proc_lib:spawn(?MODULE, ctl_loop). 67 | %% fake_node_data() -> proc_lib:spawn(?MODULE, data_loop). 68 | fake_node_ctl() -> proc_lib:spawn(?TM, echo_loop, ["NCTL"]). 69 | fake_node_data() -> proc_lib:spawn(?TM, echo_loop, ["NDTA"]). 70 | fake_coop_node() -> {coop_node, fake_node_ctl(), fake_node_data()}. 71 | make_kill_switch() -> coop_kill_link_rcv:make_kill_switch(). 72 | 73 | 74 | create_new_coop_head_args(Fn) -> 75 | Kill_Switch = make_kill_switch(), 76 | true = is_process_alive(Kill_Switch), 77 | [Kill_Switch, ?MODULE:Fn()]. 
78 | 79 | head_ctl_kill_one_proc(_Config) -> 80 | Args = [Kill_Switch, _Coop_Node] = create_new_coop_head_args(fake_coop_node), 81 | #coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = apply(?TM, new, Args), 82 | timer:sleep(50), 83 | [true = is_process_alive(Pid) || Pid <- [Head_Ctl_Pid, Head_Data_Pid]], 84 | exit(Head_Data_Pid, kill), 85 | timer:sleep(50), 86 | [false = is_process_alive(Pid) || Pid <- [Head_Ctl_Pid, Head_Data_Pid, Kill_Switch]], 87 | ok. 88 | 89 | head_ctl_kill_two_proc(_Config) -> 90 | Args = [Kill_Switch, _Coop_Node] = create_new_coop_head_args(fake_coop_node), 91 | #coop_head{ctl_pid=Head_Ctl_Pid1, data_pid=Head_Data_Pid1} = apply(?TM, new, Args), 92 | [true = is_process_alive(Pid) || Pid <- [Head_Ctl_Pid1, Head_Data_Pid1]], 93 | #coop_head{ctl_pid=Head_Ctl_Pid2, data_pid=Head_Data_Pid2} = apply(?TM, new, Args), 94 | [true = is_process_alive(Pid) || Pid <- [Head_Ctl_Pid2, Head_Data_Pid2]], 95 | exit(Head_Ctl_Pid2, kill), 96 | timer:sleep(50), 97 | [false = is_process_alive(Pid) || Pid <- [Head_Ctl_Pid1, Head_Data_Pid1, Head_Ctl_Pid2, 98 | Head_Data_Pid2, Kill_Switch]], 99 | ok. 100 | 101 | head_ctl_stop_one_proc(_Config) -> 102 | Args = [_Kill_Switch, _Coop_Node] = create_new_coop_head_args(fake_coop_node), 103 | Coop_Node = #coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = apply(?TM, new, Args), 104 | [true = is_process_alive(Pid) || Pid <- [Head_Ctl_Pid, Head_Data_Pid]], 105 | ?TM:stop(Coop_Node), 106 | timer:sleep(50), 107 | false = is_process_alive(Head_Ctl_Pid), 108 | %% false = is_process_alive(Head_Data_Pid), 109 | %% false = is_process_alive(Kill_Switch). 110 | ok. 111 | 112 | %%---------------------------------------------------------------------- 113 | %% Function Tasks 114 | %%---------------------------------------------------------------------- 115 | result_node_ctl() -> proc_lib:spawn(?MODULE, report_result, []). 116 | result_node_task() -> proc_lib:spawn(?MODULE, report_result, []). 
117 | result_coop_node() -> #coop_node{ctl_pid=result_node_ctl(), task_pid=result_node_task()}. 118 | 119 | report_result() -> 120 | report_result([]). 121 | 122 | report_result(Rcvd) -> 123 | receive 124 | {get_oldest, From} -> 125 | case Rcvd of 126 | [] -> From ! none, report_result(Rcvd); 127 | [H|T] -> From ! H, report_result(T) 128 | end; 129 | Any -> report_result(Rcvd ++ [Any]) 130 | end. 131 | 132 | get_result_data(Pid) -> 133 | Pid ! {get_oldest, self()}, 134 | receive Any -> Any after 50 -> timeout end. 135 | 136 | start_head() -> 137 | Args = [_Kill_Switch, #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid} = Coop_Node] 138 | = create_new_coop_head_args(result_coop_node), 139 | Coop_Head = #coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = apply(?TM, new, Args), 140 | Root_Pid = ?TM:get_root_pid(Coop_Head), 141 | timer:sleep(50), 142 | [true = is_process_alive(P) || P <- [Head_Ctl_Pid, Head_Data_Pid, 143 | Node_Ctl_Pid, Node_Task_Pid, Root_Pid]], 144 | {Coop_Head, Root_Pid, Coop_Node}. 145 | 146 | send_ctl_msgs(_Config) -> 147 | {#coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = Coop_Head, 148 | Root_Pid, #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid}} = start_head(), 149 | Procs = [Head_Ctl_Pid, Head_Data_Pid, Node_Ctl_Pid, Node_Task_Pid, Root_Pid], 150 | [?TM:send_ctl_msg(Coop_Head, N) || N <- lists:seq(2,4)], 151 | timer:sleep(50), 152 | [true = is_process_alive(P) || P <- Procs], 153 | [2,3,4,none] = [get_result_data(Node_Ctl_Pid) || _N <- lists:seq(1,4)], 154 | [none,none,none,none] = [get_result_data(Node_Task_Pid) || _N <- lists:seq(1,4)], 155 | ok. 
156 | 157 | send_data_msgs(_Config) -> 158 | {#coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = Coop_Head, 159 | Root_Pid, #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid}} = start_head(), 160 | Procs = [Head_Ctl_Pid, Head_Data_Pid, Node_Ctl_Pid, Node_Task_Pid, Root_Pid], 161 | [?TM:send_data_msg(Coop_Head, N) || N <- lists:seq(5,7)], 162 | timer:sleep(50), 163 | [true = is_process_alive(P) || P <- Procs], 164 | [none,none,none,none] = [get_result_data(Node_Ctl_Pid) || _N <- lists:seq(1,4)], 165 | [5,6,7,none] = [get_result_data(Node_Task_Pid) || _N <- lists:seq(1,4)], 166 | ok. 167 | 168 | sys_suspend(_Config) -> 169 | {#coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = Coop_Head, 170 | Root_Pid, #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid}} = start_head(), 171 | Procs = [Head_Ctl_Pid, Head_Data_Pid, Node_Ctl_Pid, Node_Task_Pid, Root_Pid], 172 | [?TM:send_data_msg(Coop_Head, N) || N <- lists:seq(5,7)], 173 | timer:sleep(50), 174 | [true = is_process_alive(P) || P <- Procs], 175 | [5,6,7,none] = [get_result_data(Node_Task_Pid) || _N <- lists:seq(1,4)], 176 | 177 | %% Suspend message handling and get no result... 178 | ?TM:suspend_root(Coop_Head), 179 | timer:sleep(50), 180 | [?TM:send_data_msg(Coop_Head, N) || N <- lists:seq(8,10)], 181 | [true = is_process_alive(P) || P <- Procs], 182 | [none,none,none,none] = [get_result_data(Node_Task_Pid) || _N <- lists:seq(1,4)], 183 | 184 | %% Resume and result appears. 185 | ?TM:resume_root(Coop_Head), 186 | timer:sleep(50), 187 | [true = is_process_alive(P) || P <- Procs], 188 | [8,9,10,none] = [get_result_data(Node_Task_Pid) || _N <- lists:seq(1,4)], 189 | ok. 
190 | 191 | sys_format(_Config) -> 192 | {#coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = _Coop_Head, 193 | Root_Pid, #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid}} = start_head(), 194 | Procs = [Head_Ctl_Pid, Head_Data_Pid, Node_Ctl_Pid, Node_Task_Pid, Root_Pid], 195 | 196 | %% Get the custom status information... 197 | Custom_Running_Fmt = get_custom_fmt(sys:get_status(Root_Pid)), 198 | ["Status for coop_head_root_rcv", Custom_Running_Props] 199 | = [proplists:get_value(P, Custom_Running_Fmt) || P <- [header, data]], 200 | [running, pass_thru, [{messages, []}]] 201 | = [proplists:get_value(P, Custom_Running_Props) || P <- ["Status", "Loop", "Messages"]], 202 | [true = is_process_alive(P) || P <- Procs], 203 | 204 | sys:suspend(Root_Pid), 205 | timer:sleep(50), 206 | Custom_Suspended_Fmt = get_custom_fmt(sys:get_status(Root_Pid)), 207 | Custom_Suspended_Props = proplists:get_value(data, Custom_Suspended_Fmt), 208 | [suspended, pass_thru, [{messages, []}]] 209 | = [proplists:get_value(P, Custom_Suspended_Props) || P <- ["Status", "Loop", "Messages"]], 210 | 211 | sys:resume(Root_Pid), 212 | timer:sleep(50), 213 | New_Custom_Running_Fmt = get_custom_fmt(sys:get_status(Root_Pid)), 214 | ["Status for coop_head_root_rcv", New_Custom_Running_Props] 215 | = [proplists:get_value(P, New_Custom_Running_Fmt) || P <- [header, data]], 216 | [running, pass_thru, [{messages, []}]] 217 | = [proplists:get_value(P, New_Custom_Running_Props) || P <- ["Status", "Loop", "Messages"]], 218 | ok. 219 | 220 | 221 | get_custom_fmt(Status) -> lists:nth(5, element(4, Status)). 222 | 223 | %% send_data(N, Coop_Head) -> 224 | %% [begin 225 | %% ?TM:send_data_msg(Coop_Head, 5), 226 | %% 5 = receive Data -> Data end 227 | %% end || _N <- lists:seq(1,N)]. 
228 | 229 | sys_statistics(_Config) -> 230 | {#coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = Coop_Head, 231 | Root_Pid, #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid}} = start_head(), 232 | Procs = [Head_Ctl_Pid, Head_Data_Pid, Node_Ctl_Pid, Node_Task_Pid, Root_Pid], 233 | 234 | ok = ?TM:ctl_stats(Coop_Head, true, self()), 235 | [true = is_process_alive(P) || P <- Procs], 236 | {ok, Props1} = ?TM:ctl_stats(Coop_Head, get, self()), 237 | [0,0] = [proplists:get_value(P, Props1) || P <- [messages_in, messages_out]], 238 | ok = ?TM:send_data_msg(Coop_Head, 10), 239 | ok = ?TM:send_data_msg(Coop_Head, 11), 240 | ok = ?TM:send_data_msg(Coop_Head, 12), 241 | timer:sleep(50), 242 | {ok, Props2} = ?TM:ctl_stats(Coop_Head, get, self()), 243 | [3,3] = [proplists:get_value(P, Props2) || P <- [messages_in, messages_out]], 244 | ok = ?TM:ctl_stats(Coop_Head, false, self()), 245 | [true = is_process_alive(P) || P <- Procs], 246 | ok. 247 | 248 | sys_log(_Config) -> 249 | {#coop_head{ctl_pid=Head_Ctl_Pid, data_pid=Head_Data_Pid} = Coop_Head, 250 | Root_Pid, #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid}} = start_head(), 251 | Procs = [Head_Ctl_Pid, Head_Data_Pid, Node_Ctl_Pid, Node_Task_Pid, Root_Pid], 252 | 253 | %% ok = ?TM:ctl_log_to_file(Coop_Head, "./coop.dump", self()) 254 | ok = ?TM:ctl_log(Coop_Head, true, self()), 255 | {ok, []} = ?TM:ctl_log(Coop_Head, get, self()), 256 | [true = is_process_alive(P) || P <- Procs], 257 | ?TM:send_data_msg(Coop_Head, 5), 258 | {ok, Events} = ?TM:ctl_log(Coop_Head, get, self()), 259 | 2 = length(Events), 260 | Ins = [{in,5}], 261 | Ins = [{Type,Num} || {{Type,Num}, _Flow, _Fun} <- Events], 262 | Outs = [{out,5}], 263 | Outs = [{Type,Num} || {{Type,Num,_Pid}, _Flow, _Fun} <- Events], 264 | ok = ?TM:ctl_log(Coop_Head, false, self()), 265 | [true = is_process_alive(P) || P <- Procs], 266 | ok. 
267 | 268 | %% sys_install(_Config) -> 269 | %% Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(), 270 | %% Pid = spawn_link(fun() -> 271 | %% %% Trace results... 272 | %% receive {15, 30} -> ok; 273 | %% Bad_Result -> exit(Bad_Result) 274 | %% after 2000 -> exit(timeout) 275 | %% end, 276 | 277 | %% %% After trace uninstalled. 278 | %% case receive Data -> Data after 200 -> timeout end of 279 | %% {data, 21} -> ok; 280 | %% Bad -> Msg = io_lib:format("Trace_Fn failed ~p",[Bad]), 281 | %% exit(lists:flatten(Msg)) 282 | %% end 283 | %% end), 284 | %% F = fun 285 | %% ({Ins, Outs, 3}, _Any, round_robin) -> 286 | %% Pid ! {Ins, Outs}; 287 | %% ({Ins, Outs, Count}, {in, Amt}, round_robin) when is_integer(Amt) -> 288 | %% {Ins+Amt, Outs, Count+1}; 289 | %% ({Ins, Outs, Count}, {out, Amt, _Pid}, round_robin) when is_integer(Amt) -> 290 | %% {Ins, Outs+Amt, Count}; 291 | %% ({Ins, Outs, Count}, {in, {add_downstream, _Id}}, round_robin) -> 292 | %% {Ins, Outs, Count}; 293 | %% ({Ins, Outs, Count}, {in, {get_downstream, _Id}}, round_robin) -> 294 | %% {Ins, Outs, Count}; 295 | %% (_State, Unknown, _Extra) -> 296 | %% Pid ! {unknown_msg_rcvd, Unknown} 297 | %% end, 298 | %% ok = ?TM:node_ctl_install_trace_fn(Coop_Node, {F, {0,0,0}}, self()), 299 | 300 | %% send_data(3, Coop_Node), 301 | %% timer:sleep(50), 302 | %% ok = ?TM:node_ctl_remove_trace_fn(Coop_Node, F, self()), 303 | %% ?TM:node_task_deliver_data(Node_Task_Pid, 7), 304 | %% _ = receive Data -> Pid ! {data, Data} after 50 -> 0 end, 305 | %% timer:sleep(1000). 306 | -------------------------------------------------------------------------------- /apps/ctest/coop/coop_node_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(coop_node_SUITE). 2 | 3 | -include_lib("../../erlangsp/include/license_and_copyright.hrl"). 4 | -include_lib("common_test/include/ct.hrl"). 5 | -include("../../coop/include/coop.hrl"). 
6 | -include("../../coop/include/coop_dag.hrl"). 7 | 8 | %% Suite functions 9 | -export([ 10 | all/0, groups/0, 11 | init_per_suite/1, end_per_suite/1, 12 | init_per_group/2, end_per_group/2 13 | ]). 14 | 15 | %% Control process loop. 16 | -export([ 17 | node_ctl_kill_one_proc/1, node_ctl_kill_two_proc/1, 18 | node_ctl_stop_one_proc/1, 19 | 20 | task_compute_one/1, task_compute_three_round_robin/1, 21 | task_compute_three_broadcast/1, task_compute_random/1, 22 | 23 | sys_suspend/1, sys_format/1, sys_statistics/1, sys_log/1, 24 | sys_install/1 25 | ]). 26 | 27 | %% Spawned functions 28 | -export([init_noop/1, x3/2, report_result/1]). 29 | 30 | groups() -> [{ctl_tests, [sequence], 31 | [ 32 | {kill, [sequence], [node_ctl_kill_one_proc, node_ctl_kill_two_proc]}, 33 | {stop, [sequence], [node_ctl_stop_one_proc]} 34 | ]}, 35 | {data_tests, [sequence], 36 | [ 37 | {compute, [sequence], [task_compute_one, task_compute_three_round_robin, 38 | task_compute_three_broadcast, task_compute_random]} 39 | ]}, 40 | {sys_tests, [sequence], 41 | [ 42 | {suspend, [sequence], [sys_suspend]}, 43 | {format, [sequence], [sys_format]}, 44 | {stats, [sequence], [sys_statistics]}, 45 | {log, [sequence], [sys_log]}, 46 | {install, [sequence], [sys_install]} 47 | ]} 48 | ]. 49 | 50 | all() -> [{group, ctl_tests}, {group, data_tests}, {group, sys_tests}]. 51 | 52 | init_per_suite(Config) -> Config. 53 | end_per_suite(_Config) -> ok. 54 | 55 | init_per_group(_Group, Config) -> Config. 56 | end_per_group(_Group, _Config) -> ok. 57 | 58 | %% Test module 59 | -define(TM, coop_node). 60 | -define(CK, coop_kill_link_rcv). 61 | 62 | 63 | %%---------------------------------------------------------------------- 64 | %% Node Control 65 | %%---------------------------------------------------------------------- 66 | init_noop({}) -> {}. 67 | x3({}, N) -> {{}, N * 3}. 68 | 69 | make_fake_head() -> 70 | Head_Kill_Switch = coop_kill_link_rcv:make_kill_switch(), 71 | coop_head:new(Head_Kill_Switch, none). 
%% Argument list for coop_node:new/5 using the x3 task fn (default dataflow).
create_new_coop_node_args() ->
    Kill_Switch = ?CK:make_kill_switch(),
    true = is_process_alive(Kill_Switch),
    [make_fake_head(), Kill_Switch, {?MODULE, x3}, {?MODULE, init_noop, {}}, []].

%% Same as create_new_coop_node_args/0 plus an explicit dataflow method
%% (e.g. round_robin | broadcast | random) for coop_node:new/6.
create_new_coop_node_args(Dist_Type) ->
    Kill_Switch = ?CK:make_kill_switch(),
    true = is_process_alive(Kill_Switch),
    [make_fake_head(), Kill_Switch, {?MODULE, x3}, {?MODULE, init_noop, {}}, [], Dist_Type].

%% Killing a node's task pid must also take down its ctl pid and the
%% kill switch via the link structure.
node_ctl_kill_one_proc(_Config) ->
    Args = [_Coop_Head, Kill_Switch, _Node_Fn, _Init_Fn, _Opts] = create_new_coop_node_args(),
    #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid} = apply(?TM, new, Args),
    true = is_process_alive(Node_Ctl_Pid),
    true = is_process_alive(Node_Task_Pid),
    exit(Node_Task_Pid, kill),
    timer:sleep(50),  %% allow the kill to propagate through the links
    false = is_process_alive(Node_Ctl_Pid),
    false = is_process_alive(Node_Task_Pid),
    false = is_process_alive(Kill_Switch).

%% Two nodes sharing one kill switch: killing the second node's ctl pid
%% must take down all four node processes plus the shared kill switch.
node_ctl_kill_two_proc(_Config) ->
    Args = [_Coop_Head, Kill_Switch, _Node_Fn, _Init_Fn, _Opts] = create_new_coop_node_args(),
    #coop_node{ctl_pid=Node_Ctl_Pid1, task_pid=Node_Task_Pid1} = apply(?TM, new, Args),
    true = is_process_alive(Node_Ctl_Pid1),
    true = is_process_alive(Node_Task_Pid1),
    #coop_node{ctl_pid=Node_Ctl_Pid2, task_pid=Node_Task_Pid2} = apply(?TM, new, Args),
    true = is_process_alive(Node_Ctl_Pid2),
    true = is_process_alive(Node_Task_Pid2),
    exit(Node_Ctl_Pid2, kill),
    timer:sleep(50),  %% allow the kill to propagate through the links
    false = is_process_alive(Node_Ctl_Pid1),
    false = is_process_alive(Node_Task_Pid1),
    false = is_process_alive(Node_Ctl_Pid2),
    false = is_process_alive(Node_Task_Pid2),
    false = is_process_alive(Kill_Switch).
%% A clean node_ctl_stop must terminate task pid, ctl pid and kill switch.
node_ctl_stop_one_proc(_Config) ->
    Args = [_Coop_Head, Kill_Switch, _Node_Fn, _Init_Fn, _Opts] = create_new_coop_node_args(),
    Coop_Node = #coop_node{ctl_pid=Node_Ctl_Pid, task_pid=Node_Task_Pid} = apply(?TM, new, Args),
    true = is_process_alive(Node_Ctl_Pid),
    true = is_process_alive(Node_Task_Pid),
    ?TM:node_ctl_stop(Coop_Node),
    timer:sleep(50),  %% allow the stop to propagate
    false = is_process_alive(Node_Ctl_Pid),
    false = is_process_alive(Node_Task_Pid),
    false = is_process_alive(Kill_Switch).

%%----------------------------------------------------------------------
%% Function Tasks
%%----------------------------------------------------------------------
%% Downstream receiver loop: buffers incoming messages and hands them
%% back one at a time, oldest first, on {get_oldest, From} requests
%% (replies 'none' when the buffer is empty).
report_result(Rcvd) ->
    receive
        {get_oldest, From} ->
            case Rcvd of
                [] -> From ! none, report_result(Rcvd);
                [H|T] -> From ! H, report_result(T)
            end;
        %% BUGFIX: was report_result([Any] ++ Rcvd), which prepended new
        %% messages so {get_oldest, _} returned the NEWEST message when
        %% more than one was queued. Append to keep true FIFO order.
        %% (Existing tests queue at most one message per receiver, so
        %% their expectations are unaffected.)
        Any -> report_result(Rcvd ++ [Any])
    end.

%% Fetch the oldest pending result from a report_result/1 process,
%% or 'timeout' if it does not answer within 50ms.
get_result_data(Pid) ->
    Pid ! {get_oldest, self()},
    receive Any -> Any after 50 -> timeout end.

%% New coop_node attached to a fake head, with no downstream pids yet.
setup_no_downstream() ->
    Args = [Coop_Head, _Kill_Switch, _Node_Fn, _Init_Fn, _Opts] = create_new_coop_node_args(),
    Coop_Node = apply(?TM, new, Args),
    coop_head:set_root_node(Coop_Head, Coop_Node),
    [] = ?TM:node_task_get_downstream_pids(Coop_Node),
    Coop_Node.

%% Same as setup_no_downstream/0 with an explicit dataflow method.
setup_no_downstream(Dist_Type) ->
    Args = [Coop_Head, _Kill_Switch, _Node_Fn, _Init_Fn, _Opts, Dist_Type]
        = create_new_coop_node_args(Dist_Type),
    Coop_Node = apply(?TM, new, Args),
    coop_head:set_root_node(Coop_Head, Coop_Node),
    [] = ?TM:node_task_get_downstream_pids(Coop_Node),
    Coop_Node.
%% A single downstream receiver gets every computed result (5 * 3 = 15).
task_compute_one(_Config) ->
    Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(),

    %% Adding an empty downstream set is a no-op...
    ?TM:node_task_add_downstream_pids(Coop_Node, []),
    [] = ?TM:node_task_get_downstream_pids(Coop_Node),

    Receiver = [self()],
    ?TM:node_task_add_downstream_pids(Coop_Node, Receiver),
    Receiver = ?TM:node_task_get_downstream_pids(Coop_Node),

    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    %% BUGFIX: was an unbounded 'receive Data -> Data end', which hung the
    %% whole case until the CT timetrap on failure. Bound it so a missing
    %% delivery fails fast with a badmatch on 'timeout' instead.
    15 = receive Data -> Data after 2000 -> timeout end.

%% round_robin dataflow: successive results rotate through the
%% downstream receivers in order A -> B -> C -> A ...
task_compute_three_round_robin(_Config) ->
    Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(round_robin),
    Receivers = [A,B,C] = [proc_lib:spawn_link(?MODULE, report_result, [[]])
                           || _N <- lists:seq(1,3)],
    ?TM:node_task_add_downstream_pids(Coop_Node, [A]),
    [A] = ?TM:node_task_get_downstream_pids(Coop_Node),
    ?TM:node_task_add_downstream_pids(Coop_Node, [B,C]),
    Receivers = ?TM:node_task_get_downstream_pids(Coop_Node),
    [true = is_process_alive(Pid) || Pid <- Receivers],

    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    timer:sleep(50),
    [15, none, none] = [get_result_data(Pid) || Pid <- Receivers],

    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    timer:sleep(50),
    [none, 15, none] = [get_result_data(Pid) || Pid <- Receivers],

    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    timer:sleep(50),
    [none, none, 15] = [get_result_data(Pid) || Pid <- Receivers],

    %% Rotation wraps back to the first receiver.
    ?TM:node_task_deliver_data(Node_Task_Pid, 9),
    timer:sleep(50),
    [27, none, none] = [get_result_data(Pid) || Pid <- Receivers],

    ?TM:node_task_deliver_data(Node_Task_Pid, 7),
    ?TM:node_task_deliver_data(Node_Task_Pid, 6),
    timer:sleep(50),
    [none, 21, 18] = [get_result_data(Pid) || Pid <- Receivers].
%% broadcast dataflow: every downstream receiver gets every result.
task_compute_three_broadcast(_Config) ->
    Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(broadcast),
    Receivers = [A,B,C] = [proc_lib:spawn_link(?MODULE, report_result, [[]])
                           || _N <- lists:seq(1,3)],
    ?TM:node_task_add_downstream_pids(Coop_Node, [A]),
    [A] = ?TM:node_task_get_downstream_pids(Coop_Node),
    ?TM:node_task_add_downstream_pids(Coop_Node, [B,C]),
    Receivers = ?TM:node_task_get_downstream_pids(Coop_Node),
    [true = is_process_alive(Pid) || Pid <- Receivers],

    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    timer:sleep(50),
    [15, 15, 15] = [get_result_data(Pid) || Pid <- Receivers],

    ?TM:node_task_deliver_data(Node_Task_Pid, 6),
    timer:sleep(50),
    [18, 18, 18] = [get_result_data(Pid) || Pid <- Receivers].

%% random dataflow: coop_node_util:random_worker/1 picks the receiver.
%% The randomness is stubbed with meck + a sequence in a public ETS table
%% ([4,2,3,1,5]) so receiver selection is deterministic for assertions.
task_compute_random(_Config) ->
    Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(random),
    Receivers = [proc_lib:spawn_link(?MODULE, report_result, [[]])
                 || _N <- lists:seq(1,5)],
    ?TM:node_task_add_downstream_pids(Coop_Node, Receivers),
    Receivers = ?TM:node_task_get_downstream_pids(Coop_Node),
    [true = is_process_alive(Pid) || Pid <- Receivers],

    Ets_Name = crypto_rand_test,
    Key = crypto_rand_stub,
    ets:new(Ets_Name, [named_table, public]),
    ets:insert(Ets_Name, {Key, [4,2,3,1,5]}),
    meck:new(coop_node_util),
    %% Each call pops the next pre-canned "random" index from the table.
    meck:expect(coop_node_util, random_worker, fun(_Tuple) -> [{Key, [H|T]}] = ets:lookup(Ets_Name, Key), ets:insert(Ets_Name, {Key, T}), H end),
    ?TM:node_task_deliver_data(Node_Task_Pid, 3),
    timer:sleep(50),
    [none, none, none, 9, none] = [get_result_data(Pid) || Pid <- Receivers],
    ?TM:node_task_deliver_data(Node_Task_Pid, 4),
    timer:sleep(50),
    [none, 12, none, none, none] = [get_result_data(Pid) || Pid <- Receivers],
    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    timer:sleep(50),
    [none, none, 15, none, none] = [get_result_data(Pid) || Pid <- Receivers],
    ?TM:node_task_deliver_data(Node_Task_Pid, 6),
    timer:sleep(50),
    [18, none, none, none, none] = [get_result_data(Pid) || Pid <- Receivers],
    ?TM:node_task_deliver_data(Node_Task_Pid, 7),
    timer:sleep(50),
    [none, none, none, none, 21] = [get_result_data(Pid) || Pid <- Receivers],
    meck:unload(coop_node_util),
    ets:delete(Ets_Name).

%% sys suspend/resume: while suspended the task pid stays alive but emits
%% no result; after resume the queued datum is processed and delivered.
sys_suspend(_Config) ->
    Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(),
    Receiver = [self()],
    ?TM:node_task_add_downstream_pids(Coop_Node, Receiver),
    Receiver = ?TM:node_task_get_downstream_pids(Coop_Node),

    %% Verify it computes normally...
    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    15 = receive Data1 -> Data1 end,

    %% Suspend message handling and get no result...
    ?TM:node_ctl_suspend(Coop_Node),
    timer:sleep(50),
    ?TM:node_task_deliver_data(Node_Task_Pid, 5),
    0 = receive Data2 -> Data2 after 1000 -> 0 end,
    true = is_process_alive(Node_Task_Pid),

    %% Resume and result appears.
    ?TM:node_ctl_resume(Coop_Node),
    15 = receive Data3 -> Data3 after 100 -> 0 end.

%% sys:get_status/1 custom formatting: check the coop_node-specific
%% status proplist (run state, task fn, downstream count, dataflow method)
%% across running -> suspended -> resumed transitions.
sys_format(_Config) ->
    Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(random),

    %% Get the custom status information...
    Custom_Running_Fmt = get_custom_fmt(sys:get_status(Node_Task_Pid)),
    ["Status for coop_node", Custom_Running_Props]
        = [proplists:get_value(P, Custom_Running_Fmt) || P <- [header, data]],
    [running, {coop_node_SUITE,x3}, 0, random]
        = [proplists:get_value(P, Custom_Running_Props)
           || P <- ["Status", "Node_Fn", "Downstream_Pid_Count", "Data_Flow_Method"]],

    [A,B,C] = [proc_lib:spawn_link(?MODULE, report_result, [[]]) || _N <- lists:seq(1,3)],
    ?TM:node_task_add_downstream_pids(Coop_Node, [A,B,C]),
    [A,B,C] = ?TM:node_task_get_downstream_pids(Coop_Node),

    ?TM:node_ctl_suspend(Coop_Node),
    timer:sleep(50),
    Custom_Suspended_Fmt = get_custom_fmt(sys:get_status(Node_Task_Pid)),
    Custom_Suspended_Props = proplists:get_value(data, Custom_Suspended_Fmt),
    [suspended, {coop_node_SUITE,x3}, 3, random]
        = [proplists:get_value(P, Custom_Suspended_Props)
           || P <- ["Status", "Node_Fn", "Downstream_Pid_Count", "Data_Flow_Method"]],

    ?TM:node_ctl_resume(Coop_Node),
    timer:sleep(50),
    New_Custom_Running_Fmt = get_custom_fmt(sys:get_status(Node_Task_Pid)),
    ["Status for coop_node", New_Custom_Running_Props]
        = [proplists:get_value(P, New_Custom_Running_Fmt) || P <- [header, data]],
    [running, {coop_node_SUITE,x3}, 3, random]
        = [proplists:get_value(P, New_Custom_Running_Props)
           || P <- ["Status", "Node_Fn", "Downstream_Pid_Count", "Data_Flow_Method"]].


%% Extract the custom format element from a sys:get_status/1 result.
%% NOTE(review): position (5th element of the 4th tuple slot) is coupled
%% to coop_node's format_status implementation -- verify if that changes.
get_custom_fmt(Status) -> lists:nth(5, element(4, Status)).

%% Deliver N data messages (always 5 -> result 15) to the node, adding
%% the caller as the sole downstream receiver first.
send_data(N, #coop_node{task_pid=Node_Task_Pid} = Coop_Node) ->
    Receiver = [self()],
    ?TM:node_task_add_downstream_pids(Coop_Node, Receiver),
    Receiver = ?TM:node_task_get_downstream_pids(Coop_Node),

    %% Verify it computes normally...
    [begin
         ?TM:node_task_deliver_data(Node_Task_Pid, 5),
         15 = receive Data -> Data end
     end || _N <- lists:seq(1,N)].
%% sys statistics: 10 data deliveries plus the 2 downstream ctl requests
%% made by send_data/2 (add + get) => 12 messages in, 10 out.
sys_statistics(_Config) ->
    Coop_Node = setup_no_downstream(),
    ok = ?TM:node_ctl_stats(Coop_Node, true, self()),
    {ok, Props1} = ?TM:node_ctl_stats(Coop_Node, get, self()),
    [0,0] = [proplists:get_value(P, Props1) || P <- [messages_in, messages_out]],
    send_data(10, Coop_Node),
    {ok, Props2} = ?TM:node_ctl_stats(Coop_Node, get, self()),
    [12,10] = [proplists:get_value(P, Props2) || P <- [messages_in, messages_out]],
    ok = ?TM:node_ctl_stats(Coop_Node, false, self()).

%% sys log: after 6 round trips only the 10 most recent events remain
%% (presumably the sys log default buffer depth of 10 -- TODO confirm),
%% i.e. 5 {in,5} events interleaved with 5 {out,15} events.
sys_log(_Config) ->
    Coop_Node = setup_no_downstream(),
    %% ok = ?TM:node_ctl_log_to_file(Coop_Node, "./coop.dump", self())
    ok = ?TM:node_ctl_log(Coop_Node, true, self()),
    {ok, []} = ?TM:node_ctl_log(Coop_Node, get, self()),
    send_data(6, Coop_Node),
    {ok, Events} = ?TM:node_ctl_log(Coop_Node, get, self()),
    10 = length(Events),
    Ins = lists:duplicate(5,{in,5}),
    Ins = [{Type,Num} || {{Type,Num}, _Flow, _Fun} <- Events],
    Outs = lists:duplicate(5,{out,15}),
    Outs = [{Type,Num} || {{Type,Num,_Pid}, _Flow, _Fun} <- Events],
    ok = ?TM:node_ctl_log(Coop_Node, false, self()).
    %% ok = ?TM:node_ctl_log_to_file(Coop_Node, false, self()).

%% sys install: a custom trace fun accumulates {Ins, Outs, Count}.
%% Count increments per {in, Amt}; after the 3rd input (3 x 5 = 15 in,
%% 2 x 15 = 30 out so far) the Count=3 clause fires and reports {15,30}.
%% After removal, a 4th datum must reach the spawned checker untraced.
sys_install(_Config) ->
    Coop_Node = #coop_node{task_pid=Node_Task_Pid} = setup_no_downstream(round_robin),
    Pid = spawn_link(fun() ->
                             %% Trace results...
                             receive
                                 {15, 30} -> ok;
                                 Bad_Result -> exit(Bad_Result)
                             after 2000 -> exit(timeout)
                             end,

                             %% After trace uninstalled.
                             case receive Data -> Data after 200 -> timeout end of
                                 {data, 21} -> ok;
                                 Bad -> Msg = io_lib:format("Trace_Fn failed ~p",[Bad]),
                                        exit(lists:flatten(Msg))
                             end
                     end),
    F = fun
            ({Ins, Outs, 3}, _Any, {round_robin, #coop_node_options{}, {}}) ->
                Pid ! {Ins, Outs};
            ({Ins, Outs, Count}, {in, Amt}, {round_robin, #coop_node_options{}, {}}) when is_integer(Amt) ->
                {Ins+Amt, Outs, Count+1};
            ({Ins, Outs, Count}, {out, Amt, _Pid}, {round_robin, #coop_node_options{}, {}}) when is_integer(Amt) ->
                {Ins, Outs+Amt, Count};
            ({Ins, Outs, Count}, {in, {add_downstream, _Id}}, {round_robin, #coop_node_options{}, {}}) ->
                {Ins, Outs, Count};
            ({Ins, Outs, Count}, {in, {get_downstream, _Id}}, {round_robin, #coop_node_options{}, {}}) ->
                {Ins, Outs, Count};
            (_State, Unknown, _Extra) ->
                error_logger:info_msg("~p ~p ~p ~p~n", [?MODULE, _State, Unknown, _Extra]),
                Pid ! {unknown_msg_rcvd, Unknown}
        end,
    ok = ?TM:node_ctl_install_trace_fn(Coop_Node, {F, {0,0,0}}, self()),

    send_data(3, Coop_Node),
    timer:sleep(50),
    ok = ?TM:node_ctl_remove_trace_fn(Coop_Node, F, self()),
    ?TM:node_task_deliver_data(Node_Task_Pid, 7),
    _ = receive Data -> Pid ! {data, Data} after 50 -> 0 end,
    timer:sleep(1000).
--------------------------------------------------------------------------------
/apps/ctest/examples.coverspec:
--------------------------------------------------------------------------------
{export, "./examples/logs/cover"}.
{incl_mods, [esp_cache]}.
--------------------------------------------------------------------------------
/apps/ctest/examples.spec:
--------------------------------------------------------------------------------
{alias, examples, "./examples/"}.
{logdir, "./logs/"}.
{cover, "./examples.coverspec"}.
{suites, examples, all}.
--------------------------------------------------------------------------------
/apps/ctest/examples/esp_cache_SUITE.erl:
--------------------------------------------------------------------------------
-module(esp_cache_SUITE).

%% NOTE(review): sibling suite coop_node_SUITE uses -include_lib for this
%% header; consider making the include style consistent.
-include("../../erlangsp/include/license_and_copyright.hrl").
-include_lib("common_test/include/ct.hrl").
%% Suite functions
-export([all/0, init_per_suite/1, end_per_suite/1]).

%% Test Coop Node functionality individually
-export([
         datum_value/1,
         worker_value/1, worker_mfa/1, worker_replace/1,
         value_request_lookup/1, value_request_add_replace/1
        ]).

%% Test full coop
-export([cache_coop/1]).

%% Spawned functions must be exported
-export([check_worker/2, compute_value/1]).

-include("../../coop/include/coop.hrl").
-include("../../coop/include/coop_dag.hrl").
-include("../../examples/esp_cache/include/esp_cache.hrl").

all() -> [
          datum_value,
          worker_value, worker_mfa, worker_replace,
          value_request_lookup, value_request_add_replace,
          cache_coop
         ].

init_per_suite(Config) -> Config.
end_per_suite(_Config) -> ok.


%%----------------------------------------------------------------------
%% Final stage Datum Coop Node tests
%%----------------------------------------------------------------------
%% Build a minimal coop_head (no root node) to own the nodes under test.
make_fake_head() ->
    Head_Kill_Switch = coop_kill_link_rcv:make_kill_switch(),
    coop_head:new(Head_Kill_Switch, none).

%% A datum node answers get_value, accepts replace, and exits normally
%% on expire (after answering one last get of the current value).
datum_value(_Config) ->

    %% Get original value...
    Kill_Switch = coop_kill_link_rcv:make_kill_switch(),
    #coop_node{task_pid=Node_Task_Pid} = Coop_Node
        = esp_cache:new_datum_node(make_fake_head(), Kill_Switch, 17),
    R1 = make_ref(),
    coop:relay_data(Coop_Node, {get_value, {R1, self()}}),
    17 = check_datum(R1),
    R2 = make_ref(),
    coop:relay_data(Coop_Node, {get_value, {R2, self()}}),
    17 = check_datum(R2),

    %% Replace value and get new value...
    R3 = make_ref(),
    coop:relay_data(Coop_Node, {replace, 23, {R3, self()}}),
    23 = check_datum(R3),
    R4 = make_ref(),
    coop:relay_data(Coop_Node, {get_value, {R4, self()}}),
    23 = check_datum(R4),

    %% Check for expiration...
    erlang:monitor(process, Node_Task_Pid),
    coop:relay_data(Coop_Node, {expire, {foo, self()}}),
    23 = check_datum(foo),
    {exited, _Pid} = check_datum(foo).

%% Wait for either a tagged reply or a normal 'DOWN' from the monitor
%% set in datum_value/1; 'timeout' after 1s.
check_datum(Ref) ->
    receive
        {Ref, Value} -> Value;
        {'DOWN', _MRef, process, Pid, normal} -> {exited, Pid}
    after 1000 -> timeout
    end.

%%----------------------------------------------------------------------
%% Mid-tier worker Coop Node tests
%%----------------------------------------------------------------------
%% Shared driver: ask a worker node to add key 'age' computed from
%% Value_Expr and assert the checker process ends up holding Answer.
%% coop_head:get_kill_switch/1 is mecked so links come back to self().
worker_test_age(Value_Expr, Answer) ->

    %% Create a worker node...
    Self = self(),
    meck:new(coop_head, [passthrough]),
    meck:expect(coop_head, get_kill_switch, fun(_Coop_Head) -> Self end),
    Fake_Coop_Head = #coop_head{ctl_pid=Self, data_pid=Self},
    Coop_Node = esp_cache:new_worker_node(Fake_Coop_Head),
    ?CTL_MSG({link, _Pids1}) = receive A -> A after 1000 -> timeout end,

    %% Create a new cached datum node from the value expression...
    R1 = make_ref(),
    Rcvr = proc_lib:spawn_link(?MODULE, check_worker, [R1, []]),
    coop:relay_data(Coop_Node, {add, {age, Value_Expr, {R1, Rcvr}}}),
    Results = check_worker([]),
    2 = length(Results),
    [?CTL_MSG({link, _Pids2})] = [I || I <- Results, element(1,element(3, I)) =:= link],
    [?DATA_MSG({new, age, #coop_node{}})]
        = [I || I <- Results, element(1, element(3, I)) =:= new],
    timer:sleep(50),
    Rcvr ! {results, Self},
    [[Answer]] = check_worker([]),
    meck:unload(coop_head).

%% Drain this process's mailbox for 100ms and return messages in arrival
%% order.
check_worker(Acc) ->
    receive Any -> check_worker([Any | Acc])
    after 100 -> lists:reverse(Acc)
    end.
%% Spawned collector: gathers {Ref, Value} replies until asked for the
%% accumulated results; gives up after 5s of silence.
check_worker(Ref, Acc) ->
    receive
        {Ref, Value} -> check_worker(Ref, [Value | Acc]);
        {results, From} -> From ! lists:reverse(Acc)
    after 5000 -> no_msg
    end.

%% MFA used by the worker tests to derive a cached value.
compute_value(X) -> 3*X.

worker_value(_Config) -> worker_test_age({?VALUE, 15}, 15).
worker_mfa(_Config) -> worker_test_age({?MFA, {?MODULE, compute_value, 7}}, 21).

%% Add a value via the worker, then replace it through the worker with an
%% MFA expression targeting the existing datum node (3 * 11 = 33).
worker_replace(_Config) ->

    %% Create a worker node...
    Self = self(),
    meck:new(coop_head, [passthrough]),
    meck:expect(coop_head, get_kill_switch, fun(_Coop_Head) -> Self end),
    Fake_Coop_Head = #coop_head{ctl_pid=Self, data_pid=Self},
    Coop_Node = esp_cache:new_worker_node(Fake_Coop_Head),
    ?CTL_MSG({link, _Pids1}) = receive A -> A after 1000 -> timeout end,

    %% Create a new cached datum node from a simple value...
    R1 = make_ref(),
    Rcvr = proc_lib:spawn_link(?MODULE, check_worker, [R1, []]),
    coop:relay_data(Coop_Node, {add, {age, {?VALUE, 37}, {R1, Rcvr}}}),
    Results = check_worker([]),
    2 = length(Results),
    [?CTL_MSG({link, _Pids2})] = [I || I <- Results, element(1,element(3, I)) =:= link],
    [?DATA_MSG({new, age, #coop_node{} = New_Datum_Cache_Node})]
        = [I || I <- Results, element(1, element(3, I)) =:= new],
    timer:sleep(50),
    Rcvr ! {results, Self},
    [[37]] = check_worker([]),

    %% Replace the cached datum value...
    R2 = make_ref(),
    Rcvr2 = proc_lib:spawn_link(?MODULE, check_worker, [R2, []]),
    coop:relay_data(Coop_Node, {replace, {age, {?MFA, {?MODULE, compute_value, 11}}, {R2, Rcvr2}},
                                New_Datum_Cache_Node}),
    timer:sleep(50),
    Rcvr2 ! {results, Self},
    [[33]] = check_worker([]),
    meck:unload(coop_head).

%%----------------------------------------------------------------------
%% First stage Directory Coop Node tests
%%----------------------------------------------------------------------
%% Directory node: insert a datum, remove it (killing its node), then
%% verify a second remove of the missing key answers undefined safely.
value_request_lookup(_Config) ->
    Key = foo,
    Exp_Value = 29,

    %% Create a directory node...
    Self = self(),
    meck:new(coop_head, [passthrough]),
    meck:expect(coop_head, get_kill_switch, fun(_Coop_Head) -> Self end),
    Fake_Coop_Head = #coop_head{ctl_pid=Self, data_pid=Self},
    Directory_Node = esp_cache:new_directory_node(Fake_Coop_Head),
    ?CTL_MSG({link, _Pids1}) = receive A -> A after 1000 -> timeout end,
    check_directory_empty(Directory_Node, Key),

    %% Create a datum Coop_Node...
    Value_Node = insert_value(Directory_Node, Key, Exp_Value),

    %% Delete the value and check the count.
    R1 = make_ref(),
    coop:relay_data(Directory_Node, {remove, {Key, {R1, Self}}}),
    timer:sleep(50),
    R2 = make_ref(),
    coop:relay_data(Directory_Node, {num_keys, {R2, Self}}),
    timer:sleep(50),
    [{R1, Exp_Value}, {R2, 0}] = check_worker([]),
    false = coop:is_live(Value_Node),

    %% Delete again to make sure it doesn't fail...
    R3 = make_ref(),
    coop:relay_data(Directory_Node, {remove, {Key, {R3, Self}}}),
    R4 = make_ref(),
    coop:relay_data(Directory_Node, {num_keys, {R4, Self}}),
    timer:sleep(50),
    [{R3, undefined}, {R4, 0}] = check_worker([]),
    false = coop:is_live(Value_Node),
    true = coop:is_live(Directory_Node),

    meck:unload(coop_head).

%% Assert a directory has no keys and that looking up Key answers
%% undefined without creating an entry.
check_directory_empty(Directory_Node, Key) ->
    Self = self(),
    R1 = make_ref(),
    coop:relay_data(Directory_Node, {num_keys, {R1, Self}}),
    R2 = make_ref(),
    coop:relay_data(Directory_Node, {lookup, {Key, {R2, Self}}}),
    R3 = make_ref(),
    coop:relay_data(Directory_Node, {num_keys, {R3, Self}}),
    timer:sleep(50),
    [{R1, 0}, {R2, undefined}, {R3, 0}] = check_worker([]),
    ok.

%% Create a datum node holding Exp_Value, register it under Key in the
%% directory, and verify lookups and the key count; returns the node.
insert_value(Directory_Node, Key, Exp_Value) ->
    Self = self(),

    %% Create a datum Coop_Node...
    Value_Node = esp_cache:new_datum_node(make_fake_head(), Self, Exp_Value),
    ?CTL_MSG({link, _Pids2}) = receive B -> B after 1000 -> timeout end,
    coop:relay_data(Directory_Node, {new, Key, Value_Node}),
    R1 = make_ref(),
    coop:relay_data(Directory_Node, {num_keys, {R1, Self}}),
    timer:sleep(50),
    [{R1, 1}] = check_worker([]),
    true = coop:is_live(Value_Node),

    %% Check that value is present.
    R2 = make_ref(),
    coop:relay_data(Directory_Node, {lookup, {Key, {R2, Self}}}),
    R3 = make_ref(),
    coop:relay_data(Directory_Node, {lookup, {Key, {R3, Self}}}),
    timer:sleep(50),
    [{R2, Exp_Value}, {R3, Exp_Value}] = check_worker([]),
    R4 = make_ref(),
    coop:relay_data(Directory_Node, {num_keys, {R4, Self}}),
    timer:sleep(50),
    [{R4, 1}] = check_worker([]),
    true = coop:is_live(Value_Node),

    Value_Node.

%% Directory node: replace updates an existing key in place; add on an
%% existing key answers 'defined' and leaves the datum node alive.
value_request_add_replace(_Config) ->
    Key1 = foo,
    Exp_Value = 13,
    Chngd_Value = 27,

    %% Create a directory node...
    Self = self(),
    meck:new(coop_head, [passthrough]),
    meck:expect(coop_head, get_kill_switch, fun(_Coop_Head) -> Self end),
    Fake_Coop_Head = #coop_head{ctl_pid=Self, data_pid=Self},
    Directory_Node = esp_cache:new_directory_node(Fake_Coop_Head),
    ?CTL_MSG({link, _Pids1}) = receive A -> A after 1000 -> timeout end,
    check_directory_empty(Directory_Node, Key1),

    %% Add a new value, then change it...
    Value_Node = insert_value(Directory_Node, Key1, Exp_Value),
    R1 = make_ref(),
    coop:relay_data(Directory_Node, {replace, {Key1, {?VALUE, Chngd_Value}, {R1, Self}}}),
    R2 = make_ref(),
    coop:relay_data(Directory_Node, {lookup, {Key1, {R2, Self}}}),
    timer:sleep(50),
    [{R1, Chngd_Value}, {R2, Chngd_Value}] = check_worker([]),
    true = coop:is_live(Value_Node),

    %% Try to add a value when it already exists...
    R3 = make_ref(),
    coop:relay_data(Directory_Node, {add, {Key1, {?VALUE, Chngd_Value}, {R3, Self}}}),
    timer:sleep(50),
    [{R3, defined}] = check_worker([]),
    true = coop:is_live(Value_Node),

    meck:unload(coop_head).


%%----------------------------------------------------------------------
%% Complete Co-op testing
%%----------------------------------------------------------------------
%% End-to-end: exercise add/lookup/replace/remove/num_keys through the
%% whole cache co-op (5 workers), checking liveness after every step.
cache_coop(_Config) ->
    Cache_Coop = esp_cache:new_cache_coop(5),
    {Key1, Key2} = {foo, bar},
    {Val1, Val2} = {17, 4},
    Self = self(),

    %% Verify directory empty, add a value, then see it is there.
    R1 = make_ref(),
    coop:relay_data(Cache_Coop, {num_keys, {R1, Self}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    [{R1, 0}] = check_worker([]),
    R2 = make_ref(),
    coop:relay_data(Cache_Coop, {add, {Key1, {?VALUE, Val1}, {R2, Self}}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    R3 = make_ref(),
    coop:relay_data(Cache_Coop, {lookup, {Key1, {R3, Self}}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    [{R2, Val1},{R3, Val1}] = check_worker([]),
    R4 = make_ref(),
    coop:relay_data(Cache_Coop, {num_keys, {R4, Self}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    [{R4, 1}] = check_worker([]),

    %% Replace the value, then see if it can be retrieved.
    R5 = make_ref(),
    coop:relay_data(Cache_Coop, {replace, {Key1, {?VALUE, Val2}, {R5, Self}}}),
    timer:sleep(50),
    R6 = make_ref(),
    coop:relay_data(Cache_Coop, {num_keys, {R6, Self}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    [{R5, Val2}, {R6, 1}] = check_worker([]),
    R7 = make_ref(),
    coop:relay_data(Cache_Coop, {lookup, {Key1, {R7, Self}}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    R8 = make_ref(),
    coop:relay_data(Cache_Coop, {num_keys, {R8, Self}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    [{R7, Val2},{R8, 1}] = check_worker([]),

    %% Verify that missing values return undefined.
    R9 = make_ref(),
    coop:relay_data(Cache_Coop, {lookup, {Key2, {R9, Self}}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    RA = make_ref(),
    coop:relay_data(Cache_Coop, {num_keys, {RA, Self}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    [{R9, undefined},{RA, 1}] = check_worker([]),

    %% Verify that remove works.
    RB = make_ref(),
    coop:relay_data(Cache_Coop, {remove, {Key1, {RB, Self}}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    RC = make_ref(),
    coop:relay_data(Cache_Coop, {num_keys, {RC, Self}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    RD = make_ref(),
    coop:relay_data(Cache_Coop, {lookup, {Key2, {RD, Self}}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    RE = make_ref(),
    coop:relay_data(Cache_Coop, {remove, {Key1, {RE, Self}}}),
    timer:sleep(50),
    true = coop:is_live(Cache_Coop),
    [{RB,Val2},{RC,0},{RD,undefined},{RE,undefined}] = check_worker([]).
347 | 348 | -------------------------------------------------------------------------------- /apps/ctest/logs/README: -------------------------------------------------------------------------------- 1 | Present to allow git to checkin the logs directory. 2 | Temporary files are written here to record the results 3 | of Common Test executions. 4 | -------------------------------------------------------------------------------- /apps/erlangsp/include/license_and_copyright.hrl: -------------------------------------------------------------------------------- 1 | -license("Modified BSD License"). 2 | -copyright("(c) 2012, DuoMark International, Inc. All rights reserved"). 3 | 4 | %%%------------------------------------------------------------------------------ 5 | %%% Copyright (c) 2012, DuoMark International, Inc. 6 | %%% All rights reserved. 7 | %%% 8 | %%% Redistribution and use in source and binary forms, with or without 9 | %%% modification, are permitted provided that the following conditions are met: 10 | %%% 11 | %%% * Redistributions of source code must retain the above copyright 12 | %%% notice, this list of conditions and the following disclaimer. 13 | %%% 14 | %%% * Redistributions in binary form must reproduce the above copyright 15 | %%% notice, this list of conditions and the following disclaimer in the 16 | %%% documentation and/or other materials provided with the distribution. 17 | %%% 18 | %%% * Neither the name of DuoMark International, Inc. nor the 19 | %%% names of its contributors may be used to endorse or promote products 20 | %%% derived from this software without specific prior written permission. 21 | %%% 22 | %%% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 23 | %%% ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 | %%% WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | %%% DISCLAIMED. IN NO EVENT SHALL DUOMARK INTERNATIONAL, INC. 
BE LIABLE FOR ANY 26 | %%% DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 27 | %%% (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 | %%% LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 29 | %%% ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | %%% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 | %%% SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | %%%------------------------------------------------------------------------------ 33 | -------------------------------------------------------------------------------- /apps/erlangsp/src/erlangsp.app.src: -------------------------------------------------------------------------------- 1 | {application, erlangsp, 2 | [ 3 | {id, "Erlang/SP"}, 4 | {vsn, "0.0.1"}, 5 | {description, "Erlang Services Platform Library"}, 6 | {modules, []}, 7 | {registered, []}, 8 | {applications, [kernel, stdlib, sasl, gs, appmon]}, 9 | {included_applications, [coop]}, 10 | {mod, {erlangsp_app, []}}, 11 | {env, []} 12 | ]}. 13 | -------------------------------------------------------------------------------- /apps/erlangsp/src/erlangsp_app.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Erlang/SP application for graphical display of library execution. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(erlangsp_app). 12 | -author('Jay Nelson '). 13 | 14 | -include("license_and_copyright.hrl"). 15 | 16 | -behaviour(application). 
17 | 18 | %% Application callbacks 19 | -export([start/0, start/2, stop/1]). 20 | 21 | %% =================================================================== 22 | %% Application callbacks 23 | %% =================================================================== 24 | 25 | -spec start() -> {ok, pid()}. 26 | -spec start(any(), any()) -> {ok, pid()}. 27 | -spec stop([]) -> ok. 28 | 29 | %% @doc Start the application's root supervisor in erl listener. 30 | start() -> 31 | erlangsp_sup:start_link(). 32 | 33 | %% @doc Start the application's root supervisor from boot. 34 | start(_StartType, _StartArgs) -> 35 | erlangsp_sup:start_link(). 36 | 37 | %% @doc Stop the application. 38 | stop(_State) -> ok. 39 | -------------------------------------------------------------------------------- /apps/erlangsp/src/erlangsp_sup.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------------------ 2 | %%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved 3 | %%% @author Jay Nelson 4 | %%% @reference The license is based on the template for Modified BSD from 5 | %%% OSI 6 | %%% @doc 7 | %%% Erlang/SP supervisor for graphical display of library execution. 8 | %%% @since v0.0.1 9 | %%% @end 10 | %%%------------------------------------------------------------------------------ 11 | -module(erlangsp_sup). 12 | -author('Jay Nelson '). 13 | 14 | -include("license_and_copyright.hrl"). 15 | 16 | -behaviour(supervisor). 17 | 18 | %% External API 19 | -export([start_link/0]). 20 | 21 | %% Supervisor callbacks 22 | -export([init/1]). 23 | 24 | 25 | %% =================================================================== 26 | %% API functions 27 | %% =================================================================== 28 | 29 | -spec start_link() -> {ok, pid()}. 30 | 31 | %% @doc Start the root Erlang/SP supervisor. 
%% @doc Start the root Erlang/SP supervisor, registered locally as ?MODULE.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, {}).


%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

-type restart() :: {supervisor:strategy(), non_neg_integer(), non_neg_integer()}.
-type sup_init_return() :: {ok, {restart(), [supervisor:child_spec()]}}.

-spec init({}) -> sup_init_return().

%% @doc Placeholder for future supervision: one_for_one, max 5 restarts
%% in 10 seconds, and (currently) no children.
init({}) ->
    Restart_Strategy = {one_for_one, 5, 10},
    Child_Specs = [],
    {ok, {Restart_Strategy, Child_Specs}}.
7 | %%% @since v0.0.1 8 | %%% @end 9 | %%%------------------------------------------------------------------------------ 10 | -module(esp_cache). 11 | 12 | -include_lib("erlangsp/include/license_and_copyright.hrl"). 13 | -author(jayn). 14 | 15 | %% Public API 16 | -export([new_cache_coop/1]). 17 | 18 | %% Testing API 19 | -export([new_directory_node/1, new_worker_node/1, new_datum_node/3]). 20 | 21 | %% Node setup functions 22 | -export([ 23 | init_directory/1, value_request/2, % Directory Coop_Node 24 | init_mfa_worker/1, make_new_datum/3, % MFA Worker Coop_Node 25 | init_datum/1, manage_datum/2 % Cached Datum Coop_Node 26 | ]). 27 | 28 | 29 | %%------------------------------------------------------------------------------ 30 | %% 31 | %% Erlang/SP caching is implemented using a Coop pattern. 32 | %% 33 | %% Functionally, there is a Coop_Node for the central directory 34 | %% of data keys which each reference a cached datum. Each datum 35 | %% is held in a separate, dynamic Coop_Node instance. A round- 36 | %% robin pool of workers is used to compute values that are not 37 | %% passed directly to a Coop_Node datum instance, to achieve 38 | %% limited but load-balanced concurrency. 39 | %% 40 | %% Two entries exist per Key: 41 | %% Key lookup: Key => Coop_Node 42 | %% Expired Index: {Key, Coop_Node} => Node_Task_Pid 43 | %% 44 | %% This module includes one comparative implementations: 45 | %% 1) Process dictionary for Keys 46 | %% 47 | %% [Two others are not yet implemented]: 48 | %% 2) Public shared concurrent read ETS table for Keys 49 | %% - One Coop_Node writing to it 50 | %% 3) Concurrent Coop_Node skiplist for Keys 51 | %% 52 | %% An application which employs this cache can either supply a 53 | %% value directly, or provide {Mod, Fun, Args} to execute which 54 | %% result in a cached value. Supplying a value directly incurs 55 | %% the overhead of passing that value as a message argument to 56 | %% a minimum of 2 processes. 
%% Coop:
%%    Dir => X workers => | no receiver
%%    Dynamic => Datum workers

%% @doc Construct the cache Coop: one central directory Coop_Node fanning
%% out (round-robin) to Num_Workers M:F(A) worker nodes, with no fan-in
%% receiver. Datum Coop_Nodes are created dynamically by the workers.
new_cache_coop(Num_Workers) ->

    %% Make the cache directory and worker function specifications...
    %% NOTE: the directory init arg is {} to agree with new_directory_node/1
    %% and the -spec value_request({}, ...) (this was inconsistently [] before;
    %% init_directory/1 is the identity, so behavior is unchanged).
    Cache_Directory = coop:make_dag_node(cache,
                                         ?COOP_INIT_FN(init_directory, {}),
                                         ?COOP_TASK_FN(value_request),
                                         [],
                                         round_robin),

    %% Workers get the Coop_Head as an extra task-function argument
    %% (access_coop_head) so they can announce new datum nodes upstream.
    Workers = [coop:make_dag_node(list_to_atom("worker-" ++ integer_to_list(N)),
                                  ?COOP_INIT_FN(init_mfa_worker, {}),
                                  ?COOP_TASK_FN(make_new_datum),
                                  [access_coop_head]
                                 )
               || N <- lists:seq(1, Num_Workers)],

    %% One cache directory fans out to Num_Workers with no final fan in.
    %% New datum nodes are created dynamically by the workers.
    coop:new_fanout(Cache_Directory, Workers, none).
%%========================= Directory Node =================================

-type coop_proc() :: pid() | coop_head() | coop_node().
-type receiver() :: {reference(), coop_proc()}.

-type change_cmd() :: add | replace.
-type value_request() :: {?VALUE, any()} | {?MFA, {module(), atom(), list()}}.
-type change_request() :: {change_cmd(), value_request(), receiver()} | {remove, receiver()}.

-type lookup_request() :: {any(), receiver()}.
-type fep_request() :: {any(), value_request(), receiver()}.
-type fetch_cmd() :: lookup.

%% -type stats_cmd() :: num_keys.

-spec value_request({}, {change_cmd(), change_request()}) -> no_return().
-spec change_value ({}, {change_cmd(), change_request()}, coop_proc() | undefined) -> {{}, ?COOP_NOOP} | {{}, {add, change_request()}}.
-spec return_value ({}, {fetch_cmd(), lookup_request() | fep_request()}, coop_proc() | undefined) -> {{}, ?COOP_NOOP}.

%% @doc Create a new directory Coop_Node attached to an existing Coop_Head.
new_directory_node(Coop_Head) ->
    Kill_Switch = coop_head:get_kill_switch(Coop_Head),
    coop_node:new(Coop_Head, Kill_Switch, ?COOP_TASK_FN(value_request), ?COOP_INIT_FN(init_directory, {}), []).

%% No state needed: the key index lives in the directory's process dictionary.
init_directory(State) -> State.


%% Modify the cached value process and send the new value to a dynamic downstream coop_node...
value_request(State, {remove,  {Key, _Rcvr} } = Req)            -> change_value(State, Req, get(Key));
value_request(State, {add,     {Key, _Chg_Type, _Rcvr} } = Req) -> change_value(State, Req, get(Key));
value_request(State, {replace, {Key, _Chg_Type, _Rcvr} } = Req) -> change_value(State, Req, get(Key));

%% Return the cached value to a dynamic downstream coop_node...
value_request(State, {lookup, {Key, _Rcvr} } = Req) -> return_value(State, Req, get(Key));

%% Return the number of active keys...
value_request(State, {num_keys, {Ref, Rcvr}}) ->
    %% 2 entries for each key and proc_lib added '$ancestors' and '$initial_call'
    coop:relay_data(Rcvr, {Ref, (length(get()) - 2) div 2}),
    {State, ?COOP_NOOP};

%% Expiration of a datum process removes all references to it in the process dictionary.
%% Key => Coop_Node + {Key, Coop_Node} => Node_Task_Pid (the monitored Pid that went down)
value_request(State, {'DOWN', _Ref, process, Pid, _Reason}) ->
    [begin erase(Key), erase(Coop_Key) end
     || {Key, _Coop_Node} = Coop_Key <- get_keys(Pid), get(Coop_Key) =:= Pid],
    {State, ?COOP_NOOP};

%% New dynamically created Coop_Nodes are monitored and placed in the process dictionary.
%% Any existing Coop_Node for the same key is expired.
%% Fixes vs. the original clause:
%%   1) erlang:demonitor(process, Pid) was a badarg (demonitor takes a monitor
%%      reference); the stale monitor is now left in place and its eventual
%%      'DOWN' is handled harmlessly by the clause above.
%%   2) The old implementation keyed the replacement check on the NEW
%%      {Key, Coop_Node} entry, which is always undefined, so an existing
%%      datum for the same Key was never expired; we now check put(Key, ...).
%%   3) The stale {Key, Old_Coop_Node} reverse entry is erased eagerly so the
%%      old datum's 'DOWN' cannot remove the fresh Key => Coop_Node mapping.
value_request(State, {new, Key, #coop_node{task_pid=Node_Task_Pid} = Coop_Node}) ->
    erlang:monitor(process, Node_Task_Pid),
    case put(Key, Coop_Node) of
        undefined ->
            no_existing_datum_to_expire;
        Old_Coop_Node ->
            erase({Key, Old_Coop_Node}),
            coop:relay_data(Old_Coop_Node, {expire})
    end,
    put({Key, Coop_Node}, Node_Task_Pid),
    {State, ?COOP_NOOP}.


%% Terminate the Coop_Node containing the cached value if there is one...
change_value(State, {remove, {_Key, {Ref, Requester}}}, undefined) ->
    %% Nothing cached under Key: answer the requester directly.
    coop:relay_data(Requester, {Ref, undefined}),
    {State, ?COOP_NOOP};
change_value(State, {remove, {Key, {_Ref, _Rqstr} = Requester}}, Coop_Node) ->
    %% Drop both index entries, then let the datum node reply and exit.
    erase(Key),
    erase({Key, Coop_Node}),
    coop:relay_data(Coop_Node, {expire, Requester}),
    {State, ?COOP_NOOP};

%% Update the Coop_Node containing the cached value...
change_value(State, {replace, {_Key, _Chg_Type, {_Ref, _Rqstr}} = New_Value }, undefined) -> value_request(State, {add, New_Value});
change_value(State, {replace, {_Key, {?VALUE, V}, {_Ref, _Rqstr} = Requester}}, Coop_Node) -> coop:relay_data(Coop_Node, {replace, V, Requester}), {State, ?COOP_NOOP};
%% But use the downstream worker pool if M:F(A) must be executed to get the value to cache...
change_value(State, {replace, {_Key, {?MFA, _MFA}, {_Ref, _Rqstr}} = Request}, Coop_Node) -> {State, {replace, Request, Coop_Node}};

%% Create a new dynamic Coop_Node containing the cached value using the downstream worker pool.
change_value(State, {add, {_Key, _Chg_Type, {_Ref, _Rqstr}}} = Request, undefined) -> {State, Request}; % Request is passed to a worker.
change_value(State, {add, {_Key, _Chg_Type, {Ref, Requester}}}, _Coop_Node) -> coop:relay_data(Requester, {Ref, defined}), {State, ?COOP_NOOP}.


%% Send the cached value to the requester.
return_value(State, {lookup, {_Key, {Ref, Requester}} }, undefined) -> coop:relay_data(Requester, {Ref, undefined}), {State, ?COOP_NOOP};
return_value(State, {_Any_Type, {_Key, {_Ref, _Rqstr} = Requester}}, Coop_Node) -> coop:relay_data(Coop_Node, {get_value, Requester}), {State, ?COOP_NOOP}.
%% Compute the replacement value and forward it to the existing datum Coop_Node.
%% Kill_Switch is the worker's state; Coop_Head is injected via access_coop_head.
make_new_datum(_Coop_Head, Kill_Switch, {replace, {_Key, {?MFA, {Mod, Fun, Args}}, {_Ref, _Rqstr} = Requester}, Coop_Node}) ->
    %% Directory already knows about this datum, using worker for potentially long running M:F(A)
    coop:relay_data(Coop_Node, {replace, Mod:Fun(Args), Requester}),
    {Kill_Switch, ?COOP_NOOP};

%% Create a new Coop_Node initialized with the value to cache, notifying the Coop_Head directory.
%% Value supplied directly by the caller...
make_new_datum(Coop_Head, Kill_Switch, {add, {Key, {?VALUE, V}, {_Ref, _Rqstr} = Requester}}) ->
    New_Coop_Node = new_datum_node(Coop_Head, Kill_Switch, V),
    relay_new_datum(Coop_Head, Key, New_Coop_Node, Requester, Kill_Switch);
%% ...or computed here via M:F(A) so large/slow values never transit as messages.
make_new_datum(Coop_Head, Kill_Switch, {add, {Key, {?MFA, {Mod, Fun, Args}}, {_Ref, _Rqstr} = Requester}}) ->
    New_Coop_Node = new_datum_node(Coop_Head, Kill_Switch, Mod:Fun(Args)),
    relay_new_datum(Coop_Head, Key, New_Coop_Node, Requester, Kill_Switch).

%% Announce the new datum node to the directory (high priority so the index
%% updates promptly), then have the node deliver its value to the requester.
relay_new_datum(Coop_Head, Key, New_Coop_Node, Requester, Kill_Switch) ->
    coop:relay_high_priority_data(Coop_Head, {new, Key, New_Coop_Node}),
    coop:relay_data(New_Coop_Node, {get_value, Requester}),
    {Kill_Switch, ?COOP_NOOP}.


%%========================= Datum Node ====================================

%% New Datum processes are dynamically created Coop Nodes, one per cached value.
new_datum_node(Coop_Head, Kill_Switch, V) ->
    coop_node:new(Coop_Head, Kill_Switch, ?COOP_TASK_FN(manage_datum), ?COOP_INIT_FN(init_datum, V), []).


%% Initialize the Coop_Node state with the value to cache (identity).
init_datum(V) -> V.
%% Task function of a datum Coop_Node: the cached value IS the node state.
%% Replies are {Ref, Value} pairs relayed to the requester; no downstream
%% listeners exist, so every clause ends with ?COOP_NOOP or a normal exit.
%% exit(normal) on expire triggers the directory's 'DOWN' cleanup clause.
manage_datum(_Datum, {expire} ) -> exit(normal);
manage_datum( Datum, {expire, {Ref, Requester}} ) -> coop:relay_data(Requester, {Ref, Datum}), exit(normal);
manage_datum( Datum, {get_value, {Ref, Requester}} ) -> coop:relay_data(Requester, {Ref, Datum}), {Datum, ?COOP_NOOP};
manage_datum(_Datum, {replace, New_Value, {Ref, Requester}} ) -> coop:relay_data(Requester, {Ref, New_Value}), {New_Value, ?COOP_NOOP}.
Project) 33 | -------------------------------------------------------------------------------- /rebar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/duomark/erlangsp/a0a47d4ef3b74c1138e264424e36c754a3024e0a/rebar -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | 3 | {sub_dirs, ["apps/coop", "apps/erlangsp", "apps/examples/esp_cache", "rel"]}. 4 | 5 | {deps, [ 6 | {meck, "0.7.1", {git, "git://github.com/eproxus/meck.git", {tag, "0.7.1"}}} 7 | ]}. 8 | 9 | {erl_opts, [debug_info, warnings_as_errors]}. 10 | {cover_enabled, true}. 11 | -------------------------------------------------------------------------------- /rel/files/app.config: -------------------------------------------------------------------------------- 1 | [ 2 | %% SASL config 3 | {sasl, [ 4 | {sasl_error_logger, {file, "log/sasl-error.log"}}, 5 | {errlog_type, error}, 6 | {error_logger_mf_dir, "log/sasl"}, % Log directory 7 | {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size 8 | {error_logger_mf_maxfiles, 5} % 5 files max 9 | ]} 10 | ]. 11 | 12 | -------------------------------------------------------------------------------- /rel/files/erl: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ## This script replaces the default "erl" in erts-VSN/bin. This is necessary 4 | ## as escript depends on erl and in turn, erl depends on having access to a 5 | ## bootscript (start.boot). Note that this script is ONLY invoked as a side-effect 6 | ## of running escript -- the embedded node bypasses erl and uses erlexec directly 7 | ## (as it should). 8 | ## 9 | ## Note that this script makes the assumption that there is a start_clean.boot 10 | ## file available in $ROOTDIR/release/VSN. 
11 | 12 | # Determine the abspath of where this script is executing from. 13 | ERTS_BIN_DIR=$(cd ${0%/*} && pwd) 14 | 15 | # Now determine the root directory -- this script runs from erts-VSN/bin, 16 | # so we simply need to strip off two dirs from the end of the ERTS_BIN_DIR 17 | # path. 18 | ROOTDIR=${ERTS_BIN_DIR%/*/*} 19 | 20 | # Parse out release and erts info 21 | START_ERL=`cat $ROOTDIR/releases/start_erl.data` 22 | ERTS_VSN=${START_ERL% *} 23 | APP_VSN=${START_ERL#* } 24 | 25 | BINDIR=$ROOTDIR/erts-$ERTS_VSN/bin 26 | EMU=beam 27 | PROGNAME=`echo $0 | sed 's/.*\\///'` 28 | CMD="$BINDIR/erlexec" 29 | export EMU 30 | export ROOTDIR 31 | export BINDIR 32 | export PROGNAME 33 | 34 | exec $CMD -boot $ROOTDIR/releases/$APP_VSN/start_clean ${1+"$@"} 35 | -------------------------------------------------------------------------------- /rel/files/erlangsp: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # -*- tab-width:4;indent-tabs-mode:nil -*- 3 | # ex: ts=4 sw=4 et 4 | 5 | RUNNER_SCRIPT_DIR=$(cd ${0%/*} && pwd) 6 | 7 | RUNNER_BASE_DIR=${RUNNER_SCRIPT_DIR%/*} 8 | RUNNER_ETC_DIR=$RUNNER_BASE_DIR/etc 9 | RUNNER_LOG_DIR=$RUNNER_BASE_DIR/log 10 | # Note the trailing slash on $PIPE_DIR/ 11 | PIPE_DIR=/tmp/$RUNNER_BASE_DIR/ 12 | RUNNER_USER= 13 | 14 | # Make sure this script is running as the appropriate user 15 | if [ ! 
-z "$RUNNER_USER" ] && [ `whoami` != "$RUNNER_USER" ]; then 16 | exec sudo -u $RUNNER_USER -i $0 $@ 17 | fi 18 | 19 | # Make sure CWD is set to runner base dir 20 | cd $RUNNER_BASE_DIR 21 | 22 | # Make sure log directory exists 23 | mkdir -p $RUNNER_LOG_DIR 24 | # Identify the script name 25 | SCRIPT=`basename $0` 26 | 27 | # Parse out release and erts info 28 | START_ERL=`cat $RUNNER_BASE_DIR/releases/start_erl.data` 29 | ERTS_VSN=${START_ERL% *} 30 | APP_VSN=${START_ERL#* } 31 | 32 | # Use releases/VSN/vm.args if it exists otherwise use etc/vm.args 33 | if [ -e "$RUNNER_BASE_DIR/releases/$APP_VSN/vm.args" ]; then 34 | VMARGS_PATH="$RUNNER_BASE_DIR/releases/$APP_VSN/vm.args" 35 | else 36 | VMARGS_PATH="$RUNNER_ETC_DIR/vm.args" 37 | fi 38 | 39 | # Use releases/VSN/sys.config if it exists otherwise use etc/app.config 40 | if [ -e "$RUNNER_BASE_DIR/releases/$APP_VSN/sys.config" ]; then 41 | CONFIG_PATH="$RUNNER_BASE_DIR/releases/$APP_VSN/sys.config" 42 | else 43 | CONFIG_PATH="$RUNNER_ETC_DIR/app.config" 44 | fi 45 | 46 | # Extract the target node name from node.args 47 | NAME_ARG=`egrep '^-s?name' $VMARGS_PATH` 48 | if [ -z "$NAME_ARG" ]; then 49 | echo "vm.args needs to have either -name or -sname parameter." 50 | exit 1 51 | fi 52 | 53 | # Extract the target cookie 54 | COOKIE_ARG=`grep '^-setcookie' $VMARGS_PATH` 55 | if [ -z "$COOKIE_ARG" ]; then 56 | echo "vm.args needs to have a -setcookie parameter." 57 | exit 1 58 | fi 59 | 60 | # Add ERTS bin dir to our path 61 | ERTS_PATH=$RUNNER_BASE_DIR/erts-$ERTS_VSN/bin 62 | 63 | # Setup command to control the node 64 | NODETOOL="$ERTS_PATH/escript $ERTS_PATH/nodetool $NAME_ARG $COOKIE_ARG" 65 | 66 | # Check the first argument for instructions 67 | case "$1" in 68 | start) 69 | # Make sure there is not already a node running 70 | RES=`$NODETOOL ping` 71 | if [ "$RES" = "pong" ]; then 72 | echo "Node is already running!" 
73 | exit 1 74 | fi 75 | HEART_COMMAND="$RUNNER_BASE_DIR/bin/$SCRIPT start" 76 | export HEART_COMMAND 77 | mkdir -p $PIPE_DIR 78 | shift # remove $1 79 | $ERTS_PATH/run_erl -daemon $PIPE_DIR $RUNNER_LOG_DIR "exec $RUNNER_BASE_DIR/bin/$SCRIPT console $@" 2>&1 80 | ;; 81 | 82 | stop) 83 | # Wait for the node to completely stop... 84 | case `uname -s` in 85 | Linux|Darwin|FreeBSD|DragonFly|NetBSD|OpenBSD) 86 | # PID COMMAND 87 | PID=`ps ax -o pid= -o command=|\ 88 | grep "$RUNNER_BASE_DIR/.*/[b]eam"|awk '{print $1}'` 89 | ;; 90 | SunOS) 91 | # PID COMMAND 92 | PID=`ps -ef -o pid= -o args=|\ 93 | grep "$RUNNER_BASE_DIR/.*/[b]eam"|awk '{print $1}'` 94 | ;; 95 | CYGWIN*) 96 | # UID PID PPID TTY STIME COMMAND 97 | PID=`ps -efW|grep "$RUNNER_BASE_DIR/.*/[b]eam"|awk '{print $2}'` 98 | ;; 99 | esac 100 | $NODETOOL stop 101 | ES=$? 102 | if [ "$ES" -ne 0 ]; then 103 | exit $ES 104 | fi 105 | while `kill -0 $PID 2>/dev/null`; 106 | do 107 | sleep 1 108 | done 109 | ;; 110 | 111 | restart) 112 | ## Restart the VM without exiting the process 113 | $NODETOOL restart 114 | ES=$? 115 | if [ "$ES" -ne 0 ]; then 116 | exit $ES 117 | fi 118 | ;; 119 | 120 | reboot) 121 | ## Restart the VM completely (uses heart to restart it) 122 | $NODETOOL reboot 123 | ES=$? 124 | if [ "$ES" -ne 0 ]; then 125 | exit $ES 126 | fi 127 | ;; 128 | 129 | ping) 130 | ## See if the VM is alive 131 | $NODETOOL ping 132 | ES=$? 133 | if [ "$ES" -ne 0 ]; then 134 | exit $ES 135 | fi 136 | ;; 137 | 138 | attach) 139 | # Make sure a node IS running 140 | RES=`$NODETOOL ping` 141 | ES=$? 142 | if [ "$ES" -ne 0 ]; then 143 | echo "Node is not running!" 
144 | exit $ES 145 | fi 146 | 147 | shift 148 | exec $ERTS_PATH/to_erl $PIPE_DIR 149 | ;; 150 | 151 | console|console_clean) 152 | # .boot file typically just $SCRIPT (ie, the app name) 153 | # however, for debugging, sometimes start_clean.boot is useful: 154 | case "$1" in 155 | console) BOOTFILE=$SCRIPT ;; 156 | console_clean) BOOTFILE=start_clean ;; 157 | esac 158 | # Setup beam-required vars 159 | ROOTDIR=$RUNNER_BASE_DIR 160 | BINDIR=$ROOTDIR/erts-$ERTS_VSN/bin 161 | EMU=beam 162 | PROGNAME=`echo $0 | sed 's/.*\\///'` 163 | CMD="$BINDIR/erlexec -boot $RUNNER_BASE_DIR/releases/$APP_VSN/$BOOTFILE -mode embedded -config $CONFIG_PATH -args_file $VMARGS_PATH -- ${1+"$@"}" 164 | export EMU 165 | export ROOTDIR 166 | export BINDIR 167 | export PROGNAME 168 | 169 | # Dump environment info for logging purposes 170 | echo "Exec: $CMD" 171 | echo "Root: $ROOTDIR" 172 | 173 | # Log the startup 174 | logger -t "$SCRIPT[$$]" "Starting up" 175 | 176 | # Start the VM 177 | exec $CMD 178 | ;; 179 | 180 | *) 181 | echo "Usage: $SCRIPT {start|stop|restart|reboot|ping|console|console_clean|attach}" 182 | exit 1 183 | ;; 184 | esac 185 | 186 | exit 0 187 | -------------------------------------------------------------------------------- /rel/files/erlangsp.cmd: -------------------------------------------------------------------------------- 1 | @setlocal 2 | 3 | @set node_name=erlangsp 4 | 5 | @rem Get the abolute path to the parent directory, 6 | @rem which is assumed to be the node root. 
7 | @for /F "delims=" %%I in ("%~dp0..") do @set node_root=%%~fI 8 | 9 | @set releases_dir=%node_root%\releases 10 | 11 | @rem Parse ERTS version and release version from start_erl.data 12 | @for /F "tokens=1,2" %%I in (%releases_dir%\start_erl.data) do @( 13 | @call :set_trim erts_version %%I 14 | @call :set_trim release_version %%J 15 | ) 16 | 17 | @set erts_bin=%node_root%\erts-%erts_version%\bin 18 | 19 | @set service_name=%node_name%_%release_version% 20 | 21 | @if "%1"=="install" @goto install 22 | @if "%1"=="uninstall" @goto uninstall 23 | @if "%1"=="start" @goto start 24 | @if "%1"=="stop" @goto stop 25 | @if "%1"=="restart" @call :stop && @goto start 26 | @if "%1"=="console" @goto console 27 | @rem TODO: attach, ping, restart and reboot 28 | 29 | :usage 30 | @echo Usage: %0 {install|uninstall|start|stop|restart|console} 31 | @goto :EOF 32 | 33 | :install 34 | @%erts_bin%\erlsrv.exe add %service_name% -c "Erlang node %node_name% in %node_root%" -sname %node_name% -w %node_root% -m %node_root%\bin\start_erl.cmd -args " ++ %node_name% ++ %node_root%" -stopaction "init:stop()." 
35 | @goto :EOF 36 | 37 | :uninstall 38 | @%erts_bin%\erlsrv.exe remove %service_name% 39 | @%erts_bin%\epmd.exe -kill 40 | @goto :EOF 41 | 42 | :start 43 | @%erts_bin%\erlsrv.exe start %service_name% 44 | @goto :EOF 45 | 46 | :stop 47 | @%erts_bin%\erlsrv.exe stop %service_name% 48 | @goto :EOF 49 | 50 | :console 51 | @start %erts_bin%\werl.exe -boot %releases_dir%\%release_version%\%node_name% 52 | @goto :EOF 53 | 54 | :set_trim 55 | @set %1=%2 56 | @goto :EOF 57 | -------------------------------------------------------------------------------- /rel/files/nodetool: -------------------------------------------------------------------------------- 1 | %% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*- 2 | %% ex: ft=erlang ts=4 sw=4 et 3 | %% ------------------------------------------------------------------- 4 | %% 5 | %% nodetool: Helper Script for interacting with live nodes 6 | %% 7 | %% ------------------------------------------------------------------- 8 | 9 | main(Args) -> 10 | ok = start_epmd(), 11 | %% Extract the args 12 | {RestArgs, TargetNode} = process_args(Args, [], undefined), 13 | 14 | %% See if the node is currently running -- if it's not, we'll bail 15 | case {net_kernel:hidden_connect_node(TargetNode), net_adm:ping(TargetNode)} of 16 | {true, pong} -> 17 | ok; 18 | {_, pang} -> 19 | io:format("Node ~p not responding to pings.\n", [TargetNode]), 20 | halt(1) 21 | end, 22 | 23 | case RestArgs of 24 | ["ping"] -> 25 | %% If we got this far, the node already responsed to a ping, so just dump 26 | %% a "pong" 27 | io:format("pong\n"); 28 | ["stop"] -> 29 | io:format("~p\n", [rpc:call(TargetNode, init, stop, [], 60000)]); 30 | ["restart"] -> 31 | io:format("~p\n", [rpc:call(TargetNode, init, restart, [], 60000)]); 32 | ["reboot"] -> 33 | io:format("~p\n", [rpc:call(TargetNode, init, reboot, [], 60000)]); 34 | ["rpc", Module, Function | RpcArgs] -> 35 | case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function), 36 | 
[RpcArgs], 60000) of 37 | ok -> 38 | ok; 39 | {badrpc, Reason} -> 40 | io:format("RPC to ~p failed: ~p\n", [TargetNode, Reason]), 41 | halt(1); 42 | _ -> 43 | halt(1) 44 | end; 45 | ["rpcterms", Module, Function, ArgsAsString] -> 46 | case rpc:call(TargetNode, list_to_atom(Module), list_to_atom(Function), 47 | consult(ArgsAsString), 60000) of 48 | {badrpc, Reason} -> 49 | io:format("RPC to ~p failed: ~p\n", [TargetNode, Reason]), 50 | halt(1); 51 | Other -> 52 | io:format("~p\n", [Other]) 53 | end; 54 | Other -> 55 | io:format("Other: ~p\n", [Other]), 56 | io:format("Usage: nodetool {ping|stop|restart|reboot}\n") 57 | end, 58 | net_kernel:stop(). 59 | 60 | process_args([], Acc, TargetNode) -> 61 | {lists:reverse(Acc), TargetNode}; 62 | process_args(["-setcookie", Cookie | Rest], Acc, TargetNode) -> 63 | erlang:set_cookie(node(), list_to_atom(Cookie)), 64 | process_args(Rest, Acc, TargetNode); 65 | process_args(["-name", TargetName | Rest], Acc, _) -> 66 | ThisNode = append_node_suffix(TargetName, "_maint_"), 67 | {ok, _} = net_kernel:start([ThisNode, longnames]), 68 | process_args(Rest, Acc, nodename(TargetName)); 69 | process_args(["-sname", TargetName | Rest], Acc, _) -> 70 | ThisNode = append_node_suffix(TargetName, "_maint_"), 71 | {ok, _} = net_kernel:start([ThisNode, shortnames]), 72 | process_args(Rest, Acc, nodename(TargetName)); 73 | process_args([Arg | Rest], Acc, Opts) -> 74 | process_args(Rest, [Arg | Acc], Opts). 75 | 76 | 77 | start_epmd() -> 78 | [] = os:cmd(epmd_path() ++ " -daemon"), 79 | ok. 80 | 81 | epmd_path() -> 82 | ErtsBinDir = filename:dirname(escript:script_name()), 83 | Name = "epmd", 84 | case os:find_executable(Name, ErtsBinDir) of 85 | false -> 86 | case os:find_executable(Name) of 87 | false -> 88 | io:format("Could not find epmd.~n"), 89 | halt(1); 90 | GlobalEpmd -> 91 | GlobalEpmd 92 | end; 93 | Epmd -> 94 | Epmd 95 | end. 
%% Turn a (possibly host-less) node name string into a fully-qualified atom,
%% borrowing this node's own host part when none was supplied.
nodename(Name) ->
    case string:tokens(Name, "@") of
        [_Node, _Host] ->
            list_to_atom(Name);
        [Node] ->
            [_, Host] = string:tokens(atom_to_list(node()), "@"),
            list_to_atom(lists:concat([Node, "@", Host]))
    end.

%% Build a unique maintenance-node name by splicing Suffix and this OS pid
%% between the node part and the (optional) host part.
append_node_suffix(Name, Suffix) ->
    Os_Pid = os:getpid(),
    case string:tokens(Name, "@") of
        [Node, Host] ->
            list_to_atom(lists:concat([Node, Suffix, Os_Pid, "@", Host]));
        [Node] ->
            list_to_atom(lists:concat([Node, Suffix, Os_Pid]))
    end.


%%
%% Given a string or binary, parse it into a list of terms, ala file:consult/0
%%
consult(Text) when is_list(Text)   -> consult([], Text, []);
consult(Text) when is_binary(Text) -> consult([], binary_to_list(Text), []).

%% Tokenize one dot-terminated term at a time, accumulating parsed terms
%% until the scanner reports end-of-input or an error.
consult(Cont, Str, Acc) ->
    case erl_scan:tokens(Cont, Str, 0) of
        {more, Cont1} ->
            %% Scanner needs more input; signal eof to flush the final term.
            consult(Cont1, eof, Acc);
        {done, {ok, Tokens, _End_Line}, Remaining} ->
            {ok, Term} = erl_parse:parse_term(Tokens),
            consult([], Remaining, [Term | Acc]);
        {done, {eof, _End_Line}, _Remaining} ->
            lists:reverse(Acc);
        {done, {error, Info, _End_Line}, _Remaining} ->
            {error, Info}
    end.
5 | @set args="%*" 6 | @for /F "delims=++ tokens=1,2,3" %%I in (%args%) do @( 7 | @set erl_args=%%I 8 | @call :set_trim node_name %%J 9 | @call :set_trim node_root %%K 10 | ) 11 | 12 | @set releases_dir=%node_root%\releases 13 | 14 | @rem parse ERTS version and release version from start_erl.dat 15 | @for /F "tokens=1,2" %%I in (%releases_dir%\start_erl.data) do @( 16 | @call :set_trim erts_version %%I 17 | @call :set_trim release_version %%J 18 | ) 19 | 20 | @set erl_exe=%node_root%\erts-%erts_version%\bin\erl.exe 21 | @set boot_file=%releases_dir%\%release_version%\%node_name% 22 | 23 | @if exist %releases_dir%\%release_version%\sys.config ( 24 | @set app_config=%releases_dir%\%release_version%\sys.config 25 | ) else ( 26 | @set app_config=%node_root%\etc\app.config 27 | ) 28 | 29 | @if exist %releases_dir%\%release_version%\vm.args ( 30 | @set vm_args=%releases_dir%\%release_version%\vm.args 31 | ) else ( 32 | @set vm_args=%node_root%\etc\vm.args 33 | ) 34 | 35 | @%erl_exe% %erl_args% -boot %boot_file% -config %app_config% -args_file %vm_args% 36 | 37 | :set_trim 38 | @set %1=%2 39 | @goto :EOF 40 | -------------------------------------------------------------------------------- /rel/files/vm.args: -------------------------------------------------------------------------------- 1 | ## Name of the node 2 | -name erlangsp@127.0.0.1 3 | 4 | ## Cookie for distributed erlang 5 | -setcookie erlangsp 6 | 7 | ## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive 8 | ## (Disabled by default..use with caution!) 
9 | ##-heart 10 | 11 | ## Enable kernel poll and a few async threads 12 | ##+K true 13 | ##+A 5 14 | 15 | ## Increase number of concurrent ports/sockets 16 | ##-env ERL_MAX_PORTS 4096 17 | 18 | ## Tweak GC to run more often 19 | ##-env ERL_FULLSWEEP_AFTER 10 20 | -------------------------------------------------------------------------------- /rel/reltool.config: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | 3 | {sys, [ 4 | {lib_dirs, [ "../apps", "../apps/examples", "../deps" ]}, 5 | {erts, [{mod_cond, derived}, {app_file, strip}]}, 6 | {app_file, strip}, 7 | {rel, "erlangsp", "0.0.1", 8 | [ 9 | kernel, stdlib, sasl, gs, appmon, 10 | coop, esp_cache, erlangsp 11 | ]}, 12 | {rel, "start_clean", "", [kernel, stdlib]}, 13 | {boot_rel, "erlangsp"}, 14 | {profile, embedded}, 15 | 16 | {incl_cond, exclude}, 17 | {excl_archive_filters, [".*"]}, %% Do not archive built libs 18 | {excl_sys_filters, ["^bin/.*", "^erts.*/bin/(dialyzer|typer)", 19 | "^erts.*/(doc|info|include|lib|man|src)"]}, 20 | {excl_app_filters, ["\.gitignore"]}, 21 | 22 | %% Artifact of having tsung installed in $ERL_TOP 23 | %% {app, tsung, [{incl_cond, exclude}]}, 24 | %% {app, tsung_recorder, [{incl_cond, exclude}]}, 25 | %% {app, tsung_controller, [{incl_cond, exclude}]}, 26 | 27 | %% System libraries 28 | {app, kernel, [{incl_cond, include}]}, 29 | {app, stdlib, [{incl_cond, include}]}, 30 | {app, sasl, [{incl_cond, include}]}, 31 | {app, gs, [{incl_cond, include}]}, 32 | {app, appmon, [{incl_cond, include}]}, 33 | 34 | %% Erlang/SP libraries 35 | {app, coop, [{incl_cond, include}]}, 36 | {app, esp_cache, [{incl_cond, include}]}, 37 | {app, erlangsp, [{incl_cond, include}]} 38 | ]}. 39 | 40 | {target_dir, "erlangsp"}. 
41 | 42 | {overlay, [ 43 | {mkdir, "log/sasl"}, 44 | 45 | {copy, "files/erl", "\{\{erts_vsn\}\}/bin/erl"}, 46 | {copy, "files/nodetool", "\{\{erts_vsn\}\}/bin/nodetool"}, 47 | {copy, "files/erlangsp", "bin/erlangsp"}, 48 | 49 | {copy, "files/app.config", "releases/\{\{rel_vsn\}\}/app.config"}, 50 | {copy, "files/erlangsp.cmd", "bin/erlangsp.cmd"}, 51 | {copy, "files/start_erl.cmd", "bin/start_erl.cmd"}, 52 | {copy, "files/vm.args", "releases/\{\{rel_vsn\}\}/vm.args"}, 53 | 54 | {template, "files/vm.args", "etc/vm.args"}, 55 | {template, "files/app.config", "etc/app.config"} 56 | ]}. 57 | --------------------------------------------------------------------------------