├── .gitignore ├── .travis.yml ├── Dockerfiles ├── exp-base └── exp-copy ├── GCLOUD.md ├── LICENSE ├── LOCALKUBE.md ├── Makefile ├── README.md ├── bin ├── bench_metadata.sh ├── bench_micro.sh ├── bench_retwis.sh ├── dash-proxy.sh ├── deploy-exp.sh ├── end_status.sh ├── env ├── fmt.sh ├── g-cluster.sh ├── icde19-exp.sh ├── image.sh ├── k8s_api_server.sh ├── k8s_api_token.sh ├── lsim-dash-deploy.sh ├── redis-deploy.sh ├── redis-sync.erl ├── start-redis-sync.sh ├── status.sh ├── stop.sh └── zipf.py ├── config └── vm.args ├── emulab ├── README.md ├── emulab-list-of-nodes.png ├── emulab.config ├── init-master.sh ├── init-node.sh ├── init.sh ├── killall.sh └── nodes-table ├── evaluation ├── .gitignore ├── Makefile ├── generic.R ├── icde19.tar.gz ├── icde19 │ ├── README.md │ ├── first.R │ ├── gmap.R │ ├── gset_gcounter.R │ ├── memory.R │ ├── metadata.R │ ├── retwis.R │ ├── retwis_processing.R │ └── second.R ├── icde19_revision.tar.gz ├── more_plots │ ├── crdt.R │ ├── plot1.R │ ├── plot4.R │ ├── plot5.R │ ├── processing.R │ └── retwis_latency.R ├── preprocess.py └── util.R ├── include └── exp.hrl ├── priv └── .gitkeep ├── rebar.config ├── rebar.config.script ├── rebar.lock ├── rebar3 ├── src ├── exp.app.src ├── exp_app.erl ├── exp_barrier_peer_service.erl ├── exp_barrier_peer_service_client.erl ├── exp_barrier_peer_service_server.erl ├── exp_config.erl ├── exp_kube_orchestration.erl ├── exp_local_simulations_support.erl ├── exp_orchestration.erl ├── exp_overlay.erl ├── exp_redis_metrics_store.erl ├── exp_resource.erl ├── exp_rsg.erl ├── exp_rsg_master.erl ├── exp_simulation_runner.erl ├── exp_simulations.erl ├── exp_simulations_support.erl ├── exp_sup.erl └── exp_util.erl └── test ├── exp_modes_SUITE.erl └── exp_simulations_SUITE.erl /.gitignore: -------------------------------------------------------------------------------- 1 | TEST-* 2 | _build/ 3 | log/ 4 | *.swo 5 | *.swp 6 | *.dump 7 | *.DS_Store 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: erlang 3 | otp_release: 4 | - 21.1 5 | install: 6 | - make 7 | before_script: 8 | - epmd -daemon 9 | script: 10 | - make test 11 | - make xref 12 | - make dialyzer 13 | - make lint 14 | - rebar3 coveralls send 15 | notifications: 16 | slack: haslab:jhAnACJzswFtYwKXpbZzjbFq 17 | email: false 18 | -------------------------------------------------------------------------------- /Dockerfiles/exp-base: -------------------------------------------------------------------------------- 1 | FROM erlang:20.2.2-slim 2 | 3 | MAINTAINER Vitor Enes 4 | 5 | 6 | RUN apt-get update && apt-get install -y software-properties-common 7 | -------------------------------------------------------------------------------- /Dockerfiles/exp-copy: -------------------------------------------------------------------------------- 1 | FROM vitorenesduarte/exp-base 2 | 3 | MAINTAINER Vitor Enes 4 | 5 | ARG profile=default 6 | 7 | ENV NAME=/exp 8 | 9 | COPY _build/${profile}/rel/$NAME $NAME 10 | COPY bin/zipf.py $NAME/bin 11 | 12 | WORKDIR $NAME 13 | 14 | CMD ["bin/env"] 15 | -------------------------------------------------------------------------------- /GCLOUD.md: -------------------------------------------------------------------------------- 1 | ### Kubernetes on Google Cloud 2 | 3 | - Install `gcloud` [link](https://cloud.google.com/sdk/downloads#apt-get) 4 | ```bash 5 | gcloud init 6 | ``` 7 | - zone: __us-east1-d__ 8 | 
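- The `bin/g-cluster.sh` script in this repository wraps cluster creation and deletion (with its own zone, machine type, and node count). A minimal sketch of the underlying `gcloud` calls, assuming the zone above; the cluster name, node count, and machine type below are illustrative:

```bash
# create a small GKE cluster for the experiments
gcloud container clusters create exp \
    --zone=us-east1-d \
    --num-nodes=3 \
    --machine-type=n1-standard-8 \
    --preemptible

# point kubectl at the new cluster
gcloud container clusters get-credentials exp --zone=us-east1-d

# delete the cluster when done
yes | gcloud container clusters delete exp --zone=us-east1-d
```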
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | 
177 |    END OF TERMS AND CONDITIONS
178 | 
--------------------------------------------------------------------------------
/LOCALKUBE.md:
--------------------------------------------------------------------------------
1 | ### Kubernetes on CentOS 7
2 | 
3 | [__[Original link]__](https://kubernetes.io/docs/getting-started-guides/kubeadm/)
4 | 
5 | - On all nodes:
6 | 
7 | ```bash
8 | vi d.sh
9 | ```
10 | 
11 | ```bash
12 | cat <<EOF > /etc/yum.repos.d/kubernetes.repo
13 | [kubernetes]
14 | name=Kubernetes
15 | baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
16 | enabled=1
17 | gpgcheck=1
18 | repo_gpgcheck=1
19 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
20 |        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
21 | EOF
22 | setenforce 0
23 | yum install -y docker kubelet kubeadm kubectl kubernetes-cni
24 | systemctl enable docker && systemctl start docker
25 | systemctl enable kubelet && systemctl start kubelet
26 | ```
27 | 
28 | ```bash
29 | sudo bash d.sh
30 | ```
31 | 
32 | - On master (with public IP __192.168.116.101__):
33 |   1. __Initialize__:
34 |      ```bash
35 |      sudo kubeadm init --api-advertise-addresses 192.168.116.101
36 |      ```
37 |   2. __Save the output__ (`kubeadm join ...`)
38 |   3. __Install a pod network__
39 |      ```bash
40 |      kubectl apply -f https://git.io/weave-kube
41 |      ```
42 |   4. __Copy config file__
43 |      ```bash
44 |      sudo cp /etc/kubernetes/admin.conf .
45 |      sudo chown centos admin.conf
46 |      exit
47 |      scp centos@192.168.116.101:admin.conf .
48 |      ```
49 | 
50 | - On the other nodes:
51 |   1. __Join cluster__:
52 |      ```bash
53 |      kubeadm join ...
54 |      ```
55 | 
56 | - On your machine:
57 | ```bash
58 | kubectl --kubeconfig ./admin.conf get nodes
59 | ```
60 | 
61 | 
62 | #### Dashboard
63 | To access the [dashboard](https://github.com/kubernetes/dashboard),
64 | first on your master:
65 | 
66 | ```bash
67 | kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml
68 | ```
69 | 
70 | And on your machine:
71 | ```bash
72 | kubectl proxy
73 | ```
74 | 
75 | Now the dashboard is available at [http://localhost:8001/ui](http://localhost:8001/ui).
76 | 
77 | 
78 | ##### Dashboard metrics
79 | For metrics we need to install [Heapster](https://github.com/kubernetes/heapster/).
On your master:
80 | 
81 | ```bash
82 | sudo yum install git
83 | git clone https://github.com/kubernetes/heapster
84 | cd heapster/
85 | kubectl create -f deploy/kube-config/influxdb/
86 | ```
87 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | PACKAGE ?= exp
2 | REBAR = $(shell pwd)/rebar3
3 | 
4 | .PHONY: test rel
5 | 
6 | all: compile
7 | 
8 | compile:
9 | 	$(REBAR) compile
10 | 
11 | check: test xref dialyzer lint
12 | 
13 | test: ct eunit
14 | 	${REBAR} cover -v
15 | 
16 | lint: erl-lint
17 | 
18 | erl-lint:
19 | 	${REBAR} as lint lint
20 | 
21 | shell-lint:
22 | 	ls -d bin/* | grep -v ".erl" | xargs shellcheck
23 | 
24 | docker-lint:
25 | 	for f in $$(ls -d Dockerfiles/*); do dockerlint $$f; done
26 | 
27 | eunit:
28 | 	${REBAR} eunit
29 | 
30 | ct:
31 | 	${REBAR} ct --readable=false --verbose
32 | 
33 | xref:
34 | 	${REBAR} xref skip_deps=true
35 | 
36 | dialyzer:
37 | 	${REBAR} dialyzer
38 | 
39 | cover: test
40 | 	open _build/test/cover/index.html
41 | 
42 | shell:
43 | 	${REBAR} shell --apps ${PACKAGE}
44 | 
45 | rel:
46 | 	rm -rf _build/default/rel/
47 | 	${REBAR} release
48 | 
49 | modes:
50 | 	pkill -9 beam.smp ; rm -rf priv/lager ; ${REBAR} ct --readable=false --verbose --suite exp_modes_SUITE
51 | 
52 | simulations:
53 | 	pkill -9 beam.smp ; rm -rf priv/lager ; ${REBAR} ct --readable=false --verbose --suite exp_simulations_SUITE
54 | 
55 | logs:
56 | 	tail -F priv/lager/*/log/*.log
57 | 
58 | run:
59 | 	_build/default/rel/${PACKAGE}/bin/env
60 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # exp
2 | 
3 | [![Build Status](https://img.shields.io/travis/vitorenesduarte/exp/master.svg)](https://travis-ci.org/vitorenesduarte/exp)
4 | [![Coverage Status](https://img.shields.io/coveralls/github/vitorenesduarte/exp/master.svg?maxAge=60)](https://coveralls.io/github/vitorenesduarte/exp?branch=master)
5 | 
6 | 
7 | This is the source code for our [ICDE 2019](https://vitorenes.org/publication/enes-efficient-synchronization/enes-efficient-synchronization.pdf) paper.
8 | 
9 | #### Experiments
10 | 
11 | - __LDB_MODE__:
12 |   - `state_based`
13 |   - `delta_based`
14 |   - `scuttlebutt`
15 |   - `op_based`
16 | - __LDB_STATE_SYNC_INTERVAL__: milliseconds
17 | - __LDB_REDUNDANT_DGROUPS__: boolean
18 | - __LDB_DGROUP_BACK_PROPAGATION__: boolean
19 | - __LDB_SCUTTLEBUTT_GC__: boolean
20 | - __LDB_OP_II__: boolean
21 | - __NODE_NUMBER__: number of nodes
22 | - __OVERLAY__:
23 |   - `fullmesh`
24 |   - `line`
25 |   - `ring`
26 |   - `partialmesh`
27 |   - `tree`
28 | - __SIMULATION__:
29 |   - `gcounter`
30 |   - `gset`
31 |   - `gmap`
32 |   - `retwis`
33 | - __NODE_EVENT_NUMBER__: number of events to be performed in
34 |   the simulation
35 | - __EVENT_INTERVAL__: milliseconds between events
36 | - __GMAP_SIMULATION_KEY_PERCENTAGE__: percentage of keys updated at each event in the gmap simulation
37 | - __RETWIS_ZIPF__: Zipf coefficient to be used in
38 |   the Retwis application
39 | 
40 | #### Run the experiments
41 | Please check [here](evaluation/icde19).
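For a single run outside the `bin/bench_*.sh` drivers, all of the variables above are exported and `bin/deploy-exp.sh` is invoked, which is exactly what those drivers do in a loop. A minimal sketch (the values below are illustrative, taken from one of the micro-benchmark configurations; `IMAGE` must point to an image built by `bin/image.sh`):

```bash
IMAGE=vitorenesduarte/exp-copy:latest \
PULL_IMAGE=Always \
LDB_MODE=delta_based \
LDB_STATE_SYNC_INTERVAL=1000 \
LDB_REDUNDANT_DGROUPS=true \
LDB_DGROUP_BACK_PROPAGATION=true \
LDB_SCUTTLEBUTT_GC=undefined \
LDB_OP_II=undefined \
OVERLAY=tree \
SIMULATION=gset \
GMAP_SIMULATION_KEY_PERCENTAGE=0 \
RETWIS_ZIPF=0 \
NODE_NUMBER=15 \
NODE_EVENT_NUMBER=100 \
EVENT_INTERVAL=1000 \
CPU=7 \
bin/deploy-exp.sh
```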
42 | 43 | 44 | 80 | -------------------------------------------------------------------------------- /bin/bench_metadata.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPS=1 4 | DIR=$(dirname "$0") 5 | PULL_IMAGE=Always 6 | 7 | # start redis 8 | "${DIR}"/redis-deploy.sh 9 | 10 | # ensures each node only 11 | # has one pod running (if nodes have 8 CPU) 12 | CPU=3 13 | 14 | # event number, event interval, state sync interval 15 | SPEED_CONFIG_=( 16 | "100 1000 1000" 17 | ) 18 | 19 | # overlay nodes 20 | OVERLAY_CONFIG_=( 21 | "partialmesh 16" 22 | "partialmesh 32" 23 | "partialmesh 64" 24 | ) 25 | 26 | # exp configuration retwis_zipf 27 | SIM_CONFIG_=( 28 | "gset 0 0" 29 | ) 30 | 31 | # ldb configuration 32 | # mode bp rr gc ii 33 | LDB_=( 34 | "op_based undefined undefined undefined false" 35 | "op_based undefined undefined undefined true" 36 | "scuttlebutt undefined undefined false undefined" 37 | "scuttlebutt undefined undefined true undefined" 38 | "delta_based true true undefined undefined" 39 | ) 40 | 41 | # number of experiments 42 | NEXP=$((${#OVERLAY_CONFIG_[@]} * ${#SIM_CONFIG_[@]} * ${#SPEED_CONFIG_[@]} * ${#LDB_[@]})) 43 | EXP=1 44 | 45 | echo "Found ${NEXP} configurations. Let's start!" 46 | 47 | # shellcheck disable=SC2034 48 | for REP in $(seq 1 $REPS); do 49 | for SPEED_CONFIG in "${SPEED_CONFIG_[@]}"; do 50 | SPEED_CONFIG=($(echo ${SPEED_CONFIG} | tr ' ' '\n')) 51 | NODE_EVENT_NUMBER=${SPEED_CONFIG[0]} 52 | EVENT_INTERVAL=${SPEED_CONFIG[1]} 53 | LDB_STATE_SYNC_INTERVAL=${SPEED_CONFIG[2]} 54 | 55 | for OVERLAY_CONFIG in "${OVERLAY_CONFIG_[@]}"; do 56 | OVERLAY_CONFIG=($(echo ${OVERLAY_CONFIG} | tr ' ' '\n')) 57 | OVERLAY=${OVERLAY_CONFIG[0]} 58 | NODE_NUMBER=${OVERLAY_CONFIG[1]} 59 | 60 | for SIM_CONFIG in "${SIM_CONFIG_[@]}"; do 61 | SIM_CONFIG=($(echo ${SIM_CONFIG} | tr ' ' '\n')) 62 | SIMULATION=${SIM_CONFIG[0]} 63 | GMAP_SIMULATION_KEY_PERCENTAGE=${SIM_CONFIG[1]} 64 | RETWIS_ZIPF=${SIM_CONFIG[2]} 65 | 66 | for LDB in "${LDB_[@]}"; do 67 | LDB=($(echo ${LDB} | tr ' ' '\n')) 68 | LDB_MODE=${LDB[0]} 69 | LDB_DGROUP_BACK_PROPAGATION=${LDB[1]} 70 | LDB_REDUNDANT_DGROUPS=${LDB[2]} 71 | LDB_SCUTTLEBUTT_GC=${LDB[3]} 72 | LDB_OP_II=${LDB[4]} 73 | 74 | IMAGE=${IMAGE} \ 75 | PULL_IMAGE=${PULL_IMAGE} \ 76 | LDB_MODE=${LDB_MODE} \ 77 | LDB_STATE_SYNC_INTERVAL=${LDB_STATE_SYNC_INTERVAL} \ 78 | LDB_DGROUP_BACK_PROPAGATION=${LDB_DGROUP_BACK_PROPAGATION} \ 79 | LDB_REDUNDANT_DGROUPS=${LDB_REDUNDANT_DGROUPS} \ 80 | LDB_SCUTTLEBUTT_GC=${LDB_SCUTTLEBUTT_GC} \ 81 | LDB_OP_II=${LDB_OP_II} \ 82 | OVERLAY=${OVERLAY} \ 83 | SIMULATION=${SIMULATION} \ 84 | GMAP_SIMULATION_KEY_PERCENTAGE=${GMAP_SIMULATION_KEY_PERCENTAGE} \ 85 | RETWIS_ZIPF=${RETWIS_ZIPF} \ 86 | NODE_NUMBER=${NODE_NUMBER} \ 87 | NODE_EVENT_NUMBER=${NODE_EVENT_NUMBER} \ 88 | EVENT_INTERVAL=${EVENT_INTERVAL} \ 89 | CPU=${CPU} "${DIR}"/deploy-exp.sh 90 | 91 | echo "[$(date +%T)] ${EXP} of ${NEXP} ended!" 
92 | EXP=$((EXP + 1)) 93 | done 94 | done 95 | done 96 | done 97 | done 98 | -------------------------------------------------------------------------------- /bin/bench_micro.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPS=1 4 | DIR=$(dirname "$0") 5 | PULL_IMAGE=Always 6 | 7 | # start redis 8 | "${DIR}"/redis-deploy.sh 9 | 10 | # ensures each node only 11 | # has one pod running (if nodes have 8 CPU) 12 | CPU=7 13 | 14 | # event number, event interval, state sync interval 15 | SPEED_CONFIG_=( 16 | "100 1000 1000" 17 | ) 18 | 19 | # overlay nodes 20 | OVERLAY_CONFIG_=( 21 | "partialmesh 15" 22 | "tree 15" 23 | ) 24 | 25 | # exp configuration retwis_zipf 26 | SIM_CONFIG_=( 27 | "gset 0 0" 28 | "gcounter 0 0" 29 | "gmap 10 0" 30 | "gmap 30 0" 31 | "gmap 60 0" 32 | "gmap 100 0" 33 | ) 34 | 35 | # ldb configuration 36 | # mode bp rr gc ii 37 | LDB_=( 38 | "state_based undefined undefined undefined undefined" 39 | "op_based undefined undefined undefined false" 40 | "op_based undefined undefined undefined true" 41 | "scuttlebutt undefined undefined false undefined" 42 | "scuttlebutt undefined undefined true undefined" 43 | "delta_based false false undefined undefined" 44 | "delta_based true false undefined undefined" 45 | "delta_based false true undefined undefined" 46 | "delta_based true true undefined undefined" 47 | ) 48 | 49 | # number of experiments 50 | NEXP=$((${#OVERLAY_CONFIG_[@]} * ${#SIM_CONFIG_[@]} * ${#SPEED_CONFIG_[@]} * ${#LDB_[@]})) 51 | EXP=1 52 | 53 | echo "Found ${NEXP} configurations. Let's start!" 54 | 55 | # shellcheck disable=SC2034 56 | for REP in $(seq 1 $REPS); do 57 | for SPEED_CONFIG in "${SPEED_CONFIG_[@]}"; do 58 | SPEED_CONFIG=($(echo ${SPEED_CONFIG} | tr ' ' '\n')) 59 | NODE_EVENT_NUMBER=${SPEED_CONFIG[0]} 60 | EVENT_INTERVAL=${SPEED_CONFIG[1]} 61 | LDB_STATE_SYNC_INTERVAL=${SPEED_CONFIG[2]} 62 | 63 | for OVERLAY_CONFIG in "${OVERLAY_CONFIG_[@]}"; do 64 | OVERLAY_CONFIG=($(echo ${OVERLAY_CONFIG} | tr ' ' '\n')) 65 | OVERLAY=${OVERLAY_CONFIG[0]} 66 | NODE_NUMBER=${OVERLAY_CONFIG[1]} 67 | 68 | for SIM_CONFIG in "${SIM_CONFIG_[@]}"; do 69 | SIM_CONFIG=($(echo ${SIM_CONFIG} | tr ' ' '\n')) 70 | SIMULATION=${SIM_CONFIG[0]} 71 | GMAP_SIMULATION_KEY_PERCENTAGE=${SIM_CONFIG[1]} 72 | RETWIS_ZIPF=${SIM_CONFIG[2]} 73 | 74 | for LDB in "${LDB_[@]}"; do 75 | LDB=($(echo ${LDB} | tr ' ' '\n')) 76 | LDB_MODE=${LDB[0]} 77 | LDB_DGROUP_BACK_PROPAGATION=${LDB[1]} 78 | LDB_REDUNDANT_DGROUPS=${LDB[2]} 79 | LDB_SCUTTLEBUTT_GC=${LDB[3]} 80 | LDB_OP_II=${LDB[4]} 81 | 82 | IMAGE=${IMAGE} \ 83 | PULL_IMAGE=${PULL_IMAGE} \ 84 | LDB_MODE=${LDB_MODE} \ 85 | LDB_STATE_SYNC_INTERVAL=${LDB_STATE_SYNC_INTERVAL} \ 86 | LDB_DGROUP_BACK_PROPAGATION=${LDB_DGROUP_BACK_PROPAGATION} \ 87 | LDB_REDUNDANT_DGROUPS=${LDB_REDUNDANT_DGROUPS} \ 88 | LDB_SCUTTLEBUTT_GC=${LDB_SCUTTLEBUTT_GC} \ 89 | LDB_OP_II=${LDB_OP_II} \ 90 | OVERLAY=${OVERLAY} \ 91 | SIMULATION=${SIMULATION} \ 92 | GMAP_SIMULATION_KEY_PERCENTAGE=${GMAP_SIMULATION_KEY_PERCENTAGE} \ 93 | RETWIS_ZIPF=${RETWIS_ZIPF} \ 94 | NODE_NUMBER=${NODE_NUMBER} \ 95 | NODE_EVENT_NUMBER=${NODE_EVENT_NUMBER} \ 96 | EVENT_INTERVAL=${EVENT_INTERVAL} \ 97 | CPU=${CPU} "${DIR}"/deploy-exp.sh 98 | 99 | echo "[$(date +%T)] ${EXP} of ${NEXP} ended!" 
100 | EXP=$((EXP + 1)) 101 | done 102 | done 103 | done 104 | done 105 | done 106 | -------------------------------------------------------------------------------- /bin/bench_retwis.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPS=1 4 | DIR=$(dirname "$0") 5 | PULL_IMAGE=Always 6 | 7 | # start redis 8 | "${DIR}"/redis-deploy.sh 9 | 10 | # ensures each node only 11 | # has one pod running (if nodes have 8 CPU) 12 | CPU=7 13 | 14 | # event number, event interval, state sync interval 15 | SPEED_CONFIG_=( 16 | "100 1000 1000" 17 | ) 18 | 19 | # overlay nodes 20 | OVERLAY_CONFIG_=( 21 | "partialmesh 50" 22 | ) 23 | 24 | # exp configuration retwis_zipf 25 | SIM_CONFIG_=( 26 | "retwis 0 25" 27 | "retwis 0 50" 28 | "retwis 0 75" 29 | "retwis 0 100" 30 | "retwis 0 125" 31 | "retwis 0 150" 32 | ) 33 | 34 | # ldb configuration 35 | # mode bp rr gc ii 36 | LDB_=( 37 | "delta_based false false undefined undefined" 38 | "delta_based true true undefined undefined" 39 | ) 40 | 41 | # number of experiments 42 | NEXP=$((${#OVERLAY_CONFIG_[@]} * ${#SIM_CONFIG_[@]} * ${#SPEED_CONFIG_[@]} * ${#LDB_[@]})) 43 | EXP=1 44 | 45 | echo "Found ${NEXP} configurations. Let's start!" 46 | 47 | # shellcheck disable=SC2034 48 | for REP in $(seq 1 $REPS); do 49 | for SPEED_CONFIG in "${SPEED_CONFIG_[@]}"; do 50 | SPEED_CONFIG=($(echo ${SPEED_CONFIG} | tr ' ' '\n')) 51 | NODE_EVENT_NUMBER=${SPEED_CONFIG[0]} 52 | EVENT_INTERVAL=${SPEED_CONFIG[1]} 53 | LDB_STATE_SYNC_INTERVAL=${SPEED_CONFIG[2]} 54 | 55 | for OVERLAY_CONFIG in "${OVERLAY_CONFIG_[@]}"; do 56 | OVERLAY_CONFIG=($(echo ${OVERLAY_CONFIG} | tr ' ' '\n')) 57 | OVERLAY=${OVERLAY_CONFIG[0]} 58 | NODE_NUMBER=${OVERLAY_CONFIG[1]} 59 | 60 | for SIM_CONFIG in "${SIM_CONFIG_[@]}"; do 61 | SIM_CONFIG=($(echo ${SIM_CONFIG} | tr ' ' '\n')) 62 | SIMULATION=${SIM_CONFIG[0]} 63 | GMAP_SIMULATION_KEY_PERCENTAGE=${SIM_CONFIG[1]} 64 | RETWIS_ZIPF=${SIM_CONFIG[2]} 65 | 66 | for LDB in "${LDB_[@]}"; do 67 | LDB=($(echo ${LDB} | tr ' ' '\n')) 68 | LDB_MODE=${LDB[0]} 69 | LDB_DGROUP_BACK_PROPAGATION=${LDB[1]} 70 | LDB_REDUNDANT_DGROUPS=${LDB[2]} 71 | LDB_SCUTTLEBUTT_GC=${LDB[3]} 72 | LDB_OP_II=${LDB[4]} 73 | 74 | IMAGE=${IMAGE} \ 75 | PULL_IMAGE=${PULL_IMAGE} \ 76 | LDB_MODE=${LDB_MODE} \ 77 | LDB_STATE_SYNC_INTERVAL=${LDB_STATE_SYNC_INTERVAL} \ 78 | LDB_DGROUP_BACK_PROPAGATION=${LDB_DGROUP_BACK_PROPAGATION} \ 79 | LDB_REDUNDANT_DGROUPS=${LDB_REDUNDANT_DGROUPS} \ 80 | LDB_SCUTTLEBUTT_GC=${LDB_SCUTTLEBUTT_GC} \ 81 | LDB_OP_II=${LDB_OP_II} \ 82 | OVERLAY=${OVERLAY} \ 83 | SIMULATION=${SIMULATION} \ 84 | GMAP_SIMULATION_KEY_PERCENTAGE=${GMAP_SIMULATION_KEY_PERCENTAGE} \ 85 | RETWIS_ZIPF=${RETWIS_ZIPF} \ 86 | NODE_NUMBER=${NODE_NUMBER} \ 87 | NODE_EVENT_NUMBER=${NODE_EVENT_NUMBER} \ 88 | EVENT_INTERVAL=${EVENT_INTERVAL} \ 89 | CPU=${CPU} "${DIR}"/deploy-exp.sh 90 | 91 | echo "[$(date +%T)] ${EXP} of ${NEXP} ended!" 
92 |             EXP=$((EXP + 1))
93 |         done
94 |       done
95 |     done
96 |   done
97 | done
98 | 
--------------------------------------------------------------------------------
/bin/dash-proxy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | POD_NAME=$(kubectl get pods |
4 |     grep lsim-dash |
5 |     grep Running |
6 |     awk '{print $1}')
7 | 
8 | POD_PORT=3000
9 | PORT=$RANDOM
10 | open "http://localhost:${PORT}"
11 | kubectl port-forward "${POD_NAME}" ${PORT}:${POD_PORT}
12 | 
--------------------------------------------------------------------------------
/bin/deploy-exp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | DIR=$(dirname "$0")
4 | 
5 | ENV_VARS=(
6 |     IMAGE
7 |     PULL_IMAGE
8 |     LDB_MODE
9 |     LDB_STATE_SYNC_INTERVAL
10 |     LDB_REDUNDANT_DGROUPS
11 |     LDB_DGROUP_BACK_PROPAGATION
12 |     LDB_SCUTTLEBUTT_GC
13 |     LDB_OP_II
14 |     OVERLAY
15 |     SIMULATION
16 |     GMAP_SIMULATION_KEY_PERCENTAGE
17 |     RETWIS_ZIPF
18 |     NODE_NUMBER
19 |     NODE_EVENT_NUMBER
20 |     EVENT_INTERVAL
21 |     CPU
22 | )
23 | 
24 | for ENV_VAR in "${ENV_VARS[@]}"; do
25 |     if [ -z "${!ENV_VAR}" ]; then
26 |         echo ">>> ${ENV_VAR} is not configured; please export it."
27 |         exit 1
28 |     fi
29 | done
30 | 
31 | echo "[$(date +%T)] Configuration: "
32 | echo "  IMAGE: ${IMAGE}"
33 | echo "  PULL_IMAGE: ${PULL_IMAGE}"
34 | echo "  LDB_MODE: ${LDB_MODE}"
35 | echo "  LDB_STATE_SYNC_INTERVAL: ${LDB_STATE_SYNC_INTERVAL}"
36 | echo "  LDB_REDUNDANT_DGROUPS: ${LDB_REDUNDANT_DGROUPS}"
37 | echo "  LDB_DGROUP_BACK_PROPAGATION: ${LDB_DGROUP_BACK_PROPAGATION}"
38 | echo "  LDB_SCUTTLEBUTT_GC: ${LDB_SCUTTLEBUTT_GC}"
39 | echo "  LDB_OP_II: ${LDB_OP_II}"
40 | echo "  OVERLAY: ${OVERLAY}"
41 | echo "  SIMULATION: ${SIMULATION}"
42 | echo "  GMAP_SIMULATION_KEY_PERCENTAGE: ${GMAP_SIMULATION_KEY_PERCENTAGE}"
43 | echo "  RETWIS_ZIPF: ${RETWIS_ZIPF}"
44 | echo "  NODE_NUMBER: ${NODE_NUMBER}"
45 | echo "  NODE_EVENT_NUMBER: ${NODE_EVENT_NUMBER}"
46 | echo "  EVENT_INTERVAL: ${EVENT_INTERVAL}"
47 | echo "  CPU: ${CPU}"
48 | 
49 | # ENV SETUP:
50 | # Kubernetes server and auth token
51 | APISERVER=$(bin/k8s_api_server.sh)
52 | TOKEN=$(bin/k8s_api_token.sh)
53 | 
54 | ORCHESTRATION=kubernetes
55 | METRICS_STORE=redis
56 | 
57 | # Port
58 | PEER_PORT=6866
59 | 
60 | init_exp() {
61 | 
62 |     # Evaluation timestamp: unix timestamp + random
63 |     R=$(echo $RANDOM + 10000 | bc)
64 |     TIMESTAMP=$(date +%s)${R}
65 | 
66 |     # DEPLOYMENT:
67 |     # Deployment names
68 |     RSG_NAME=rsg-${TIMESTAMP}
69 |     EXP_NAME=exp-${TIMESTAMP}
70 | 
71 |     # YAML file
72 |     FILE=/tmp/${TIMESTAMP}.yaml
73 | 
74 |     cat <<EOF >"${FILE}"
75 | apiVersion: extensions/v1beta1
76 | kind: Deployment
77 | metadata:
78 |   name: "${RSG_NAME}"
79 | spec:
80 |   replicas: 1
81 |   template:
82 |     metadata:
83 |       labels:
84 |         timestamp: "${TIMESTAMP}"
85 |         tag: rsg
86 |     spec:
87 |       containers:
88 |       - name: "${RSG_NAME}"
89 |         image: "${IMAGE}"
90 |         imagePullPolicy: "${PULL_IMAGE}"
91 |         env:
92 |         - name: ERL_MAX_PORTS
93 |           value: "10000"
94 |         - name: ORCHESTRATION
95 |           value: "${ORCHESTRATION}"
96 |         - name: METRICS_STORE
97 |           value: "${METRICS_STORE}"
98 |         - name: LDB_IP
99 |           valueFrom:
100 |             fieldRef:
101 |               fieldPath: status.podIP
102 |         - name: APISERVER
103 |           value: "${APISERVER}"
104 |         - name: TOKEN
105 |           value: "${TOKEN}"
106 |         - name: TIMESTAMP
107 |           value: "${TIMESTAMP}"
108 |         - name: LDB_MODE
109 |           value: "${LDB_MODE}"
110 |         - name: LDB_STATE_SYNC_INTERVAL
111 |           value: "${LDB_STATE_SYNC_INTERVAL}"
112 |         - name: LDB_REDUNDANT_DGROUPS
113 |
value: "${LDB_REDUNDANT_DGROUPS}" 114 | - name: LDB_DGROUP_BACK_PROPAGATION 115 | value: "${LDB_DGROUP_BACK_PROPAGATION}" 116 | - name: LDB_SCUTTLEBUTT_GC 117 | value: "${LDB_SCUTTLEBUTT_GC}" 118 | - name: LDB_OP_II 119 | value: "${LDB_OP_II}" 120 | - name: OVERLAY 121 | value: "${OVERLAY}" 122 | - name: SIMULATION 123 | value: "${SIMULATION}" 124 | - name: GMAP_SIMULATION_KEY_PERCENTAGE 125 | value: "${GMAP_SIMULATION_KEY_PERCENTAGE}" 126 | - name: RETWIS_ZIPF 127 | value: "${RETWIS_ZIPF}" 128 | - name: NODE_NUMBER 129 | value: "${NODE_NUMBER}" 130 | - name: NODE_EVENT_NUMBER 131 | value: "${NODE_EVENT_NUMBER}" 132 | - name: EVENT_INTERVAL 133 | value: "${EVENT_INTERVAL}" 134 | - name: RSG 135 | value: "true" 136 | --- 137 | apiVersion: extensions/v1beta1 138 | kind: Deployment 139 | metadata: 140 | name: "${EXP_NAME}" 141 | spec: 142 | replicas: ${NODE_NUMBER} 143 | template: 144 | metadata: 145 | labels: 146 | timestamp: "${TIMESTAMP}" 147 | tag: exp 148 | spec: 149 | containers: 150 | - name: "${EXP_NAME}" 151 | image: "${IMAGE}" 152 | imagePullPolicy: "${PULL_IMAGE}" 153 | resources: 154 | requests: 155 | cpu: "${CPU}" 156 | securityContext: 157 | privileged: true 158 | env: 159 | - name: ERL_MAX_PORTS 160 | value: "10000" 161 | - name: ORCHESTRATION 162 | value: "${ORCHESTRATION}" 163 | - name: METRICS_STORE 164 | value: "${METRICS_STORE}" 165 | - name: LDB_IP 166 | valueFrom: 167 | fieldRef: 168 | fieldPath: status.podIP 169 | - name: LDB_PORT 170 | value: "6866" 171 | - name: APISERVER 172 | value: "${APISERVER}" 173 | - name: TOKEN 174 | value: "${TOKEN}" 175 | - name: TIMESTAMP 176 | value: "${TIMESTAMP}" 177 | - name: LDB_MODE 178 | value: "${LDB_MODE}" 179 | - name: LDB_STATE_SYNC_INTERVAL 180 | value: "${LDB_STATE_SYNC_INTERVAL}" 181 | - name: LDB_REDUNDANT_DGROUPS 182 | value: "${LDB_REDUNDANT_DGROUPS}" 183 | - name: LDB_DGROUP_BACK_PROPAGATION 184 | value: "${LDB_DGROUP_BACK_PROPAGATION}" 185 | - name: LDB_SCUTTLEBUTT_GC 186 | value: "${LDB_SCUTTLEBUTT_GC}" 187 | - name: LDB_OP_II 188 | value: "${LDB_OP_II}" 189 | - name: OVERLAY 190 | value: "${OVERLAY}" 191 | - name: SIMULATION 192 | value: "${SIMULATION}" 193 | - name: GMAP_SIMULATION_KEY_PERCENTAGE 194 | value: "${GMAP_SIMULATION_KEY_PERCENTAGE}" 195 | - name: RETWIS_ZIPF 196 | value: "${RETWIS_ZIPF}" 197 | - name: NODE_NUMBER 198 | value: "${NODE_NUMBER}" 199 | - name: NODE_EVENT_NUMBER 200 | value: "${NODE_EVENT_NUMBER}" 201 | - name: EVENT_INTERVAL 202 | value: "${EVENT_INTERVAL}" 203 | - name: RSG 204 | value: "false" 205 | EOF 206 | 207 | kubectl create -f "${FILE}" 208 | sleep 3 209 | 210 | while [ $(kubectl get pods 2>/dev/null | grep exp- | grep Running | wc -l) -ne ${NODE_NUMBER} ]; do 211 | echo "nodes are not up yet..." 212 | sleep 3 213 | done 214 | echo "nodes are up!" 
215 | 216 | # event number * event interval 217 | # - multiply by 5 (worst case; should never happen) 218 | START_TIME=$(date +%s) 219 | MAX_EXPECTED_TIME=$((5 * ${NODE_EVENT_NUMBER} * ${EVENT_INTERVAL} / 1000)) 220 | 221 | } 222 | 223 | # initialize all needed variables 224 | init_exp 225 | 226 | # wait until the end of the experiment 227 | while [ $(kubectl get pods -l timestamp=${TIMESTAMP} 2>/dev/null | wc -l) -gt 0 ]; do 228 | sleep 3 229 | 230 | MAX_TIME=$((${START_TIME} + ${MAX_EXPECTED_TIME})) 231 | CURRENT_TIME=$(date +%s) 232 | 233 | # maybe restart, if running for too long 234 | if [ ${CURRENT_TIME} -gt ${MAX_TIME} ]; then 235 | kubectl delete -f "${FILE}" 236 | while [ $(kubectl get pods -l timestamp=${TIMESTAMP} 2>/dev/null | wc -l) -gt 0 ]; do 237 | sleep 1 238 | done 239 | init_exp 240 | fi 241 | done 242 | 243 | # fetch logs from redis 244 | ${DIR}/start-redis-sync.sh 245 | 246 | echo "[$(date +%T)] Done!" 247 | -------------------------------------------------------------------------------- /bin/end_status.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | MAX_LINES=50 4 | 5 | show_pod() { 6 | pod=$1 7 | status=$(kubectl logs ${pod} --since=10s | 8 | grep -Eo "Events observed [0-9]+" | 9 | tail -n 1 | 10 | awk '{ print $3 }') 11 | echo "${pod}: ${status}" 12 | } 13 | 14 | pods=$(kubectl get pods --no-headers | 15 | grep Running | 16 | grep exp- | 17 | sort -R | 18 | head -n ${MAX_LINES} | 19 | awk '{ print $1 }') 20 | 21 | for pod in ${pods[@]}; do 22 | show_pod ${pod} & 23 | done 24 | wait 25 | -------------------------------------------------------------------------------- /bin/env: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # LDB_IP 4 | if [ -z "${LDB_IP}" ]; then 5 | export LDB_IP=127.0.0.1 6 | fi 7 | 8 | # Peer Port 9 | if [ -z "${LDB_PORT}" ]; then 10 | export LDB_PORT=6866 11 | fi 12 | 13 | export NODE_NAME=exp-${LDB_PORT}@${LDB_IP} 14 | 15 | if [ -z "${COOKIE}" ]; then 16 | export COOKIE=exp 17 | fi 18 | 19 | export RELX_REPLACE_OS_VARS=true 20 | 21 | echo "LDB_IP: ${LDB_IP}" 22 | echo "LDB_PORT: ${LDB_PORT}" 23 | echo "NODE_NAME: ${NODE_NAME}" 24 | 25 | RELNAME=$(dirname "$0")/exp 26 | exec "${RELNAME}" foreground "$@" 27 | -------------------------------------------------------------------------------- /bin/fmt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install https://github.com/mvdan/sh 4 | 5 | DIR=$(dirname "${BASH_SOURCE[0]}") 6 | 7 | for f in $(ls -d ${DIR}/*.sh); do 8 | shfmt -i 4 -w ${f} 9 | done 10 | -------------------------------------------------------------------------------- /bin/g-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DIR=$(dirname "$0") 4 | 5 | NUM_NODES=3 6 | ZONE=europe-north1-b 7 | NAME=exp 8 | MACHINE=n1-standard-8 9 | MACHINE=n1-standard-1 10 | VERSION=1.9.7-gke.6 11 | 12 | if [ "$1" = "start" ]; then 13 | 14 | # create the cluster 15 | gcloud container clusters \ 16 | create ${NAME} \ 17 | --zone=${ZONE} \ 18 | --num-nodes=${NUM_NODES} \ 19 | --machine-type=${MACHINE} \ 20 | --cluster-version ${VERSION} \ 21 | --preemptible 22 | 23 | elif [ "$1" = "stop" ]; then 24 | 25 | # delete the cluster 26 | yes | gcloud container clusters \ 27 | delete ${NAME} 28 | 29 | fi 30 | -------------------------------------------------------------------------------- 
/bin/icde19-exp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | DIR=$(dirname "$0")
4 | 
5 | IMAGE=vitorenesduarte/exp-copy:latest
6 | 
7 | IMAGE=${IMAGE} ${DIR}/bench_micro.sh
8 | IMAGE=${IMAGE} ${DIR}/bench_metadata.sh
9 | IMAGE=${IMAGE} ${DIR}/bench_retwis.sh
10 | 
--------------------------------------------------------------------------------
/bin/image.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | if [ -z "$1" ]; then
4 |     TAG=latest
5 | else
6 |     TAG="$1"
7 | fi
8 | 
9 | DIR=$(dirname "$0")
10 | IMAGE=vitorenesduarte/exp-copy:${TAG}
11 | DOCKERFILE=${DIR}/../Dockerfiles/exp-copy
12 | 
13 | # build the release
14 | cd "${DIR}"/.. && make rel && cd -
15 | 
16 | # build image
17 | docker build \
18 |     --no-cache \
19 |     -t "${IMAGE}" \
20 |     -f "${DOCKERFILE}" .
21 | 
22 | # push image
23 | docker push "${IMAGE}"
24 | 
--------------------------------------------------------------------------------
/bin/k8s_api_server.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | function get_api_server() {
4 |     ###
5 |     # Return the API server of the current context.
6 |     ###
7 | 
8 |     kubectl config view --minify=true | grep server | awk '{ print $2 }'
9 | }
10 | 
11 | get_api_server
12 | 
--------------------------------------------------------------------------------
/bin/k8s_api_token.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | function get_api_token() {
4 |     ###
5 |     # Return the API server token of the current context.
6 |     ###
7 | 
8 |     local _secret=$(kubectl get secrets | grep default | awk '{ print $1 }')
9 |     kubectl describe secret ${_secret} | grep -E "^token" | awk '{ print $2 }'
10 | }
11 | 
12 | get_api_token
13 | 
--------------------------------------------------------------------------------
/bin/lsim-dash-deploy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | RUNNING=$(kubectl get pods |
4 |     grep lsim-dash |
5 |     grep Running)
6 | 
7 | if [ ! -z "$RUNNING" ]; then
8 |     echo "[$(date +%T)] lsim-dash already running. Exiting."
9 |     exit
10 | fi
11 | 
12 | # ENV SETUP:
13 | # Kubernetes server and auth token
14 | APISERVER=$(bin/k8s_api_server.sh)
15 | TOKEN=$(bin/k8s_api_token.sh)
16 | 
17 | # YAML file
18 | FILE=/tmp/lsim-dash.yaml
19 | 
20 | cat <<EOF >${FILE}
21 | apiVersion: extensions/v1beta1
22 | kind: Deployment
23 | metadata:
24 |   name: lsim-dash
25 | spec:
26 |   replicas: 1
27 |   template:
28 |     metadata:
29 |       labels:
30 |         tag: lsim-dash
31 |     spec:
32 |       containers:
33 |       - name: lsim-dash
34 |         image: vitorenesduarte/lsim-dash
35 |         imagePullPolicy: IfNotPresent
36 |         env:
37 |         - name: APISERVER
38 |           value: "${APISERVER}"
39 |         - name: TOKEN
40 |           value: "${TOKEN}"
41 | EOF
42 | 
43 | kubectl create -f ${FILE}
44 | 
--------------------------------------------------------------------------------
/bin/redis-deploy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | RUNNING=$(kubectl get pods |
4 |     grep redis |
5 |     grep Running)
6 | 
7 | if [ ! -z "$RUNNING" ]; then
8 |     echo "[$(date +%T)] Redis already running. Exiting."
9 |     exit
10 | fi
11 | 
12 | # YAML file
13 | FILE=/tmp/redis.yaml
14 | 
15 | cat <<EOF >${FILE}
16 | apiVersion: extensions/v1beta1
17 | kind: Deployment
18 | metadata:
19 |   name: redis
20 | spec:
21 |   replicas: 1
22 |   template:
23 |     metadata:
24 |       labels:
25 |         tag: redis
26 |     spec:
27 |       containers:
28 |       - name: redis
29 |         image: redis
30 |         imagePullPolicy: IfNotPresent
31 | EOF
32 | 
33 | kubectl create -f ${FILE}
34 | 
35 | while [ $(kubectl get pods 2>/dev/null | grep redis | grep Running | wc -l) -eq 0 ]; do
36 |     echo "redis is not up yet..."
37 |     sleep 3
38 | done
39 | 
--------------------------------------------------------------------------------
/bin/redis-sync.erl:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env escript
2 | 
3 | %%! -pa _build/default/lib/eredis/ebin/
4 | 
5 | main(_) ->
6 |     %% connect to redis
7 |     redis(connect),
8 | 
9 |     %% get all the redis keys
10 |     Keys = redis(fetch_keys),
11 |     io:format("Found ~p keys!~n", [length(Keys)]),
12 | 
13 |     %% get all non-existing keys
14 |     NonExisting = [Key || Key <- Keys, not file(exists, Key)],
15 |     KeyNumber = length(NonExisting),
16 |     io:format("Non-existing keys: ~p~n", [NonExisting]),
17 | 
18 |     lists:foreach(
19 |         fun({Index, Key}) ->
20 |             io:format("(~p of ~p)~n", [Index, KeyNumber]),
21 |             File = redis(fetch_key, Key),
22 |             file(save, Key, File)
23 |         end,
24 |         lists:zip(lists:seq(1, KeyNumber), NonExisting)
25 |     ),
26 | 
27 |     ok.
28 | 
29 | %% @private
30 | redis(connect) ->
31 |     {ok, Redis} = eredis:start_link(),
32 |     put(redis, Redis);
33 | redis(fetch_keys) ->
34 |     {ok, Keys} = eredis:q(get(redis), ["KEYS", "*"], infinity),
35 |     lists:map(fun(Key) -> binary_to_list(Key) end, Keys).
36 | redis(fetch_key, Key) ->
37 |     {ok, File} = eredis:q(get(redis), ["GET", Key], infinity),
38 |     File.
39 | 
40 | %% @private
41 | file(path, Key) ->
42 |     filename:join(
43 |         os:getenv("METRICS_DIR"),
44 |         Key
45 |     );
46 | file(exists, Key) ->
47 |     Path = file(path, Key),
48 |     filelib:is_file(Path).
49 | file(save, Key, File) ->
50 |     Path = file(path, Key),
51 |     ok = filelib:ensure_dir(Path),
52 |     ok = file:write_file(Path, File).
53 | 
--------------------------------------------------------------------------------
/bin/start-redis-sync.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | POD_NAME=$(kubectl get pods |
4 |     grep redis |
5 |     grep Running |
6 |     awk '{print $1}')
7 | 
8 | PORT=6379
9 | DIR=$(dirname "$0")
10 | METRICS_DIR=${DIR}/../evaluation/metrics
11 | 
12 | kubectl port-forward "${POD_NAME}" ${PORT}:${PORT} &
13 | TUNNEL_PID=$!
14 | 
15 | echo "[$(date +%T)] Port forwarding starting..."
16 | 
17 | while [ "$(lsof -i:${PORT})" == "" ]; do
18 |     sleep 1
19 | done
20 | 
21 | cd "${DIR}"/..
22 | METRICS_DIR=${METRICS_DIR} "${DIR}"/redis-sync.erl
23 | 
24 | echo "[$(date +%T)] All files downloaded!"
25 | 26 | kill ${TUNNEL_PID} 27 | -------------------------------------------------------------------------------- /bin/status.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | MAX_LINES=20 4 | 5 | show_pod() { 6 | pod=$1 7 | status=$(kubectl logs ${pod} --since=10s | 8 | grep -Eo "Event [0-9]+ \| Observed [0-9]+" | 9 | tail -n 1 | 10 | awk '{ print $2" of " $5 }') 11 | echo "${pod}: ${status}" 12 | } 13 | 14 | pods=$(kubectl get pods --no-headers | 15 | grep Running | 16 | grep exp- | 17 | sort -R | 18 | head -n ${MAX_LINES} | 19 | awk '{ print $1 }') 20 | 21 | for pod in ${pods[@]}; do 22 | show_pod ${pod} & 23 | done 24 | wait 25 | -------------------------------------------------------------------------------- /bin/stop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | delete_all() { 4 | kubectl delete deployment --all 5 | kubectl delete pods --all 6 | } 7 | 8 | while [ "${empty}" != "0" ]; do 9 | empty=$(kubectl get pods 2>/dev/null | 10 | grep -E "(Running|Terminating)" | 11 | wc -l | 12 | xargs echo 13 | ) 14 | delete_all 15 | sleep 1 16 | done 17 | -------------------------------------------------------------------------------- /bin/zipf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import random 4 | import bisect 5 | import math 6 | from functools import reduce 7 | import sys 8 | 9 | class ZipfGenerator: 10 | 11 | def __init__(self, n, alpha): 12 | # Calculate Zeta values from 1 to n: 13 | tmp = [1. / (math.pow(float(i), alpha)) for i in range(1, n+1)] 14 | zeta = reduce(lambda sums, x: sums + [sums[-1] + x], tmp, [0]) 15 | 16 | # Store the translation map: 17 | self.distMap = [x / zeta[-1] for x in zeta] 18 | 19 | def next(self): 20 | # Take a uniform 0-1 pseudo-random value: 21 | u = random.random() 22 | 23 | # Translate the Zipf variable: 24 | return bisect.bisect(self.distMap, u) - 1 25 | 26 | if len(sys.argv) == 4: 27 | n = int(sys.argv[1]) 28 | alpha = float(sys.argv[2]) 29 | events = int(sys.argv[3]) 30 | gen = ZipfGenerator(n, alpha) 31 | for i in range(events): 32 | print(gen.next()) 33 | -------------------------------------------------------------------------------- /config/vm.args: -------------------------------------------------------------------------------- 1 | ## Name of the node 2 | -name ${NODE_NAME} 3 | 4 | ## Cookie for distributed erlang 5 | -setcookie ${COOKIE} 6 | -------------------------------------------------------------------------------- /emulab/README.md: -------------------------------------------------------------------------------- 1 | ##### How To 2 | 3 | - Swap in your experiment in Emulab 4 | - Copy the list of nodes to `nodes-table`. 
Example:
5 |   ![Emulab list of nodes example](emulab-list-of-nodes.png)
6 | - Modify `emulab.config`
7 | - Run `init.sh`
8 | 
--------------------------------------------------------------------------------
/emulab/emulab-list-of-nodes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vitorenesduarte/exp/99486aba658c1b5f077275ceca3eef173375d050/emulab/emulab-list-of-nodes.png
--------------------------------------------------------------------------------
/emulab/emulab.config:
--------------------------------------------------------------------------------
1 | user=vitor
--------------------------------------------------------------------------------
/emulab/init-master.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | RED='\033[0;31m'
4 | NC='\033[0m' # No Color
5 | GREEN='\033[0;32m'
6 | 
7 | INIT_NODE=$1; shift
8 | USER=$1; shift
9 | NODES=( "$@" )
10 | 
11 | sudo swapoff -a
12 | echo -e "sudo swapoff -a ${GREEN}successful${NC}"
13 | 
14 | sudo groupadd docker
15 | echo -e "sudo groupadd docker ${GREEN}successful${NC}"
16 | 
17 | sudo systemctl start docker.service
18 | echo -e "sudo systemctl start docker.service ${GREEN}successful${NC}"
19 | 
20 | APISERV=$(ifconfig | grep -Eb1 "enp6s7" | grep -Eo "inet addr:[0-9\.]+" | awk -F : '{print $2}') &&
21 | echo -e "APISERVER ADVERTISE ADDRESS is ${APISERV}" &&
22 | 
23 | # sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=${APISERV} --kubernetes-version stable-1.8 2>&1 | tee tmp &&
24 | sudo kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=${APISERV} --kubernetes-version stable-1.8 2>&1 | tee tmp &&
25 | 
26 | #sudo kubeadm init --apiserver-advertise-address=${APISERV} --kubernetes-version stable-1.8 2>&1 | tee tmp &&
27 | 
28 | KUBEADMINIT=$(tail -2 tmp | grep join) &&
29 | echo "${KUBEADMINIT}" > ~/kubeadmjoin &&
30 | echo -e "sudo kubeadm init ${GREEN}successful${NC}" &&
31 | 
32 | mkdir -p $HOME/.kube &&
33 | echo -e "mkdir -p $HOME/.kube ${GREEN}successful${NC}" &&
34 | 
35 | yes | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config &&
36 | echo -e "sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config ${GREEN}successful${NC}" &&
37 | 
38 | sudo chown `id -u`:`id -g` $HOME/.kube/config &&
39 | echo -e "sudo chown `id -u`:`id -g` $HOME/.kube/config ${GREEN}successful${NC}" &&
40 | 
41 | sudo kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.9.1/Documentation/kube-flannel.yml &&
42 | # sudo kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml &&
43 | echo -e "sudo kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml ${GREEN}successful${NC}" &&
44 | 
45 | sudo kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=default:default &&
46 | echo -e "sudo kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=default:default ${GREEN}successful${NC}" &&
47 | 
48 | for NODE in ${NODES[@]}; do
49 |     echo "${INIT_NODE} ${NODE} ${USER}"
50 |     ssh -o "StrictHostKeyChecking no" ${USER}@"$NODE" 'bash -s' < $INIT_NODE &
51 | done
52 | wait
53 | echo -e "${GREEN}init-master.sh DONE${NC}"
54 | 
--------------------------------------------------------------------------------
/emulab/init-node.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | RED='\033[0;31m'
4 | NC='\033[0m' # No Color
5 | GREEN='\033[0;32m'
6 | 
7 | sudo swapoff -a
8 | echo -e "swapoff -a ${GREEN}successful${NC}" &&
9 | sudo groupadd docker
10 | echo -e "groupadd docker ${GREEN}successful${NC}" &&
11 | sudo systemctl restart docker.service
12 | echo -e "systemctl restart docker.service ${GREEN}successful${NC}" &&
13 | CMD=$(cat ~/kubeadmjoin) &&
14 | echo "${CMD}" &&
15 | sudo $CMD &&
16 | echo -e "KUBEADMINITCLI ${GREEN}successful${NC}" &&
17 | echo -e "${GREEN}init-node.sh DONE${NC}"
18 | 
--------------------------------------------------------------------------------
/emulab/init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | NODES_FILE=nodes-table
4 | 
5 | USER=$(grep user emulab.config | cut -d= -f2)
6 | RED='\033[0;31m'
7 | NC='\033[0m' # No Color
8 | GREEN='\033[0;32m'
9 | 
10 | Main=$(cat ${NODES_FILE} | grep main | awk '{print $4}')
11 | Args=$(tail -n +2 ${NODES_FILE} | awk '{print $4}' | tr '\n' ' ')
12 | 
13 | echo -e "Copying init-node.sh to ${USER}@${Main}"
14 | scp -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" ./init-node.sh ${USER}@"$Main":~/init-node.sh
15 | echo -e "scp init-node.sh done ${GREEN}successful${NC}"
16 | 
17 | echo -e "Executing init-master.sh in ${USER}@${Main}"
18 | ssh -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" ${USER}@"$Main" 'bash -s'< ./init-master.sh "~/init-node.sh" "${USER}" "${Args[@]}"
19 | echo -e "ssh init-master.sh done ${GREEN}successful${NC}"
20 | 
21 | echo -e "Copying k8s config from ${USER}@${Main}"
22 | scp -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" ${USER}@"$Main":~/.kube/config ~/.kube/config
23 | 
24 | while [ $(kubectl get nodes | grep NotReady | wc -l) -gt 0 ]; do
25 |     echo "Some nodes are not ready yet... Trying again in 2s"
26 |     sleep 2
27 | done
28 | 
--------------------------------------------------------------------------------
/emulab/killall.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | NODES_FILE=nodes-table
4 | 
5 | USER=$(grep user emulab.config | cut -d= -f2)
6 | RED='\033[0;31m'
7 | NC='\033[0m' # No Color
8 | GREEN='\033[0;32m'
9 | 
10 | Main=$(cat ${NODES_FILE} | grep main | awk '{print $4}')
11 | echo -e "Main done ${GREEN}successful${NC}"
12 | 
13 | Args=$(tail -n +2 ${NODES_FILE} | awk '{print $4}' | tr '\n' ' ')
14 | echo -e "Args done ${GREEN}successful${NC}"
15 | 
16 | Nodes=$(kubectl get nodes | tail -n +3 | awk '{print $1}')
17 | echo -e "get each node name ${GREEN}successful${NC}"
18 | 
19 | MainNode=$(kubectl get nodes | head -n 2 | tail -n 1 | awk '{print $1}')
20 | echo -e "get main name ${GREEN}successful${NC}"
21 | 
22 | for NODE in ${Nodes[@]}; do
23 |     cmd="sudo kubectl drain ${NODE} --delete-local-data --force --ignore-daemonsets && sudo kubectl delete node ${NODE}"
24 |     ssh -o "StrictHostKeyChecking no" ${USER}@"$Main" ${cmd} &
25 | done
26 | wait
27 | echo -e "drain and delete each ${GREEN}successful${NC}"
28 | 
29 | for NODE in ${Args[@]}; do
30 |     cmd="sudo kubeadm reset"
31 |     ssh -o "StrictHostKeyChecking no" ${USER}@"$NODE" ${cmd} &
32 | done
33 | wait
34 | echo -e "reset each node kubeadm ${GREEN}successful${NC}"
35 | 
36 | cmd="sudo kubectl drain ${MainNode} --delete-local-data --force --ignore-daemonsets && sudo kubectl delete node ${MainNode}"
37 | 
38 | ssh -o "StrictHostKeyChecking no" ${USER}@"$Main" ${cmd}
39 | echo -e "drain and delete node master ${GREEN}successful${NC}"
40 | 
41 | ssh -o "StrictHostKeyChecking no" ${USER}@"$Main" 'sudo kubeadm reset'
42 | echo -e "killall.sh ${GREEN}successful${NC}"
43 | 
--------------------------------------------------------------------------------
/emulab/nodes-table:
--------------------------------------------------------------------------------
1 | main controller UB16-64-Docker-K8s++ main.join-decomposition.CRDT.emulab.net
2 | replica-0 replicas UB16-64-Docker-K8s++ replica-0.join-decomposition.CRDT.emulab.net
3 | replica-1 replicas UB16-64-Docker-K8s++ replica-1.join-decomposition.CRDT.emulab.net
4 | replica-10 replicas UB16-64-Docker-K8s++ replica-10.join-decomposition.CRDT.emulab.net
5 | replica-11 replicas UB16-64-Docker-K8s++ replica-11.join-decomposition.CRDT.emulab.net
6 | replica-12 replicas UB16-64-Docker-K8s++ replica-12.join-decomposition.CRDT.emulab.net
7 | replica-13 replicas UB16-64-Docker-K8s++ replica-13.join-decomposition.CRDT.emulab.net
8 | replica-14 replicas UB16-64-Docker-K8s++ replica-14.join-decomposition.CRDT.emulab.net
9 | replica-15 replicas UB16-64-Docker-K8s++ replica-15.join-decomposition.CRDT.emulab.net
10 | replica-16 replicas UB16-64-Docker-K8s++ replica-16.join-decomposition.CRDT.emulab.net
11 | replica-17 replicas UB16-64-Docker-K8s++ replica-17.join-decomposition.CRDT.emulab.net
12 | replica-18 replicas UB16-64-Docker-K8s++ replica-18.join-decomposition.CRDT.emulab.net
13 | replica-19 replicas UB16-64-Docker-K8s++ replica-19.join-decomposition.CRDT.emulab.net
14 | replica-2 replicas UB16-64-Docker-K8s++ replica-2.join-decomposition.CRDT.emulab.net
15 | replica-20 replicas UB16-64-Docker-K8s++ replica-20.join-decomposition.CRDT.emulab.net
16 | replica-21 replicas UB16-64-Docker-K8s++ replica-21.join-decomposition.CRDT.emulab.net
17 | replica-22 replicas UB16-64-Docker-K8s++
replica-22.join-decomposition.CRDT.emulab.net 18 | replica-23 replicas UB16-64-Docker-K8s++ replica-23.join-decomposition.CRDT.emulab.net 19 | replica-24 replicas UB16-64-Docker-K8s++ replica-24.join-decomposition.CRDT.emulab.net 20 | replica-25 replicas UB16-64-Docker-K8s++ replica-25.join-decomposition.CRDT.emulab.net 21 | replica-26 replicas UB16-64-Docker-K8s++ replica-26.join-decomposition.CRDT.emulab.net 22 | replica-27 replicas UB16-64-Docker-K8s++ replica-27.join-decomposition.CRDT.emulab.net 23 | replica-28 replicas UB16-64-Docker-K8s++ replica-28.join-decomposition.CRDT.emulab.net 24 | replica-29 replicas UB16-64-Docker-K8s++ replica-29.join-decomposition.CRDT.emulab.net 25 | replica-3 replicas UB16-64-Docker-K8s++ replica-3.join-decomposition.CRDT.emulab.net 26 | replica-30 replicas UB16-64-Docker-K8s++ replica-30.join-decomposition.CRDT.emulab.net 27 | replica-31 replicas UB16-64-Docker-K8s++ replica-31.join-decomposition.CRDT.emulab.net 28 | replica-32 replicas UB16-64-Docker-K8s++ replica-32.join-decomposition.CRDT.emulab.net 29 | replica-33 replicas UB16-64-Docker-K8s++ replica-33.join-decomposition.CRDT.emulab.net 30 | replica-34 replicas UB16-64-Docker-K8s++ replica-34.join-decomposition.CRDT.emulab.net 31 | replica-35 replicas UB16-64-Docker-K8s++ replica-35.join-decomposition.CRDT.emulab.net 32 | replica-36 replicas UB16-64-Docker-K8s++ replica-36.join-decomposition.CRDT.emulab.net 33 | replica-37 replicas UB16-64-Docker-K8s++ replica-37.join-decomposition.CRDT.emulab.net 34 | replica-38 replicas UB16-64-Docker-K8s++ replica-38.join-decomposition.CRDT.emulab.net 35 | replica-39 replicas UB16-64-Docker-K8s++ replica-39.join-decomposition.CRDT.emulab.net 36 | replica-4 replicas UB16-64-Docker-K8s++ replica-4.join-decomposition.CRDT.emulab.net 37 | replica-40 replicas UB16-64-Docker-K8s++ replica-40.join-decomposition.CRDT.emulab.net 38 | replica-41 replicas UB16-64-Docker-K8s++ replica-41.join-decomposition.CRDT.emulab.net 39 | replica-42 replicas UB16-64-Docker-K8s++ replica-42.join-decomposition.CRDT.emulab.net 40 | replica-43 replicas UB16-64-Docker-K8s++ replica-43.join-decomposition.CRDT.emulab.net 41 | replica-44 replicas UB16-64-Docker-K8s++ replica-44.join-decomposition.CRDT.emulab.net 42 | replica-45 replicas UB16-64-Docker-K8s++ replica-45.join-decomposition.CRDT.emulab.net 43 | replica-46 replicas UB16-64-Docker-K8s++ replica-46.join-decomposition.CRDT.emulab.net 44 | replica-47 replicas UB16-64-Docker-K8s++ replica-47.join-decomposition.CRDT.emulab.net 45 | replica-48 replicas UB16-64-Docker-K8s++ replica-48.join-decomposition.CRDT.emulab.net 46 | replica-5 replicas UB16-64-Docker-K8s++ replica-5.join-decomposition.CRDT.emulab.net 47 | replica-6 replicas UB16-64-Docker-K8s++ replica-6.join-decomposition.CRDT.emulab.net 48 | replica-7 replicas UB16-64-Docker-K8s++ replica-7.join-decomposition.CRDT.emulab.net 49 | replica-8 replicas UB16-64-Docker-K8s++ replica-8.join-decomposition.CRDT.emulab.net 50 | replica-9 replicas UB16-64-Docker-K8s++ replica-9.join-decomposition.CRDT.emulab.net 51 | -------------------------------------------------------------------------------- /evaluation/.gitignore: -------------------------------------------------------------------------------- 1 | *.png 2 | metrics/ 3 | processed/ 4 | -------------------------------------------------------------------------------- /evaluation/Makefile: -------------------------------------------------------------------------------- 1 | OS := $(shell uname) 2 | ifeq ($(OS), Linux) 3 | VIEWER=eog 4 | 
else ifeq ($(OS), Darwin) 5 | VIEWER=open 6 | endif 7 | 8 | default: plots show 9 | 10 | all: pre plots show 11 | 12 | pre: preprocess.py 13 | rm -rf processed 14 | python preprocess.py 15 | 16 | plots: micro retwis 17 | 18 | micro: 19 | Rscript icde19/first.R 20 | Rscript icde19/second.R 21 | Rscript icde19/gset_gcounter.R 22 | Rscript icde19/gmap.R 23 | Rscript icde19/memory.R 24 | Rscript icde19/metadata.R 25 | 26 | retwis: 27 | Rscript icde19/retwis.R 28 | Rscript icde19/retwis_processing.R 29 | 30 | show: 31 | $(VIEWER) *.png 32 | 33 | clean: 34 | rm -f *.png 35 | -------------------------------------------------------------------------------- /evaluation/generic.R: -------------------------------------------------------------------------------- 1 | x_axis_label <- function(text) { 2 | mtext( 3 | side=1, # bottom 4 | text=text, 5 | font=2, # bold 6 | line=.8, # closeness to plot 7 | outer=TRUE, # outside of the plot if TRUE 8 | cex=1 # size 9 | ) 10 | } 11 | 12 | y_axis_label <- function(text) { 13 | mtext( 14 | side=2, # left 15 | las=0, # vertical text 16 | text=text, 17 | font=2, # bold 18 | line=.8, # closeness to plot 19 | outer=TRUE, # outside of the plot if TRUE 20 | cex=1 # size 21 | ) 22 | } 23 | 24 | add_title <- function(text) { 25 | title(text, cex.main=1.3, line=0.7) 26 | } 27 | 28 | plot_bars <- function(title, lines, y_min, colors, angles, densities, 29 | y_max=-1, 30 | bar_cex=1) { 31 | 32 | if(y_max == -1) { 33 | # find the y max 34 | y_max = Reduce(max, lines) 35 | y_max = y_max + 0.15*y_max 36 | } 37 | 38 | # configure and draw 39 | p <- barplot( 40 | lines, 41 | ylim=c(y_min, y_max), 42 | xpd = FALSE, 43 | col=colors, 44 | horiz=FALSE, 45 | angle=angles, 46 | density=densities 47 | ) 48 | 49 | # round tops 50 | rounded <- map( 51 | lines, 52 | function(v) { 53 | x <- round(v, digits=1) 54 | if(x == 1) round(v, digits=2) else x 55 | } 56 | ) 57 | 58 | # add text at top of bars 59 | text( 60 | x=p, 61 | y=lines, 62 | label=rounded, 63 | pos=3, 64 | cex=bar_cex, 65 | font=2 66 | ) 67 | 68 | # title 69 | add_title(title) 70 | } 71 | 72 | plot_cdf <- function(title, lines, colors, y_max, y_step) { 73 | lines_y <- seq(0, y_max, by=y_step) 74 | lines_x <- lapply( 75 | lines, 76 | function(l) { quantile(l, probs=lines_y, names=FALSE) } 77 | ) 78 | 79 | # find x min and max 80 | x_min <- Reduce(min, lapply(lines_x, min)) 81 | x_max <- Reduce(max, lapply(lines_x, max)) 82 | 83 | # configure plot 84 | plot( 85 | range(x_max), 86 | range(1), 87 | xlim=c(x_min, x_max), 88 | ylim=c(0, y_max), 89 | xlab="", 90 | ylab="", 91 | ) 92 | 93 | # draw 94 | for(i in 1:length(lines_x)) { 95 | lines( 96 | lines_x[[i]], 97 | lines_y, 98 | col=colors[[i]], 99 | lty=i, 100 | lwd=i, 101 | ) 102 | } 103 | 104 | # title 105 | add_title(title) 106 | } 107 | 108 | plot_lines <- function(title, lines_x, lines_y, colors, 109 | lwd=1.5, pch=c(1:10)) { 110 | # find the x max and y max 111 | x_max <- Reduce(max, lapply(lines_x, max)) 112 | y_max <- Reduce(max, lapply(lines_y, max)) 113 | 114 | # configure plot 115 | plot( 116 | range(x_max), 117 | range(y_max), 118 | type="n", 119 | xlim=c(0, x_max), # max x 120 | ylim=c(0, y_max), # max y 121 | xlab="", 122 | ylab="", 123 | ) 124 | 125 | # draw 126 | for(i in 1:length(lines_y)) { 127 | lines( 128 | lines_x[[i]], 129 | lines_y[[i]], 130 | col=colors[[i]], 131 | type="b", 132 | pch=pch[[i]], 133 | lwd=lwd 134 | ) 135 | } 136 | 137 | # title 138 | add_title(title) 139 | } 140 | 141 | plot_lines_retwis <- function(lines_x, lines_y, colors, 142 | x_lab="", 
143 | y_lab="", 144 | log="y", 145 | y_max=0, 146 | las=2, 147 | digits=1, 148 | lwd=1, 149 | pch=c(1:10)) { 150 | # find the x max and y max 151 | x_min <- Reduce(min, lapply(lines_x, min)) 152 | x_max <- Reduce(max, lapply(lines_x, max)) 153 | y_min <- Reduce(min, lapply(lines_y, min)) 154 | if(y_max == 0) { 155 | y_max <- Reduce(max, lapply(lines_y, max)) 156 | } 157 | 158 | # configure plot 159 | plot( 160 | range(x_max), 161 | range(y_max), 162 | type="n", 163 | xlim=c(x_min, x_max), # max x 164 | ylim=c(y_min, y_max), # max y 165 | xlab="", 166 | ylab="", 167 | log=log, 168 | xaxt="n", 169 | yaxt="n", 170 | ) 171 | # add custom axis 172 | xtick <- lines_x[[1]] 173 | axis( 174 | side=1, 175 | at=xtick, 176 | labels=TRUE, 177 | cex.axis=0.8 178 | ) 179 | ytick <- round(lines_y[[1]], digits=digits) 180 | axis( 181 | side=2, 182 | at=ytick, 183 | labels=TRUE, 184 | cex.axis=0.8, 185 | las=las 186 | ) 187 | 188 | # add custom labels 189 | mtext( 190 | side=1, # bottom 191 | las=0, # vertical text 192 | text=x_lab, 193 | font=2, # bold 194 | line=2.3, # closeness to plot 195 | cex=.9 # size 196 | ) 197 | mtext( 198 | side=2, # left 199 | las=0, # vertical text 200 | text=y_lab, 201 | font=2, # bold 202 | line=2.3, # closeness to plot 203 | cex=.9 # size 204 | ) 205 | 206 | # draw 207 | for(i in 1:length(lines_y)) { 208 | lines( 209 | lines_x[[i]], 210 | lines_y[[i]], 211 | col=colors[[i]], 212 | type="b", 213 | pch=pch[[i]], 214 | lwd=lwd 215 | ) 216 | } 217 | } 218 | 219 | plot_box <- function(title, lines, colors) { 220 | boxplot( 221 | lines, 222 | notch=TRUE, 223 | log="y" 224 | ) 225 | 226 | # title 227 | title(title, cex.main=1.5, line=0.7) 228 | } 229 | -------------------------------------------------------------------------------- /evaluation/icde19.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vitorenesduarte/exp/99486aba658c1b5f077275ceca3eef173375d050/evaluation/icde19.tar.gz -------------------------------------------------------------------------------- /evaluation/icde19/README.md: -------------------------------------------------------------------------------- 1 | # How to reproduce the experiments 2 | 3 | #### Prerequisites 4 | 5 | - A running Kubernetes cluster (version compatible with v1.8.1) 6 | - [`kubectl`](https://github.com/kubernetes/kubectl) 7 | - [Erlang](https://github.com/erlang/otp) 8 | - [R](https://www.r-project.org/) 9 | 10 | #### Instructions 11 | 12 | From the root of the repository: 13 | 14 | ```bash 15 | $ make 16 | $ bin/icde19-exp.sh 17 | ``` 18 | 19 | When it ends: 20 | ```bash 21 | $ cd evaluation/ 22 | $ make all 23 | ``` 24 | 25 | The plots: 26 | ```bash 27 | $ ls *.png 28 | ``` 29 | -------------------------------------------------------------------------------- /evaluation/icde19/first.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(110|460|490)'" 5 | 6 | # draw! 
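# Note: the numeric prefixes matched by TO_KEEP are the scores assigned by preprocess.py's get_score():
# 110 = state-based, 460 = plain delta-based, 490 = delta-based BP+RR, matching the three labels used below.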
7 | main <- function() { 8 | output_file <- "first.png" 9 | 10 | cluster <- "ls -d processed/* | grep 0~gset~partialmesh~15" 11 | cluster <- paste(cluster, " | grep -E ", TO_KEEP, sep="") 12 | 13 | labels <- c( 14 | "State-based", 15 | "Delta-based", 16 | "This paper" 17 | ) 18 | 19 | # avoid scientific notation 20 | options(scipen=999) 21 | 22 | # open device 23 | png(filename=output_file, width=600, height=400, res=110) 24 | 25 | # change outer margins 26 | op <- par( 27 | oma=c(4,2,0,0), # room for the legend 28 | mfrow=c(1,1), # 1x1 matrix 29 | mar=c(1.5,1.5,0.5,0.5) # spacing between plots 30 | ) 31 | 32 | # style stuff 33 | colors <- c( 34 | "snow4", 35 | "springgreen4", 36 | "gray22" 37 | ) 38 | pch <- c(1, 3, 6) 39 | 40 | files <- system(cluster, intern=TRUE) 41 | 42 | # nothing to plot if no file 43 | if(length(files) == 0) return(invisible(NULL)) 44 | 45 | # keys 46 | key_x <- "transmission_compressed_x" 47 | key_y <- "transmission_compressed" 48 | 49 | # data 50 | title <- "" 51 | lines_x <- lapply(files, function(f) { json(c(f))[[key_x]] }) 52 | lines_y <- lapply(files, function(f) { json(c(f))[[key_y]] }) 53 | 54 | # plot lines 55 | plot_lines(title, lines_x, lines_y, colors, 56 | lwd=2, 57 | pch=pch) 58 | 59 | # axis labels 60 | x_axis_label("Time (s)") 61 | y_axis_label("Number of set elements") 62 | 63 | par(op) # Leave the last plot 64 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 65 | xpd=NA) # Allow plotting outside the plot region 66 | 67 | # legend 68 | legend( 69 | 0.05, # x 70 | -.32, # y 71 | cex=1, 72 | legend=labels, 73 | pch=pch, 74 | col=colors, 75 | horiz=TRUE, 76 | box.col=NA # remove box 77 | ) 78 | 79 | # close device 80 | dev.off() 81 | } 82 | 83 | main() 84 | warnings() 85 | -------------------------------------------------------------------------------- /evaluation/icde19/gmap.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(110|220|230|350|460|470|480|490)'" 5 | 6 | # draw!
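# The TO_KEEP scores come from preprocess.py's get_score() and sort in the same order as the labels below:
# 110 state-based, 220/230 scuttlebutt without/with GC, 350 op-based, 460 delta-based,
# 470 delta-based BP, 480 delta-based RR, 490 delta-based BP+RR.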
7 | main <- function() { 8 | output_file <- "gmap.png" 9 | 10 | clusters <- c( 11 | "ls -d processed/* | grep 10~gmap~tree", 12 | "ls -d processed/* | grep 30~gmap~tree", 13 | "ls -d processed/* | grep 60~gmap~tree", 14 | "ls -d processed/* | grep 100~gmap~tree", 15 | "ls -d processed/* | grep 10~gmap~partialmesh", 16 | "ls -d processed/* | grep 30~gmap~partialmesh", 17 | "ls -d processed/* | grep 60~gmap~partialmesh", 18 | "ls -d processed/* | grep 100~gmap~partialmesh" 19 | ) 20 | clusters <- map(clusters, function(c) { 21 | paste(c, " | grep -E ", TO_KEEP, sep="") 22 | }) 23 | titles <- c( 24 | "GMap 10% - Tree", 25 | "GMap 30% - Tree", 26 | "GMap 60% - Tree", 27 | "GMap 100% - Tree", 28 | "GMap 10% - Mesh", 29 | "GMap 30% - Mesh", 30 | "GMap 60% - Mesh", 31 | "GMap 100% - Mesh" 32 | ) 33 | labels <- c( 34 | "State-based", 35 | "Scuttlebutt", 36 | "Scuttlebutt-GC", 37 | "Op-based", 38 | "Delta-based", 39 | "Delta-based BP", 40 | "Delta-based RR", 41 | "Delta-based BP+RR" 42 | ) 43 | 44 | # avoid scientific notation 45 | options(scipen=999) 46 | 47 | # open device 48 | png(filename=output_file, width=2600, height=1200, res=240) 49 | 50 | # change outer margins 51 | op <- par( 52 | oma=c(5,3,0,0), # room for the legend 53 | mfrow=c(2,4), # 2x4 matrix 54 | mar=c(2,2,3,1) # spacing between plots 55 | ) 56 | 57 | # style stuff 58 | colors <- c( 59 | "snow4", 60 | "darkgoldenrod", 61 | "steelblue4", 62 | "yellow3", 63 | "springgreen4", 64 | "darkorange1", 65 | "red4", 66 | "gray22" 67 | ) 68 | pch <- c(1,7,8,2,3,4,5,6) 69 | 70 | for(i in 1:length(clusters)) { 71 | files <- system(clusters[i], intern=TRUE) 72 | 73 | # skip if no file 74 | if(length(files) == 0) next 75 | 76 | # keys 77 | key_x <- "transmission_compressed_x" 78 | key_y <- "transmission_compressed" 79 | 80 | # data 81 | title <- titles[i] 82 | lines_x <- lapply(files, function(f) { json(c(f))[[key_x]] }) 83 | lines_y <- lapply(files, function(f) { json(c(f))[[key_y]] }) 84 | 85 | # metadata info 86 | # metadata_ratio <- map( 87 | # files, 88 | # function(f) { 89 | # j <- json(c(f)) 90 | # r <- sum(j[["transmission_metadata"]]) / sum(j[["transmission"]]) 91 | # round(r, 3) * 100 92 | # } 93 | # ) 94 | # print(metadata_ratio) 95 | 96 | avgs <- map( 97 | files, 98 | function(f) { 99 | j <- json(c(f)) 100 | sum(j[[key_y]]) / length(j[[key_y]]) 101 | } 102 | ) 103 | print(title) 104 | print(paste("scuttlebutt: ", 100 - 100 * round(avgs[[2]] / avgs[[1]], 2))) 105 | print(paste("scuttlebutt-gc: ", 100 - 100 * round(avgs[[3]] / avgs[[1]], 2))) 106 | print(paste("op-based: ", 100 - 100 * round(avgs[[4]] / avgs[[1]], 2))) 107 | print(paste("delta-based bp+rr: ", 100 - 100 * round(avgs[[8]] / avgs[[1]], 2))) 108 | 109 | # plot lines 110 | plot_lines(title, lines_x, lines_y, colors, 111 | pch=pch) 112 | } 113 | 114 | # axis labels 115 | x_axis_label("Time (s)") 116 | y_axis_label("Transmission") 117 | 118 | par(op) # Leave the last plot 119 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 120 | xpd=NA) # Allow plotting outside the plot region 121 | 122 | # legend 123 | legend( 124 | -.03, # x 125 | -.2, # y 126 | cex=0.92, 127 | legend=labels, 128 | pch=pch, 129 | text.width=c(0,0.09,0.085,0.092,0.087,0.089,0.095,0.098), 130 | col=colors, 131 | horiz=TRUE, 132 | box.col=NA # remove box 133 | ) 134 | 135 | # close device 136 | dev.off() 137 | } 138 | 139 | main() 140 | warnings() 141 | -------------------------------------------------------------------------------- /evaluation/icde19/gset_gcounter.R: 
-------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(110|220|230|350|460|470|480|490)'" 5 | 6 | # draw! 7 | main <- function() { 8 | output_file <- "gset_gcounter.png" 9 | 10 | clusters <- c( 11 | "ls -d processed/* | grep gset~tree~15", 12 | "ls -d processed/* | grep gcounter~tree~15", 13 | "ls -d processed/* | grep gset~partialmesh~15", 14 | "ls -d processed/* | grep gcounter~partialmesh~15" 15 | ) 16 | clusters <- map(clusters, function(c) { 17 | paste(c, " | grep -E ", TO_KEEP, sep="") 18 | }) 19 | titles <- c( 20 | "GSet - Tree", 21 | "GCounter - Tree", 22 | "GSet - Mesh", 23 | "GCounter - Mesh" 24 | ) 25 | labels <- c( 26 | "State-based", 27 | "Scuttlebutt", 28 | "Scuttlebutt-GC", 29 | "Op-based", 30 | "Delta-based", 31 | "Delta-based BP", 32 | "Delta-based RR", 33 | "Delta-based BP+RR" 34 | ) 35 | 36 | # avoid scientific notation 37 | options(scipen=999) 38 | 39 | # open device 40 | png(filename=output_file, width=800, height=650, res=130) 41 | 42 | # change outer margins 43 | op <- par( 44 | oma=c(3.5,2,0,0), # room for the legend 45 | mfrow=c(2,2), # 2x4 matrix 46 | mar=c(2.5,2,2,1) # spacing between plots 47 | ) 48 | 49 | # style stuff 50 | colors <- c( 51 | "snow4", 52 | "darkgoldenrod", 53 | "steelblue4", 54 | "yellow3", 55 | "springgreen4", 56 | "darkorange1", 57 | "red4", 58 | "gray22" 59 | ) 60 | angles <- c(0, 135, 45, 135, 45, 135, 45, 135) 61 | densities <- c(0, 15, 15, 22, 30, 30, 45, 45) 62 | 63 | for(i in 1:length(clusters)) { 64 | files <- system(clusters[i], intern=TRUE) 65 | 66 | # skip if no file 67 | if(length(files) == 0) next 68 | 69 | # keys 70 | key <- "transmission" 71 | 72 | # data 73 | title <- titles[i] 74 | lines <- map(files, function(f) { sum(json(c(f))[[key]]) }) 75 | 76 | # metadata info 77 | metadata_ratio <- map( 78 | files, 79 | function(f) { 80 | j <- json(c(f)) 81 | r <- sum(j[["transmission_metadata"]]) / sum(j[["transmission"]]) 82 | round(r, 3) * 100 83 | } 84 | ) 85 | print(metadata_ratio) 86 | 87 | # (wrto rr) 88 | if(length(lines) == length(labels)) { 89 | rr_index <- length(labels) 90 | rr <- lines[[rr_index]] 91 | lines <- map(lines, function(v) { v / rr }) 92 | 93 | # plot lines 94 | y_min <- 0 95 | plot_bars(title, lines, y_min, colors, angles, densities) 96 | } 97 | } 98 | 99 | # axis labels 100 | y_axis_label("Transmission ratio wrto BP+RR") 101 | 102 | par(op) # Leave the last plot 103 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 104 | xpd=NA) # Allow plotting outside the plot region 105 | 106 | # legend 107 | legend( 108 | 0.1, # x 109 | -.02, # y 110 | cex=1, 111 | legend=labels, 112 | angle=angles, 113 | density=densities, 114 | fill=colors, 115 | ncol=2, 116 | box.col=NA # remove box 117 | ) 118 | 119 | # close device 120 | dev.off() 121 | } 122 | 123 | main() 124 | warnings() 125 | -------------------------------------------------------------------------------- /evaluation/icde19/memory.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(110|220|230|350|460|470|480|490)'" 5 | 6 | # draw! 
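# Each bar drawn below is the run-average of memory_crdt + memory_algorithm,
# normalised by the last configuration (delta-based BP+RR, score 490).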
7 | main <- function() { 8 | output_file <- "memory.png" 9 | 10 | clusters <- c( 11 | "ls -d processed/* | grep 0~gset~partialmesh~15", 12 | "ls -d processed/* | grep 0~gcounter~partialmesh~15", 13 | "ls -d processed/* | grep 10~gmap~partialmesh~15", 14 | "ls -d processed/* | grep 100~gmap~partialmesh~15" 15 | ) 16 | clusters <- map(clusters, function(c) { 17 | paste(c, " | grep -E ", TO_KEEP, sep="") 18 | }) 19 | titles <- c( 20 | "GSet - Mesh", 21 | "GCounter - Mesh", 22 | "GMap 10% - Mesh", 23 | "GMap 100% - Mesh" 24 | ) 25 | labels <- c( 26 | "State-based", 27 | "Scuttlebutt", 28 | "Scuttlebutt-GC", 29 | "Op-based", 30 | "Delta-based", 31 | "Delta-based BP", 32 | "Delta-based RR", 33 | "Delta-based BP+RR" 34 | ) 35 | 36 | # avoid scientific notation 37 | options(scipen=999) 38 | 39 | # open device 40 | png(filename=output_file, width=800, height=650, res=130) 41 | 42 | # change outer margins 43 | op <- par( 44 | oma=c(3.5,2,0,0), # room for the legend 45 | mfrow=c(2,2), # 2x4 matrix 46 | mar=c(2.5,2,2,1) # spacing between plots 47 | ) 48 | 49 | # style stuff 50 | colors <- c( 51 | "snow4", 52 | "darkgoldenrod", 53 | "steelblue4", 54 | "yellow3", 55 | "springgreen4", 56 | "darkorange1", 57 | "red4", 58 | "gray22" 59 | ) 60 | angles <- c(0, 135, 45, 135, 45, 135, 45, 135) 61 | densities <- c(0, 15, 15, 22, 30, 30, 45, 45) 62 | 63 | for(i in 1:length(clusters)) { 64 | files <- system(clusters[i], intern=TRUE) 65 | 66 | # skip if no file 67 | if(length(files) == 0) next 68 | 69 | # keys 70 | key_a <- "memory_crdt" 71 | key_b <- "memory_algorithm" 72 | 73 | # data 74 | title <- titles[i] 75 | lines <- map( 76 | files, 77 | function(f) { 78 | data <- json(c(f)) 79 | avg_a <- mean(data[[key_a]]) 80 | avg_b <- mean(data[[key_b]]) 81 | avg_a + avg_b 82 | } 83 | ) 84 | 85 | # (wrto rr) 86 | if(length(lines) == length(labels)) { 87 | rr_index <- length(labels) 88 | rr <- lines[[rr_index]] 89 | lines <- map(lines, function(v) { v / rr }) 90 | 91 | # plot bars 92 | y_min <- 0 93 | plot_bars(title, lines, y_min, colors, angles, densities) 94 | } 95 | } 96 | 97 | # axis labels 98 | y_axis_label("Avg. Memory ratio wrto BP+RR") 99 | 100 | par(op) # Leave the last plot 101 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 102 | xpd=NA) # Allow plotting outside the plot region 103 | 104 | # legend 105 | legend( 106 | 0.1, # x 107 | -.02, # y 108 | cex=1, 109 | legend=labels, 110 | angle=angles, 111 | density=densities, 112 | fill=colors, 113 | ncol=2, 114 | box.col=NA # remove box 115 | ) 116 | 117 | # close device 118 | dev.off() 119 | } 120 | 121 | main() 122 | warnings() 123 | -------------------------------------------------------------------------------- /evaluation/icde19/metadata.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(220|230|350|490)'" 5 | 6 | # draw! 
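# The per-second metadata below is scaled by id_size = 20 (presumably an identifier size, in bytes)
# and divided by 1000 before being plotted in MB.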
7 | main <- function() { 8 | output_file <- "metadata.png" 9 | 10 | clusters <- c( 11 | "ls -d processed/* | grep partialmesh~16", 12 | "ls -d processed/* | grep partialmesh~32", 13 | "ls -d processed/* | grep partialmesh~64" 14 | ) 15 | clusters <- map(clusters, function(c) { 16 | paste(c, " | grep -E ", TO_KEEP, sep="") 17 | }) 18 | titles <- c( 19 | "16 Nodes", 20 | "32 Nodes", 21 | "64 Nodes" 22 | ) 23 | labels <- c( 24 | "Scuttlebutt", 25 | "Scuttlebutt-GC", 26 | "Op-based", 27 | "Delta-based BP+RR" 28 | ) 29 | 30 | # avoid scientific notation 31 | options(scipen=999) 32 | 33 | # open device 34 | png(filename=output_file, width=550, height=350, res=130) 35 | 36 | # change outer margins 37 | op <- par( 38 | oma=c(2.5,2,0,0), # room for the legend 39 | mfrow=c(1, 3), # 2x4 matrix 40 | mar=c(2,2,2,1) # spacing between plots 41 | ) 42 | 43 | # style stuff 44 | colors <- c( 45 | "darkgoldenrod", 46 | "steelblue4", 47 | "yellow3", 48 | "gray22" 49 | ) 50 | angles <- c(135, 45, 135, 135) 51 | densities <- c(15, 15, 22, 45) 52 | 53 | for(i in 1:length(clusters)) { 54 | files <- system(clusters[i], intern=TRUE) 55 | 56 | # skip if no file 57 | if(length(files) == 0) next 58 | 59 | # keys 60 | key <- "transmission_metadata" 61 | 62 | id_size = 20 63 | 64 | # data 65 | title <- titles[i] 66 | lines <- map(files, function(f) { 67 | entries <- json(c(f))[[key]] 68 | sum(entries) / length(entries) * id_size / 1000 69 | }) 70 | 71 | # metadata info 72 | metadata_ratio <- map( 73 | files, 74 | function(f) { 75 | j <- json(c(f)) 76 | r <- sum(j[["transmission_metadata"]]) / sum(j[["transmission"]]) 77 | round(r, 3) * 100 78 | } 79 | ) 80 | print(metadata_ratio) 81 | 82 | # plot bars 83 | y_min <- 0 84 | plot_bars(title, lines, y_min, colors, angles, densities, 350) 85 | } 86 | 87 | # axis labels 88 | y_axis_label("Avg. metadata (MB)") 89 | 90 | par(op) # Leave the last plot 91 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 92 | xpd=NA) # Allow plotting outside the plot region 93 | 94 | # legend 95 | legend( 96 | -0.2, # x 97 | -.6, # y 98 | cex=1, 99 | legend=labels, 100 | angle=angles, 101 | density=densities, 102 | fill=colors, 103 | ncol=2, 104 | box.col=NA # remove box 105 | ) 106 | 107 | # close device 108 | dev.off() 109 | } 110 | 111 | main() 112 | warnings() 113 | -------------------------------------------------------------------------------- /evaluation/icde19/retwis.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | NLINES <- 2 5 | EXP_START <- 0 6 | EXP_END <- 50 7 | TO_KEEP <- "'(460|490)'" 8 | 9 | get_lines <- function(clusters, key, file_index, first_entry, last_entry) { 10 | any_non_zero <- 0.1 11 | map(clusters, function(cluster) { 12 | files <- system(cluster, intern=TRUE) 13 | 14 | # skip if no file 15 | if(length(files) == NLINES) { 16 | lines <- json(c(files[file_index]))[[key]] 17 | entries <- lines[first_entry:last_entry] 18 | sum(entries) / length(entries) / 1000 19 | } 20 | else { any_non_zero } 21 | }) 22 | } 23 | 24 | get_all_lines <- function(clusters, key, first_entry, last_entry) { 25 | lines_y <- list() 26 | for(i in seq(1, NLINES)) { 27 | lines_y[[i]] <- get_lines(clusters, key, i, first_entry, last_entry) 28 | } 29 | lines_y 30 | } 31 | 32 | # draw! 
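# get_lines/get_all_lines above average the per-second samples over the requested window and divide
# by 1000, assuming the preprocessed values are in MB so the plots below can be labelled in GB (and GB/s).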
33 | main <- function() { 34 | output_file <- "retwis.png" 35 | 36 | clusters <- c( 37 | "ls -d processed/* | grep ~50~0~retwis", 38 | "ls -d processed/* | grep ~75~0~retwis", 39 | "ls -d processed/* | grep ~100~0~retwis", 40 | "ls -d processed/* | grep ~125~0~retwis", 41 | "ls -d processed/* | grep ~150~0~retwis" 42 | ) 43 | clusters <- map(clusters, function(c) { 44 | paste(c, " | grep -E ", TO_KEEP, sep="") 45 | }) 46 | labels <- c( 47 | "Delta-based", 48 | "Delta-based BP+RR" 49 | ) 50 | 51 | # avoid scientific notation 52 | options(scipen=999) 53 | 54 | # open device 55 | png(filename=output_file, width=850, height=700, res=130) 56 | 57 | # change outer margins 58 | op <- par( 59 | oma=c(5,2,0.5,0), # room for the legend 60 | mfrow=c(2,2), # 2x2 matrix 61 | mar=c(1.5,2.5,2,1) # spacing between plots 62 | ) 63 | 64 | # style stuff 65 | colors <- c( 66 | "springgreen4", 67 | "gray22" 68 | ) 69 | pch <- c(3,6) 70 | 71 | # assert there's enough info 72 | stopifnot(length(labels) == NLINES) 73 | stopifnot(length(colors) == NLINES) 74 | stopifnot(length(pch) == NLINES) 75 | 76 | coefs <- c(0.5, 0.75, 1, 1.25, 1.5) 77 | lines_x <- list() 78 | for(i in seq(1, NLINES)) { 79 | lines_x[[i]] <- coefs 80 | } 81 | x_lab <- "Zipf coefficients" 82 | 83 | # first plot 84 | key <- "transmission_term_size" 85 | y_lab <- "Transmission (GB/s)" 86 | lines_y_1 <- get_all_lines(clusters, key, EXP_START, EXP_END / 2) 87 | lines_y_2 <- get_all_lines(clusters, key, EXP_END / 2, EXP_END) 88 | 89 | plot_lines_retwis(lines_x, lines_y_1, colors, 90 | y_lab=y_lab, 91 | y_max=2, 92 | lwd=2, 93 | pch=pch) 94 | title("0%-50%", cex.main=1.3) 95 | 96 | plot_lines_retwis(lines_x, lines_y_2, colors, 97 | y_max=2, 98 | lwd=2, 99 | pch=pch) 100 | title("50%-100%", cex.main=1.3) 101 | print(lines_y_2) 102 | 103 | # second plot 104 | key <- "memory_term_size" 105 | y_lab <- "Avg. Memory (GB)" 106 | lines_y_1 <- get_all_lines(clusters, key, EXP_START, EXP_END / 2) 107 | lines_y_2 <- get_all_lines(clusters, key, EXP_END / 2, EXP_END) 108 | 109 | plot_lines_retwis(lines_x, lines_y_1, colors, 110 | y_lab=y_lab, 111 | y_max=2, 112 | lwd=2, 113 | pch=pch) 114 | 115 | plot_lines_retwis(lines_x, lines_y_2, colors, 116 | y_max=2, 117 | lwd=2, 118 | pch=pch) 119 | print(lines_y_2) 120 | 121 | x_axis_label(x_lab) 122 | 123 | par(op) # Leave the last plot 124 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 125 | xpd=NA) # Allow plotting outside the plot region 126 | 127 | # legend 128 | legend( 129 | "bottom", 130 | # 0.1, # x 131 | # 0, # y 132 | inset=-.25, 133 | cex=1, 134 | legend=labels, 135 | pch=pch, 136 | col=colors, 137 | ncol=2, 138 | box.col=NA # remove box 139 | ) 140 | 141 | # close device 142 | dev.off() 143 | } 144 | 145 | main() 146 | warnings() 147 | -------------------------------------------------------------------------------- /evaluation/icde19/retwis_processing.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(460|490)'" 5 | 6 | get_lines <- function(clusters, key, file_index) { 7 | map(clusters, function(cluster) { 8 | files <- system(cluster, intern=TRUE) 9 | any_non_zero <- 0.1 # fallback when result files are missing 10 | # skip if no file 11 | if(length(files) == 2) { json(c(files[file_index]))[[key]] } 12 | else { any_non_zero } 13 | }) 14 | } 15 | 16 | get_all_lines <- function(clusters, key) { 17 | lines_y <- list() 18 | lines_y[[1]] <- (get_lines(clusters, key, 1) / get_lines(clusters, key, 2) * 100) - 100 19 | lines_y 20 | } 21 | 22 | # draw!
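# lines_y[[1]] computed above is the relative CPU cost of plain delta-based (score 460, file index 1)
# over delta-based BP+RR (score 490, file index 2), in percent: (delta / bp_rr) * 100 - 100.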
23 | main <- function() { 24 | output_file <- "retwis_processing.png" 25 | 26 | clusters <- c( 27 | "ls -d processed/* | grep ~50~0~retwis", 28 | "ls -d processed/* | grep ~75~0~retwis", 29 | "ls -d processed/* | grep ~100~0~retwis", 30 | "ls -d processed/* | grep ~125~0~retwis", 31 | "ls -d processed/* | grep ~150~0~retwis" 32 | ) 33 | clusters <- map(clusters, function(c) { 34 | paste(c, " | grep -E ", TO_KEEP, sep="") 35 | }) 36 | 37 | # avoid scientific notation 38 | options(scipen=999) 39 | 40 | # open device 41 | png(filename=output_file, width=550, height=300, res=130) 42 | 43 | # change outer margins 44 | op <- par( 45 | oma=c(4,3.5,1,1.5), # room for the legend 46 | mfrow=c(1,1), # 2x4 matrix 47 | mar=c(0, 0, 0, 0) # spacing between plots 48 | ) 49 | 50 | # style stuff 51 | colors <- c( 52 | "springgreen4" 53 | ) 54 | 55 | coefs <- c(0.5, 0.75, 1, 1.25, 1.5) 56 | lines_x <- list() 57 | lines_x[[1]] <- coefs 58 | x_lab <- "Zipf coefficients" 59 | 60 | # first plot 61 | key <- "processing" 62 | y_lab <- "CPU overhead (%)" 63 | lines_y <- get_all_lines(clusters, key) 64 | ytick <- round(lines_y[[1]]) 65 | 66 | print(lines_y[[1]] / 100) 67 | 68 | plot_lines_retwis(lines_x, lines_y, colors, 69 | x_lab=x_lab, 70 | y_lab=y_lab, 71 | log="", 72 | las=0, 73 | digits=0, 74 | lwd=2) 75 | polygon( 76 | c(min(lines_x[[1]]), lines_x[[1]], max(lines_x[[1]])), 77 | c(min(lines_y[[1]]), lines_y[[1]], min(lines_y[[1]])), 78 | col=rgb(37/255,211/255,102/255,0.2), 79 | border=F 80 | ) 81 | 82 | 83 | # close device 84 | dev.off() 85 | } 86 | 87 | main() 88 | warnings() 89 | -------------------------------------------------------------------------------- /evaluation/icde19/second.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(110|460|490)'" 5 | 6 | # draw! 
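# second.R is the CPU-time companion to first.R: the same three configurations
# (scores 110/460/490), normalised by the state-based run.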
7 | main <- function() { 8 | output_file <- "second.png" 9 | 10 | cluster <- "ls -d processed/* | grep 0~gset~partialmesh~15" 11 | cluster <- paste(cluster, " | grep -E ", TO_KEEP, sep="") 12 | 13 | labels <- c( 14 | "State-based", 15 | "Delta-based", 16 | "This paper" 17 | ) 18 | 19 | # avoid scientific notation 20 | options(scipen=999) 21 | 22 | # open device 23 | png(filename=output_file, width=200, height=410, res=110) 24 | 25 | # change outer margins 26 | op <- par( 27 | oma=c(5,2,2,0), # room for the legend 28 | mfrow=c(1,1), # 1x1 matrix 29 | mar=c(1.5,1.5,0.5,0.5) # spacing between plots 30 | ) 31 | 32 | # style stuff 33 | colors <- c( 34 | "snow4", 35 | "springgreen4", 36 | "gray22" 37 | ) 38 | angles <- c(0, 45, 135) 39 | densities <- c(0, 30, 45) 40 | 41 | files <- system(cluster, intern=TRUE) 42 | 43 | # nothing to plot if no file 44 | if(length(files) == 0) return(invisible(NULL)) 45 | 46 | # keys 47 | key <- "processing" 48 | 49 | # data 50 | title <- "" 51 | lines <- lapply(files, function(f) { json(c(f))[[key]] }) 52 | 53 | # wrto (state-based) 54 | if(length(lines) == length(labels)) { 55 | state_based <- lines[[1]] 56 | lines <- map(lines, function(v) { v / state_based }) 57 | 58 | # plot bars 59 | y_min <- 0 60 | plot_bars(title, lines, y_min, colors, angles, densities, 61 | bar_cex=0.8) 62 | } 63 | 64 | # axis labels 65 | y_axis_label("CPU time ratio wrto State-based") 66 | 67 | par(op) # Leave the last plot 68 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 69 | xpd=NA) # Allow plotting outside the plot region 70 | 71 | # legend 72 | legend( 73 | -.65, # x 74 | -.11, # y 75 | cex=1, 76 | legend=labels, 77 | angle=angles, 78 | density=densities, 79 | fill=colors, 80 | horiz=FALSE, 81 | box.col=NA # remove box 82 | ) 83 | 84 | # close device 85 | dev.off() 86 | } 87 | 88 | main() 89 | warnings() 90 | -------------------------------------------------------------------------------- /evaluation/icde19_revision.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vitorenesduarte/exp/99486aba658c1b5f077275ceca3eef173375d050/evaluation/icde19_revision.tar.gz -------------------------------------------------------------------------------- /evaluation/more_plots/crdt.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | TO_KEEP <- "'(110|220|230|340|350|490)'" 5 | 6 | # draw!
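# TO_KEEP scores (from preprocess.py's get_score()): 110 state-based, 220/230 scuttlebutt
# without/with GC, 340 naive op-based (ldb_op_ii=False), 350 op-based, 490 delta-based BP+RR,
# matching the labels below.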
7 | main <- function() { 8 | output_file <- "crdt.png" 9 | 10 | clusters <- c( 11 | "ls -d processed/* | grep 10~gmap~tree", 12 | "ls -d processed/* | grep 30~gmap~tree", 13 | "ls -d processed/* | grep 60~gmap~tree", 14 | "ls -d processed/* | grep 100~gmap~tree", 15 | "ls -d processed/* | grep 10~gmap~partialmesh", 16 | "ls -d processed/* | grep 30~gmap~partialmesh", 17 | "ls -d processed/* | grep 60~gmap~partialmesh", 18 | "ls -d processed/* | grep 100~gmap~partialmesh" 19 | ) 20 | clusters <- map(clusters, function(c) { 21 | paste(c, " | grep -E ", TO_KEEP, sep="") 22 | }) 23 | titles <- c( 24 | "GMap 10% - Tree", 25 | "GMap 30% - Tree", 26 | "GMap 60% - Tree", 27 | "GMap 100% - Tree", 28 | "GMap 10% - Mesh", 29 | "GMap 30% - Mesh", 30 | "GMap 60% - Mesh", 31 | "GMap 100% - Mesh" 32 | ) 33 | labels <- c( 34 | "State-based", 35 | "Scuttlebutt", 36 | "Scuttlebutt-GC", 37 | "Op-based Naive", 38 | "Op-based", 39 | "Delta-based BP+RR" 40 | ) 41 | 42 | # avoid scientific notation 43 | options(scipen=999) 44 | 45 | # open device 46 | png(filename=output_file, width=2600, height=1200, res=240) 47 | 48 | # change outer margins 49 | op <- par( 50 | oma=c(5,3,0,0), # room for the legend 51 | mfrow=c(2,4), # 2x4 matrix 52 | mar=c(2,2,3,1) # spacing between plots 53 | ) 54 | 55 | # style stuff 56 | colors <- c( 57 | "snow4", 58 | "darkgoldenrod", 59 | "steelblue4", 60 | "tomato", 61 | "yellow3", 62 | "gray22" 63 | ) 64 | pch <- c(1,7,8,9,2,6) 65 | 66 | for(i in 1:length(clusters)) { 67 | files <- system(clusters[i], intern=TRUE) 68 | 69 | # skip if no file 70 | if(length(files) == 0) next 71 | 72 | # keys 73 | key_x <- "transmission_crdt_compressed_x" 74 | key_y <- "transmission_crdt_compressed" 75 | 76 | # data 77 | title <- titles[i] 78 | lines_x <- lapply(files, function(f) { json(c(f))[[key_x]] }) 79 | lines_y <- lapply(files, function(f) { json(c(f))[[key_y]] }) 80 | 81 | # plot lines 82 | plot_lines(title, lines_x, lines_y, colors, 83 | pch=pch) 84 | } 85 | 86 | # axis labels 87 | x_axis_label("Time (s)") 88 | y_axis_label("CRDT Transmission") 89 | 90 | par(op) # Leave the last plot 91 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 92 | xpd=NA) # Allow plotting outside the plot region 93 | 94 | # legend 95 | legend( 96 | -.03, # x 97 | -.2, # y 98 | cex=0.92, 99 | legend=labels, 100 | pch=pch, 101 | col=colors, 102 | horiz=TRUE, 103 | box.col=NA # remove box 104 | ) 105 | 106 | # close device 107 | dev.off() 108 | } 109 | 110 | main() 111 | warnings() 112 | -------------------------------------------------------------------------------- /evaluation/more_plots/plot1.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | # draw! 
5 | main <- function() { 6 | output_file <- "plot1.png" 7 | 8 | clusters <- c( 9 | "ls -d processed/* | grep gset~tree", 10 | "ls -d processed/* | grep gset~partialmesh", 11 | "ls -d processed/* | grep gcounter~tree", 12 | "ls -d processed/* | grep gcounter~partialmesh" 13 | ) 14 | ## 0 transmission 15 | titles <- c( 16 | "GSet - Tree", 17 | "GSet - Mesh", 18 | "GCounter - Tree", 19 | "GCounter - Mesh" 20 | ) 21 | labels <- c( 22 | "State-based", 23 | "Scuttlebutt", 24 | "Delta-based", 25 | "Delta-based BP", 26 | "Delta-based RR", 27 | "Delta-based BP+RR" 28 | ) 29 | 30 | # avoid scientific notation 31 | options(scipen=999) 32 | 33 | # open device 34 | png(filename=output_file, width=2600, height=650, res=240) 35 | 36 | # change outer margins 37 | op <- par( 38 | oma=c(5,3,0,0), # room for the legend 39 | mfrow=c(1,4), # 2x4 matrix 40 | mar=c(2,2,3,1) # spacing between plots 41 | ) 42 | 43 | # style stuff 44 | colors <- c( 45 | "snow4", 46 | "steelblue4", 47 | "springgreen4", 48 | "darkorange1", 49 | "red4", 50 | "gray22" 51 | ) 52 | 53 | for(i in 1:length(clusters)) { 54 | files <- system(clusters[i], intern=TRUE) 55 | 56 | # skip if no file 57 | if(length(files) == 0) next 58 | 59 | # keys 60 | key_x <- "transmission_compressed_x" 61 | key_y <- "transmission_compressed" 62 | 63 | # data 64 | title <- titles[i] 65 | lines_x <- lapply(files, function(f) { json(c(f))[[key_x]] }) 66 | lines_y <- lapply(files, function(f) { json(c(f))[[key_y]] }) 67 | 68 | # plot lines 69 | plot_lines(title, lines_x, lines_y, colors) 70 | } 71 | 72 | # axis labels 73 | x_axis_label("Time (s)") 74 | y_axis_label("Transmission") 75 | 76 | par(op) # Leave the last plot 77 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 78 | xpd=NA) # Allow plotting outside the plot region 79 | 80 | # legend 81 | legend( 82 | -.03, # x 83 | -.75, # y 84 | cex=0.92, 85 | legend=labels, 86 | pch=c(1:10), 87 | col=colors, 88 | horiz=TRUE, 89 | box.col=NA # remove box 90 | ) 91 | 92 | # close device 93 | dev.off() 94 | } 95 | 96 | main() 97 | warnings() 98 | -------------------------------------------------------------------------------- /evaluation/more_plots/plot4.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | # draw! 
5 | main <- function() { 6 | output_file <- "plot4.png" 7 | 8 | clusters <- c( 9 | "ls -d processed/* | grep -v False~True | grep -v True~False | grep 10~gmap~partialmesh", 10 | "ls -d processed/* | grep -v False~True | grep -v True~False | grep 100~gmap~partialmesh" 11 | ) 12 | titles <- c( 13 | "GMap 10%", 14 | "GMap 100%" 15 | ) 16 | labels <- c( 17 | "State-based", 18 | "Scuttlebutt", 19 | "Delta-based", 20 | "Delta-based BP+RR" 21 | ) 22 | 23 | # avoid scientific notation 24 | options(scipen=999) 25 | 26 | # open device 27 | png(filename=output_file, width=2600, height=650, res=240) 28 | 29 | # change outer margins 30 | op <- par( 31 | oma=c(5,3,0,0), # room for the legend 32 | mfrow=c(1,4), # 2x4 matrix 33 | mar=c(2,2,3,1) # spacing between plots 34 | ) 35 | 36 | # style stuff 37 | colors <- c( 38 | "snow4", 39 | "steelblue4", 40 | "red4", 41 | "gray22" 42 | ) 43 | 44 | for(i in 1:length(clusters)) { 45 | files <- system(clusters[i], intern=TRUE) 46 | 47 | # skip if no file 48 | if(length(files) == 0) next 49 | 50 | # keys 51 | key_a <- "latency_local" 52 | key_b <- "latency_remote" 53 | 54 | # data 55 | title_a <- paste(titles[i], "Sender", sep=" - ") 56 | title_b <- paste(titles[i], "Receiver", sep=" - ") 57 | lines_a <- lapply(files, function(f) { json(c(f))[[key_a]] }) 58 | lines_b <- lapply(files, function(f) { json(c(f))[[key_b]] }) 59 | 60 | # plot cdf 61 | y_max <- .94 62 | y_step <- 0.01 63 | plot_cdf(title_a, lines_a, colors, y_max, y_step) 64 | plot_cdf(title_b, lines_b, colors, y_max, y_step) 65 | } 66 | 67 | # axis labels 68 | x_axis_label("Processing (ms)") 69 | y_axis_label("CDF") 70 | 71 | par(op) # Leave the last plot 72 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 73 | xpd=NA) # Allow plotting outside the plot region 74 | 75 | # legend 76 | legend( 77 | "bottom", 78 | inset=-1.25, 79 | # 0, # x 80 | # -1, # y 81 | cex=0.92, 82 | legend=labels, 83 | col=colors, 84 | lty=c(1:4), 85 | lwd=c(1:4), 86 | horiz=TRUE, 87 | box.col=NA # remove box 88 | ) 89 | 90 | # close device 91 | dev.off() 92 | } 93 | 94 | main() 95 | warnings() 96 | -------------------------------------------------------------------------------- /evaluation/more_plots/plot5.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | # draw! 
5 | main <- function() { 6 | output_file <- "plot5.png" 7 | 8 | clusters <- c( 9 | "ls -d processed/* | grep -v False~True | grep -v True~False | grep 10~gmap~partialmesh", 10 | "ls -d processed/* | grep -v False~True | grep -v True~False | grep 100~gmap~partialmesh" 11 | ) 12 | titles <- c( 13 | "GMap 10%", 14 | "GMap 100%" 15 | ) 16 | labels <- c( 17 | "State-based", 18 | "Scuttlebutt", 19 | "Delta-based", 20 | "Delta-based BP+RR" 21 | ) 22 | 23 | # avoid scientific notation 24 | options(scipen=999) 25 | 26 | # open device 27 | png(filename=output_file, width=2600, height=650, res=240) 28 | 29 | # change outer margins 30 | op <- par( 31 | oma=c(5,3,0,0), # room for the legend 32 | mfrow=c(1,4), # 2x4 matrix 33 | mar=c(2,2,3,1) # spacing between plots 34 | ) 35 | 36 | # style stuff 37 | colors <- c( 38 | "snow4", 39 | "steelblue4", 40 | "red4", 41 | "gray22" 42 | ) 43 | angles <- c(0, 45, 135, 45, 135, 45) 44 | densities <- c(0, 15, 15, 30, 30, 45) 45 | 46 | for(i in 1:length(clusters)) { 47 | files <- system(clusters[i], intern=TRUE) 48 | 49 | # skip if no file 50 | if(length(files) == 0) next 51 | 52 | # keys 53 | key_a <- "latency_local" 54 | key_b <- "latency_remote" 55 | 56 | # data 57 | title_a <- paste(titles[i], "Sender", sep=" - ") 58 | title_b <- paste(titles[i], "Receiver", sep=" - ") 59 | lines_a <- lapply(files, function(f) { json(c(f))[[key_a]] }) 60 | lines_b <- lapply(files, function(f) { json(c(f))[[key_b]] }) 61 | 62 | # plot cdf 63 | plot_box(title_a, lines_a, colors) 64 | plot_box(title_b, lines_b, colors) 65 | } 66 | 67 | # axis labels 68 | x_axis_label("Processing (ms)") 69 | y_axis_label("CDF") 70 | 71 | par(op) # Leave the last plot 72 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 73 | xpd=NA) # Allow plotting outside the plot region 74 | 75 | # legend 76 | legend( 77 | "bottom", 78 | inset=-1.25, 79 | # 0, # x 80 | # -1, # y 81 | cex=0.92, 82 | legend=labels, 83 | col=colors, 84 | lty=c(1:3), 85 | lwd=c(1:3), 86 | horiz=TRUE, 87 | box.col=NA # remove box 88 | ) 89 | 90 | # close device 91 | dev.off() 92 | } 93 | 94 | main() 95 | warnings() 96 | -------------------------------------------------------------------------------- /evaluation/more_plots/processing.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | # draw! 
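# Caveat: labels lists seven algorithms while colors/angles/densities define only six styles,
# so if seven matching files exist R recycles the first style for the last bar.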
5 | main <- function() { 6 | output_file <- "processing.png" 7 | 8 | clusters <- c( 9 | "ls -d processed/* | grep 0~gset~partialmesh~15", 10 | "ls -d processed/* | grep 0~gcounter~partialmesh~15", 11 | "ls -d processed/* | grep 10~gmap~partialmesh~15", 12 | "ls -d processed/* | grep 100~gmap~partialmesh~15" 13 | ) 14 | titles <- c( 15 | "GSet - Mesh", 16 | "GCounter - Mesh", 17 | "GMap 10% - Mesh", 18 | "GMap 100% - Mesh" 19 | ) 20 | labels <- c( 21 | "State-based", 22 | "Scuttlebutt", 23 | "Scuttlebutt+", 24 | "Delta-based", 25 | "Delta-based BP", 26 | "Delta-based RR", 27 | "Delta-based BP+RR" 28 | ) 29 | 30 | # avoid scientific notation 31 | options(scipen=999) 32 | 33 | # open device 34 | png(filename=output_file, width=800, height=650, res=130) 35 | 36 | # change outer margins 37 | op <- par( 38 | oma=c(3.5,2,0,0), # room for the legend 39 | mfrow=c(2,2), # 2x4 matrix 40 | mar=c(2.5,2,2,1) # spacing between plots 41 | ) 42 | 43 | # style stuff 44 | colors <- c( 45 | "snow4", 46 | "steelblue4", 47 | "springgreen4", 48 | "darkorange1", 49 | "red4", 50 | "gray22" 51 | ) 52 | angles <- c(0, 45, 135, 45, 135, 45) 53 | densities <- c(0, 15, 15, 30, 30, 45) 54 | 55 | for(i in 1:length(clusters)) { 56 | files <- system(clusters[i], intern=TRUE) 57 | 58 | # skip if no file 59 | if(length(files) == 0) next 60 | 61 | # keys 62 | key <- "processing" 63 | 64 | # data 65 | title <- titles[i] 66 | lines <- lapply(files, function(f) { json(c(f))[[key]] }) 67 | 68 | # wrto (r) 69 | if(length(lines) == length(labels)) { 70 | rr_index <- length(labels) 71 | rr <- lines[[rr_index]] 72 | lines <- map(lines, function(v) { v / rr }) 73 | 74 | # plot bars 75 | y_min <- 0 76 | plot_bars(title, lines, y_min, colors, angles, densities) 77 | } 78 | } 79 | 80 | # axis labels 81 | y_axis_label("CPU time ratio wrto BP+RR") 82 | 83 | par(op) # Leave the last plot 84 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 85 | xpd=NA) # Allow plotting outside the plot region 86 | 87 | # legend 88 | legend( 89 | 0.1, # x 90 | -.02, # y 91 | cex=1, 92 | legend=labels, 93 | angle=angles, 94 | density=densities, 95 | fill=colors, 96 | ncol=2, 97 | box.col=NA # remove box 98 | ) 99 | 100 | # close device 101 | dev.off() 102 | } 103 | 104 | main() 105 | warnings() 106 | -------------------------------------------------------------------------------- /evaluation/more_plots/retwis_latency.R: -------------------------------------------------------------------------------- 1 | source("util.R") 2 | source("generic.R") 3 | 4 | get_lines <- function(cluster, key, file_index) { 5 | files <- system(cluster, intern=TRUE) 6 | 7 | # skip if no file 8 | if(length(files) == 2) { 9 | map(json(c(files[file_index]))[["latency"]][[key]], 10 | function(micro) { micro / 1000 }) 11 | } 12 | else { c() } 13 | } 14 | 15 | get_all_lines <- function(cluster, key) { 16 | lines_y <- list() 17 | lines_y[[1]] <- get_lines(cluster, key, 1) 18 | lines_y[[2]] <- get_lines(cluster, key, 2) 19 | lines_y 20 | } 21 | 22 | # draw! 
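# Latencies are recorded in microseconds and converted to milliseconds by get_lines above
# (micro / 1000) before the CDFs are drawn.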
23 | main <- function() { 24 | output_file <- "retwis_latency.png" 25 | 26 | cluster <- "ls -d processed/* | grep ~100~0~retwis" 27 | labels <- c( 28 | "Delta-based", 29 | "Delta-based BP+RR" 30 | ) 31 | 32 | # avoid scientific notation 33 | options(scipen=999) 34 | 35 | # open device 36 | png(filename=output_file, width=850, height=320, res=130) 37 | 38 | # change outer margins 39 | op <- par( 40 | oma=c(5,3,0,0), # room for the legend 41 | mfrow=c(1,3), # 2x4 matrix 42 | mar=c(2,2,3,1) # spacing between plots 43 | ) 44 | 45 | # style stuff 46 | colors <- c( 47 | "springgreen4", 48 | "gray22" 49 | ) 50 | 51 | # follow plot 52 | key <- "follow" 53 | lines <- get_all_lines(cluster, key) 54 | title <- "Follow" 55 | 56 | # plot cdf 57 | y_max <- .94 58 | y_step <- 0.01 59 | plot_cdf(title, lines, colors, y_max, y_step) 60 | 61 | # post plot 62 | key <- "post" 63 | lines <- get_all_lines(cluster, key) 64 | title <- "Post" 65 | 66 | # plot cdf 67 | y_max <- .94 68 | y_step <- 0.01 69 | plot_cdf(title, lines, colors, y_max, y_step) 70 | 71 | # update plot 72 | key <- "timeline" 73 | lines <- get_all_lines(cluster, key) 74 | title <- "Timeline" 75 | 76 | # plot cdf 77 | y_max <- .94 78 | y_step <- 0.01 79 | plot_cdf(title, lines, colors, y_max, y_step) 80 | 81 | # axis labels 82 | x_axis_label("Latency (ms)") 83 | y_axis_label("CDF") 84 | 85 | par(op) # Leave the last plot 86 | op <- par(usr=c(0,1,0,1), # Reset the coordinates 87 | xpd=NA) # Allow plotting outside the plot region 88 | 89 | # legend 90 | legend( 91 | "bottom", 92 | inset=-1.6, 93 | # 0, # x 94 | # -1, # y 95 | cex=0.92, 96 | legend=labels, 97 | col=colors, 98 | lty=c(1:4), 99 | lwd=c(1:4), 100 | horiz=TRUE, 101 | box.col=NA # remove box 102 | ) 103 | 104 | # close device 105 | dev.off() 106 | } 107 | 108 | main() 109 | warnings() 110 | -------------------------------------------------------------------------------- /evaluation/preprocess.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os, os.path, json 4 | import shutil 5 | 6 | METRIC_DIR = "metrics" 7 | PROCESSED_DIR = "processed" 8 | CONFIG_FILE = "rsg.json" 9 | TS="ts" 10 | SIZE="size" 11 | TERM_SIZE="term_size" 12 | COMPRESS=12 # every x 13 | #MAX_TIME=60 14 | 15 | def error(message): 16 | """ 17 | Display error message and exit. 18 | """ 19 | print(message) 20 | print("Exiting...") 21 | exit() 22 | 23 | def ls(dir): 24 | """ 25 | List a directory, returning full path. 26 | """ 27 | return ls_grep(dir, lambda x: True) 28 | 29 | def ls_grep(dir, filter): 30 | """ 31 | List a directory, returning full path. 32 | Only files that match the filter are returned. 33 | """ 34 | return [os.path.join(dir, f) for f in os.listdir(dir) if filter(f) and f[0] != '.'] 35 | 36 | def get_metric_files(): 37 | """ 38 | Return a dictionary from run (unique timestamp) 39 | to list of metric files. 40 | """ 41 | d = {} 42 | dirs = ls(METRIC_DIR) 43 | 44 | for dir in dirs: 45 | # only list files that are not the config file 46 | files = ls_grep(dir, lambda x: x != CONFIG_FILE) 47 | d[dir] = files 48 | 49 | return d 50 | 51 | def read_json(f): 52 | """ 53 | Read a json file. 54 | """ 55 | r = {} 56 | with open(f) as fd: 57 | r = json.load(fd) 58 | 59 | return r 60 | 61 | def key(config): 62 | """ 63 | Create a key from a config file. 
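    The key is the "~"-join of the config values listed below. For example
    (illustrative), a delta-based BP+RR retwis run could yield
    "0.5~undefined~retwis~partialmesh~16~delta_based~True~True~undefined~undefined",
    which get_score() later maps onto the 490 prefix used by the R scripts.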
64 | """ 65 | 66 | # get start_time from config 67 | start_time = config["start_time"] 68 | 69 | keys = [ 70 | "exp_retwis_zipf", 71 | "exp_gmap_simulation_key_percentage", 72 | "exp_simulation", 73 | "exp_overlay", 74 | "exp_node_number", 75 | "ldb_mode", 76 | "ldb_redundant_dgroups", 77 | "ldb_dgroup_back_propagation", 78 | "ldb_scuttlebutt_gc", 79 | "ldb_op_ii" 80 | ] 81 | 82 | l = [] 83 | for k in keys: 84 | v = "undefined" 85 | if k in config: 86 | v = str(config[k]) 87 | l.append(v) 88 | 89 | k = "~".join(l) 90 | return (start_time, k) 91 | 92 | def group_by_config(d): 93 | """ 94 | Given metric files, group them by config file. 95 | """ 96 | r = {} 97 | 98 | for dir in d: 99 | config_path = os.path.join(dir, CONFIG_FILE) 100 | (start_time, k) = key( 101 | read_json(config_path) 102 | ) 103 | 104 | # create empty dictionary if key not already in dictionary 105 | if k in r: 106 | error("key " + k + " already found!") 107 | 108 | r[k] = {} 109 | 110 | for file in d[dir]: 111 | # read metric file 112 | json = read_json(file) 113 | 114 | # for all time-series types (all but processing) 115 | # for all metrics remove start_time 116 | for type in json: 117 | if type in ["transmission", "memory"]: 118 | # init type 119 | if not type in r[k]: 120 | r[k][type] = [] 121 | 122 | # store it 123 | for m in json[type]: 124 | m[TS] -= start_time 125 | r[k][type].append(json[type]) 126 | 127 | elif type == "processing": 128 | # init type 129 | if not type in r[k]: 130 | r[k][type] = 0 131 | 132 | # store it 133 | r[k][type] += json[type] 134 | 135 | elif type == "latency": 136 | # init type 137 | if not type in r[k]: 138 | r[k][type] = {} 139 | 140 | for latency_type in json[type]: 141 | # init latency type 142 | if not latency_type in r[k][type]: 143 | r[k][type][latency_type] = [] 144 | 145 | # store it 146 | r[k][type][latency_type].extend(json[type][latency_type]) 147 | 148 | return r 149 | 150 | def get_higher_ts(runs): 151 | """ 152 | Find the higher timestamp of run. 153 | """ 154 | higher = 0 155 | for run in runs: 156 | for metric in run: 157 | higher = max(higher, metric[TS]) 158 | 159 | return higher 160 | 161 | def bottom_size(type): 162 | """ 163 | Return bottom size. 164 | """ 165 | 166 | if type in ["transmission", "memory"]: 167 | return [0, 0, 0] 168 | 169 | error("type not found.") 170 | 171 | def add(type, sizeA, sizeB): 172 | """ 173 | Sum two sizes 174 | """ 175 | 176 | if type in ["transmission", "memory"]: 177 | return [sizeA[0] + sizeB[0], 178 | sizeA[1] + sizeB[1], 179 | sizeA[2] + sizeB[2]] 180 | 181 | error("type not found") 182 | 183 | def default(type, previous): 184 | """ 185 | Default value given a type: 186 | - if transmission, 0 187 | - if memory, previous value 188 | """ 189 | one = ["transmission"] 190 | two = ["memory"] 191 | 192 | if type in one: 193 | return [0, 0, 0] 194 | if type in two: 195 | return previous 196 | 197 | error("type not found") 198 | 199 | def ignore_pre_big_bang(run): 200 | """ 201 | Remove metrics before timestamp 0. 202 | """ 203 | 204 | return [m for m in run if m[TS] >= 0] 205 | #return [m for m in run if m[TS] >= 0 and m[TS] < MAX_TIME] 206 | 207 | def b_to_mb(bytes): 208 | """ 209 | Convert from bytes to gigabytes. 210 | """ 211 | return bytes / 1000 212 | 213 | def assume_unknown_values(d): 214 | """ 215 | Assume values for timestamps not reported for transmission graphs. 
216 | """ 217 | 218 | for key in d: 219 | # get all time-series types 220 | types = ["transmission", "memory"] 221 | 222 | for type in types: 223 | runs = [] 224 | # find the higher timestamp for all runs 225 | higher_ts = get_higher_ts(d[key][type]) 226 | 227 | for run in d[key][type]: 228 | # remove timestamps before 0 229 | run = ignore_pre_big_bang(run) 230 | 231 | # get bottom size 232 | bs = bottom_size(type) 233 | 234 | # since we can have several metrics 235 | # for the same timestamp, 236 | # aggregate metrics per timestamp 237 | ts_to_size = {} 238 | 239 | for metric in run: 240 | ts = metric[TS] 241 | size = metric[SIZE] 242 | term_size = b_to_mb(metric[TERM_SIZE]) 243 | size.append(term_size) 244 | 245 | # if ts not in map 246 | # create an entry 247 | if not ts in ts_to_size: 248 | ts_to_size[ts] = bs 249 | 250 | ts_to_size[ts] = add(type, ts_to_size[ts], size) 251 | 252 | previous = bs 253 | 254 | # create bottom values for unknown timestamps 255 | for ts in range(0, higher_ts): 256 | if not ts in ts_to_size: 257 | ts_to_size[ts] = default(type, previous) 258 | 259 | previous = ts_to_size[ts] 260 | 261 | # store the ts_to_size map 262 | runs.append(ts_to_size) 263 | 264 | d[key][type] = runs 265 | 266 | return d 267 | 268 | def sum_lists(ls): 269 | """ 270 | Sum two lists. 271 | """ 272 | return [sum(x) for x in zip(*ls)] 273 | 274 | def divide_lists(ls, by): 275 | """ 276 | Divide lists by 'by'. 277 | """ 278 | return [x / float(by) for x in ls] 279 | 280 | def average(d): 281 | """ 282 | Average runs. 283 | """ 284 | for key in d: 285 | # get all time-series types 286 | types = ["transmission", "memory"] 287 | 288 | for type in types: 289 | # number of runs 290 | runs_number = len(d[key][type]) 291 | # number of metrics (all but processing) 292 | metrics_number = len(d[key][type][0]) - 1 293 | # get bottom size 294 | bs = bottom_size(type) 295 | # list where we'll store the sum of the sizes 296 | sum = [bs for i in range(0, metrics_number)] 297 | # sum all runs 298 | for run in d[key][type]: 299 | for i in range(0, metrics_number): 300 | ls = [ 301 | sum[i], 302 | run[i] 303 | ] 304 | sum[i] = sum_lists(ls) 305 | # avg of sum 306 | avg = [divide_lists(ls, runs_number) for ls in sum] 307 | # store avg 308 | d[key][type] = avg 309 | return d 310 | 311 | def to_ms(microseconds): 312 | """ 313 | Convertes microseconds to milliseconds. 314 | """ 315 | return microseconds / float(1000) 316 | 317 | def aggregate(d): 318 | """ 319 | Aggregate types of the same run. 
320 | """ 321 | 322 | def get_compress_index(key): 323 | m = { 324 | 110: 1, 325 | 220: 5, 326 | 230: 8, 327 | 340: 2, 328 | 350: 4, 329 | 460: 6, 330 | 470: 3, 331 | 480: 0, 332 | 490: 7 333 | } 334 | 335 | score = get_score(key) 336 | if score in m: 337 | return (COMPRESS * m[score]) / len(m) 338 | else: 339 | error("score not found: " + score) 340 | 341 | r = {} 342 | 343 | for key in d: 344 | # create key in dictionary 345 | r[key] = {} 346 | r[key]["transmission_metadata"] = [] 347 | r[key]["transmission_crdt"] = [] 348 | r[key]["transmission"] = [] 349 | r[key]["transmission_term_size"] = [] 350 | r[key]["memory_algorithm"] = [] 351 | r[key]["memory_crdt"] = [] 352 | r[key]["memory_term_size"] = [] 353 | r[key]["processing"] = d[key]["processing"] 354 | r[key]["latency"] = d[key]["latency"] 355 | 356 | # group transmission 357 | for [M, C, T] in d[key]["transmission"]: 358 | r[key]["transmission_metadata"].append(M) 359 | r[key]["transmission_crdt"].append(C) 360 | r[key]["transmission"].append(M + C) 361 | r[key]["transmission_term_size"].append(T) 362 | 363 | # compress transmissions (total and crdt only) 364 | # e.g. sum every 10 values 365 | # and average them 366 | for to_compress_key in ["transmission", "transmission_crdt"]: 367 | xs = [] 368 | ys = [] 369 | current_sum = 0 370 | run_len = len(r[key][to_compress_key]) 371 | index = get_compress_index(key) 372 | 373 | for i in range(run_len): 374 | # update sum 375 | current_sum += r[key][to_compress_key][i] 376 | 377 | if(i % COMPRESS == index): 378 | ys.append(current_sum) 379 | # reset sum 380 | current_sum = 0 381 | 382 | for i in range(len(ys)): 383 | xs.append((i * COMPRESS) + index) 384 | 385 | ys = divide_lists(ys, COMPRESS) 386 | r[key][to_compress_key + "_compressed"] = ys 387 | r[key][to_compress_key + "_compressed_x"] = xs 388 | 389 | # aggregate memory 390 | for [A, C, T] in d[key]["memory"]: 391 | r[key]["memory_algorithm"].append(A) 392 | r[key]["memory_crdt"].append(C) 393 | r[key]["memory_term_size"].append(T) 394 | 395 | return r 396 | 397 | def save_file(path, content): 398 | """ 399 | Save content in path. 400 | """ 401 | 402 | dir = os.path.dirname(path) 403 | 404 | # ensure directory exist 405 | if not os.path.exists(dir): 406 | os.makedirs(dir) 407 | 408 | # write content 409 | with open(path, "w") as fd: 410 | fd.write(content) 411 | 412 | def get_score(type): 413 | """ 414 | Returns the order of this type when drawing. 
415 | """ 416 | score = 0 417 | 418 | parts = type.split("~") 419 | mode = parts[5] 420 | rest = "_".join(parts[6:]) 421 | 422 | if mode == "state_based": 423 | score += 100 424 | elif mode == "scuttlebutt": 425 | score += 200 426 | elif mode == "op_based": 427 | score += 300 428 | elif mode == "delta_based": 429 | score += 400 430 | else: 431 | error("Mode not found: " + mode) 432 | 433 | if rest == "undefined_undefined_undefined_undefined": 434 | score += 10 435 | elif rest == "undefined_undefined_False_undefined": 436 | score += 20 437 | elif rest == "undefined_undefined_True_undefined": 438 | score += 30 439 | elif rest == "undefined_undefined_undefined_False": 440 | score += 40 441 | elif rest == "undefined_undefined_undefined_True": 442 | score += 50 443 | elif rest == "False_False_undefined_undefined": 444 | score += 60 445 | elif rest == "False_True_undefined_undefined": 446 | score += 70 447 | elif rest == "True_False_undefined_undefined": 448 | score += 80 449 | elif rest == "True_True_undefined_undefined": 450 | score += 90 451 | else: 452 | error("Remaining configuration not found: " + rest) 453 | 454 | return score 455 | 456 | def dump(d): 457 | """ 458 | Save average to files. 459 | """ 460 | 461 | # clear folder 462 | shutil.rmtree(PROCESSED_DIR, ignore_errors=True) 463 | 464 | for type in d: 465 | score = get_score(type) 466 | path = os.path.join(*[PROCESSED_DIR, str(score) + "~" + type]) 467 | content = json.dumps(d[type]) 468 | save_file(path, content) 469 | 470 | def main(): 471 | """ 472 | Main. 473 | """ 474 | d = get_metric_files() 475 | d = group_by_config(d) 476 | d = assume_unknown_values(d) 477 | d = average(d) 478 | d = aggregate(d) 479 | dump(d) 480 | 481 | main() 482 | -------------------------------------------------------------------------------- /evaluation/util.R: -------------------------------------------------------------------------------- 1 | # Given a package name, 2 | # install it (if not already installed) and load it. 3 | load <- function(package) { 4 | mirror <- "http://cran.us.r-project.org" 5 | 6 | if(!require(package, character.only=TRUE)) { 7 | install.packages(package, repos=mirror, dependencies=TRUE) 8 | require(package, character.only=TRUE) 9 | } 10 | } 11 | 12 | # Load a list of dependencies. 13 | load_dependencies <- function(packages) { 14 | Map(load, packages) 15 | } 16 | 17 | # given the vector of subpaths, 18 | # return the json file 19 | json <- function(v) { 20 | load("jsonlite") 21 | file_path <- paste(v, collapse="/") 22 | fromJSON(file_path) 23 | } 24 | 25 | # map a list 26 | map <- function(l, f) { 27 | unlist(lapply(l, f)) 28 | } 29 | -------------------------------------------------------------------------------- /include/exp.hrl: -------------------------------------------------------------------------------- 1 | -define(APP, exp). 2 | -type error() :: {error, atom()}. 3 | 4 | %% peer service 5 | -type ldb_node_id() :: node(). 6 | -type node_ip() :: inet:ip_address(). 7 | -type node_port() :: non_neg_integer(). 8 | -type node_spec() :: {ldb_node_id(), node_ip(), node_port()}. 9 | -type handler() :: term(). %% module 10 | -type message() :: term(). 11 | -type timestamp() :: non_neg_integer(). 12 | 13 | %% defaults 14 | -define(DEFAULT_OVERLAY, hyparview). 15 | -define(DEFAULT_MODE, state_based). 16 | 17 | %% logging 18 | -ifdef(debug). 19 | -define(DEBUG(M), lager:info(M)). 20 | -define(DEBUG(M, A), lager:info(M, A)). 21 | -else. 22 | -define(DEBUG(_M), ok). 23 | -define(DEBUG(_M, _A), ok). 24 | -endif. 
25 | 26 | %% barrier 27 | -define(PORT, 6866). 28 | -define(BARRIER_PORT, 6867). 29 | -define(REDIS_PORT, 6379). 30 | -define(TCP_OPTIONS, [binary, {active, true}, {packet, 4}, {keepalive, true}]). 31 | 32 | %% web config 33 | -define(WEB_IP, "0.0.0.0"). 34 | -define(WEB_PORT, 8080). 35 | -define(WEB_CONFIG, [{ip, ?WEB_IP}, 36 | {port, ?WEB_PORT}]). 37 | 38 | %% logs 39 | -type key() :: list(). 40 | -type value() :: binary(). 41 | -------------------------------------------------------------------------------- /priv/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vitorenesduarte/exp/99486aba658c1b5f077275ceca3eef173375d050/priv/.gitkeep -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {deps, [ 2 | acceptor_pool, 3 | eredis, 4 | jsx, 5 | lager, 6 | ldb, 7 | mochiweb, 8 | types, 9 | erlang_term 10 | ]}. 11 | 12 | {dialyzer_base_plt_apps, [kernel, stdlib, erts, sasl, eunit, syntax_tools, compiler, crypto]}. 13 | {xref_checks, [undefined_function_calls]}. 14 | {erl_opts, [debug_info, 15 | warnings_as_errors, 16 | {platform_define, "^[0-9]+", namespaced_types}, 17 | {parse_transform, lager_transform}]}. 18 | 19 | {eunit_opts, [verbose, {report,{eunit_surefire,[{dir,"."}]}}]}. 20 | {edoc_opts, [{preprocess, true}]}. 21 | 22 | %% coveralls 23 | {plugins, [coveralls]}. 24 | {cover_enabled, true}. 25 | {cover_export_enabled, true}. 26 | {coveralls_coverdata, ["_build/test/cover/eunit.coverdata", 27 | "_build/test/cover/ct.coverdata"]}. 28 | {coveralls_service_name, "travis-ci"}. 29 | 30 | {profiles, [ 31 | {test, [ 32 | {erl_opts, [{d, debug}]} 33 | ]}, 34 | {lint, [ 35 | {plugins, [rebar3_lint]} 36 | ]} 37 | ]}. 38 | 39 | {elvis, 40 | [#{dirs => ["src"], 41 | filter => "*.erl", 42 | rules => [ 43 | %% {elvis_style, line_length, 44 | %% #{ignore => [], 45 | %% limit => 80, 46 | %% skip_comments => false}}, 47 | {elvis_style, no_tabs}, 48 | {elvis_style, no_trailing_whitespace}, 49 | {elvis_style, macro_names, #{ignore => []}}, 50 | %% {elvis_style, macro_module_names}, 51 | {elvis_style, operator_spaces, #{rules => [{right, ","}, 52 | {right, "++"}, 53 | {left, "++"}]}}, 54 | %% {elvis_style, nesting_level, #{level => 3}}, 55 | {elvis_style, god_modules, 56 | #{limit => 25, 57 | ignore => []}}, 58 | {elvis_style, no_if_expression}, 59 | %% {elvis_style, invalid_dynamic_call, #{ignore => []}}, 60 | {elvis_style, no_behavior_info}, 61 | { 62 | elvis_style, 63 | module_naming_convention, 64 | #{regex => "^[a-z]([a-z0-9]*_?)*(_SUITE)?$", 65 | ignore => []} 66 | }, 67 | { 68 | elvis_style, 69 | function_naming_convention, 70 | #{regex => "^([a-z][a-z0-9]*_?)*$"} 71 | }, 72 | {elvis_style, state_record_and_type}, 73 | {elvis_style, no_spec_with_records} 74 | %% {elvis_style, dont_repeat_yourself, #{min_complexity => 10}} 75 | %% {elvis_style, no_debug_call, #{ignore => []}} 76 | ] 77 | }, 78 | #{dirs => ["."], 79 | filter => "Makefile", 80 | rules => [{elvis_project, no_deps_master_erlang_mk, #{ignore => []}}, 81 | {elvis_project, protocol_for_deps_erlang_mk, #{ignore => []}}] 82 | }, 83 | #{dirs => ["."], 84 | filter => "rebar.config", 85 | rules => [{elvis_project, no_deps_master_rebar, #{ignore => []}}, 86 | {elvis_project, protocol_for_deps_rebar, #{ignore => []}}] 87 | } 88 | ] 89 | }. 
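%% (Editorial note) With the 'lint' profile above pulling in rebar3_lint,
%% the elvis rules in this file can be checked with a standard rebar3
%% invocation, e.g.:
%%   rebar3 as lint lint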
90 | 91 | {relx, [{release, {exp, "0.0.1"}, [exp]}, 92 | {extended_start_script, true}, 93 | {dev_mode, false}, 94 | {include_erts, false}, 95 | {include_src, false}, 96 | {overlay, [{copy, "bin/env", "bin"}, 97 | {copy, "config/vm.args", "releases/\{\{release_version\}\}/vm.args"}]}]}. 98 | -------------------------------------------------------------------------------- /rebar.config.script: -------------------------------------------------------------------------------- 1 | case os:getenv("TRAVIS") of 2 | "true" -> 3 | JobId = os:getenv("TRAVIS_JOB_ID"), 4 | lists:keystore(coveralls_service_job_id, 5 | 1, 6 | CONFIG, 7 | {coveralls_service_job_id, JobId}); 8 | _ -> 9 | CONFIG 10 | end. 11 | -------------------------------------------------------------------------------- /rebar.lock: -------------------------------------------------------------------------------- 1 | {"1.1.0", 2 | [{<<"acceptor_pool">>,{pkg,<<"acceptor_pool">>,<<"1.0.0-rc.0">>},0}, 3 | {<<"eredis">>,{pkg,<<"eredis">>,<<"1.0.8">>},0}, 4 | {<<"erlang_term">>,{pkg,<<"erlang_term">>,<<"1.7.3">>},1}, 5 | {<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.9">>},1}, 6 | {<<"jsx">>,{pkg,<<"jsx">>,<<"2.8.0">>},0}, 7 | {<<"lager">>,{pkg,<<"lager">>,<<"3.2.4">>},0}, 8 | {<<"ldb">>, 9 | {git,"https://github.com/vitorenesduarte/ldb", 10 | {ref,"4e6167b997ff6eedb934f767cda26c2860fca9b1"}}, 11 | 0}, 12 | {<<"mochiweb">>,{pkg,<<"mochiweb">>,<<"2.15.0">>},0}, 13 | {<<"ranch">>,{pkg,<<"ranch">>,<<"1.5.0">>},1}, 14 | {<<"types">>, 15 | {git,"https://github.com/vitorenesduarte/types", 16 | {ref,"60359c30e0c0acd833c5c3cfe724f988dae099e7"}}, 17 | 0}]}. 18 | [ 19 | {pkg_hash,[ 20 | {<<"acceptor_pool">>, <<"679D741DF87FC13599B1AEF2DF8F78F1F880449A6BEFAB7C44FB6FAE0E92A2DE">>}, 21 | {<<"eredis">>, <<"AB4FDA1C4BA7FBE6C19C26C249DC13DA916D762502C4B4FA2DF401A8D51C5364">>}, 22 | {<<"erlang_term">>, <<"9A77FAFE03DA5726A2013F12246CE2D2068B5DEE6C03752EC80A6575AFD3AC4B">>}, 23 | {<<"goldrush">>, <<"F06E5D5F1277DA5C413E84D5A2924174182FB108DABB39D5EC548B27424CD106">>}, 24 | {<<"jsx">>, <<"749BEC6D205C694AE1786D62CEA6CC45A390437E24835FD16D12D74F07097727">>}, 25 | {<<"lager">>, <<"A6DEB74DAE7927F46BD13255268308EF03EB206EC784A94EAF7C1C0F3B811615">>}, 26 | {<<"mochiweb">>, <<"E1DAAC474DF07651E5D17CC1E642C4069C7850DC4508D3DB7263A0651330AACC">>}, 27 | {<<"ranch">>, <<"F04166F456790FEE2AC1AA05A02745CC75783C2BFB26D39FAF6AEFC9A3D3A58A">>}]} 28 | ]. 29 | -------------------------------------------------------------------------------- /rebar3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vitorenesduarte/exp/99486aba658c1b5f077275ceca3eef173375d050/rebar3 -------------------------------------------------------------------------------- /src/exp.app.src: -------------------------------------------------------------------------------- 1 | {application, exp, 2 | [ 3 | {description, "Running CRDT experiments"}, 4 | {vsn, "0.0.1"}, 5 | {registered, []}, 6 | {included_applications,[]}, 7 | {applications, [ 8 | kernel, 9 | stdlib, 10 | crypto, 11 | lager, 12 | ssl, 13 | ldb, 14 | jsx, 15 | eredis, 16 | mochiweb, 17 | erlang_term 18 | ]}, 19 | {mod, {exp_app, []}}, 20 | {modules, []} 21 | ]}. 22 | -------------------------------------------------------------------------------- /src/exp_app.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_app).
21 | -author("Vitor Enes").
22 | 
23 | -behaviour(application).
24 | 
25 | -export([start/2,
26 |          stop/1]).
27 | 
28 | %% @doc Start the application.
29 | start(_StartType, _StartArgs) ->
30 |     case exp_sup:start_link() of
31 |         {ok, Pid} ->
32 |             {ok, Pid};
33 |         Other ->
34 |             {error, Other}
35 |     end.
36 | 
37 | %% @doc Stop the application.
38 | stop(_State) ->
39 |     ok.
40 | 
-------------------------------------------------------------------------------- /src/exp_barrier_peer_service.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_barrier_peer_service).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_barrier_peer_service callbacks
28 | -export([start_link/0,
29 |          members/0,
30 |          join/1,
31 |          forward_message/3]).
32 | 
33 | %% gen_server callbacks
34 | -export([init/1,
35 |          handle_call/3,
36 |          handle_cast/2,
37 |          handle_info/2,
38 |          terminate/2,
39 |          code_change/3]).
40 | 
41 | -record(state, {connected :: orddict:orddict()}).
42 | 
43 | -spec start_link() -> {ok, pid()} | ignore | {error, term()}.
44 | start_link() ->
45 |     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
46 | 
47 | -spec members() -> {ok, [ldb_node_id()]}.
48 | members() ->
49 |     gen_server:call(?MODULE, members, infinity).
50 | 
51 | -spec join(node_spec()) -> ok | error().
52 | join(NodeSpec) ->
53 |     gen_server:call(?MODULE, {join, NodeSpec}, infinity).
54 | 
55 | -spec forward_message(ldb_node_id(), handler(), message()) ->
56 |     ok | error().
57 | forward_message(LDBId, Handler, Message) ->
58 |     gen_server:call(?MODULE, {forward_message, LDBId, Handler, Message}, infinity).
59 | 
60 | %% gen_server callbacks
61 | init([]) ->
62 |     {ok, _} = exp_barrier_peer_service_server:start_link(?BARRIER_PORT),
63 |     lager:info("exp_barrier_peer_service initialized!"),
64 |     {ok, #state{connected=orddict:new()}}.
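%% Illustrative client-side sequence for this barrier service (an
%% editorial sketch; the node id and IP below are hypothetical):
%%   ok = exp_barrier_peer_service:join({n1, {127, 0, 0, 1}, ?BARRIER_PORT}),
%%   {ok, Members} = exp_barrier_peer_service:members(),
%%   ok = exp_barrier_peer_service:forward_message(n1, exp_rsg_master,
%%                                                 {sim_done, ldb_config:id()}).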
65 | 
66 | handle_call(members, _From, #state{connected=Connected}=State) ->
67 |     Result = {ok, orddict:fetch_keys(Connected)},
68 |     {reply, Result, State};
69 | 
70 | handle_call({join, {LDBId, {_, _, _, _}=Ip, Port}=NodeSpec}, _From,
71 |             #state{connected=Connected0}=State) ->
72 |     {Result, Connected1} = case orddict:find(LDBId, Connected0) of
73 |         {ok, _} ->
74 |             {ok, Connected0};
75 |         error ->
76 |             case gen_tcp:connect(Ip, Port, ?TCP_OPTIONS) of
77 |                 {ok, Socket} ->
78 |                     {ok, Pid} = exp_barrier_peer_service_client:start_link(Socket),
79 |                     gen_tcp:controlling_process(Socket, Pid),
80 |                     {ok, orddict:store(LDBId, Pid, Connected0)};
81 |                 Error ->
82 |                     lager:info("Error handling join call on node ~p to node ~p. Reason ~p",
83 |                                [ldb_config:id(), NodeSpec, Error]),
84 |                     {Error, Connected0}
85 |             end
86 |     end,
87 |     {reply, Result, State#state{connected=Connected1}};
88 | 
89 | handle_call({forward_message, LDBId, Handler, Message}, _From, #state{connected=Connected}=State) ->
90 |     Result = case orddict:find(LDBId, Connected) of
91 |         {ok, Pid} ->
92 |             Pid ! {forward_message, Handler, Message},
93 |             ok;
94 |         error ->
95 |             {error, not_connected}
96 |     end,
97 | 
98 |     {reply, Result, State};
99 | 
100 | handle_call(Msg, _From, State) ->
101 |     lager:warning("Unhandled call message: ~p", [Msg]),
102 |     {noreply, State}.
103 | 
104 | handle_cast(Msg, State) ->
105 |     lager:warning("Unhandled cast message: ~p", [Msg]),
106 |     {noreply, State}.
107 | 
108 | handle_info(Msg, State) ->
109 |     lager:warning("Unhandled info message: ~p", [Msg]),
110 |     {noreply, State}.
111 | 
112 | terminate(_Reason, _State) ->
113 |     ok.
114 | 
115 | code_change(_OldVsn, State, _Extra) ->
116 |     {ok, State}.
117 | 
-------------------------------------------------------------------------------- /src/exp_barrier_peer_service_client.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_barrier_peer_service_client).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_barrier_peer_service_client callbacks
28 | -export([start_link/1]).
29 | 
30 | %% gen_server callbacks
31 | -export([init/1,
32 |          handle_call/3,
33 |          handle_cast/2,
34 |          handle_info/2,
35 |          terminate/2,
36 |          code_change/3]).
37 | 
38 | -record(state, {socket :: gen_tcp:socket()}).
39 | 
40 | -spec start_link(gen_tcp:socket()) -> {ok, pid()} | ignore | {error, term()}.
41 | start_link(Socket) ->
42 |     gen_server:start_link(?MODULE, [Socket], []).
43 | 
44 | %% gen_server callbacks
45 | init([Socket]) ->
46 |     lager:info("exp_barrier_peer_service_client initialized! Node ~p listening to socket ~p",
47 |                [ldb_config:id(), Socket]),
48 |     {ok, #state{socket=Socket}}.
49 | 
50 | handle_call(Msg, _From, State) ->
51 |     lager:warning("Unhandled call message: ~p", [Msg]),
52 |     {noreply, State}.
53 | 
54 | handle_cast(Msg, State) ->
55 |     lager:warning("Unhandled cast message: ~p", [Msg]),
56 |     {noreply, State}.
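%% (Editorial sketch) Frames on this socket are plain Erlang terms,
%% length-prefixed by the {packet, 4} option in ?TCP_OPTIONS, so a
%% forwarded message travels as:
%%   gen_tcp:send(Socket, term_to_binary({forward_message, Handler, Msg}))
%% with gen_tcp adding the 4-byte length header, as encode/1 below shows.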
57 | 
58 | handle_info({forward_message, _Handler, _Message}=M,
59 |             #state{socket=Socket}=State) ->
60 |     case gen_tcp:send(Socket, encode(M)) of
61 |         ok ->
62 |             ok;
63 |         Error ->
64 |             lager:info("Failed to send message: ~p", [Error])
65 |     end,
66 | 
67 |     {noreply, State};
68 | 
69 | handle_info({tcp, _Socket, Data}, State) ->
70 |     handle_message(decode(Data)),
71 |     {noreply, State};
72 | 
73 | handle_info({tcp_closed, Socket}, State) ->
74 |     lager:info("Barrier TCP closed ~p", [Socket]),
75 |     {stop, normal, State};
76 | 
77 | handle_info(Msg, State) ->
78 |     lager:warning("Unhandled info message: ~p", [Msg]),
79 |     {noreply, State}.
80 | 
81 | terminate(_Reason, _State) ->
82 |     ok.
83 | 
84 | code_change(_OldVsn, State, _Extra) ->
85 |     {ok, State}.
86 | 
87 | %% @private
88 | encode(Message) ->
89 |     term_to_binary(Message).
90 | 
91 | %% @private
92 | decode(Message) ->
93 |     binary_to_term(Message).
94 | 
95 | %% @private
96 | handle_message({forward_message, Handler, Message}) ->
97 |     gen_server:cast(Handler, Message).
98 | 
-------------------------------------------------------------------------------- /src/exp_barrier_peer_service_server.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_barrier_peer_service_server).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_barrier_peer_service_server callbacks
28 | -export([start_link/1]).
29 | 
30 | %% gen_server callbacks
31 | -export([init/1,
32 |          handle_call/3,
33 |          handle_cast/2,
34 |          handle_info/2,
35 |          terminate/2,
36 |          code_change/3]).
37 | 
38 | -record(state, {listener :: gen_tcp:socket()}).
39 | 
40 | -spec start_link(node_port()) -> {ok, pid()} | ignore | {error, term()}.
41 | start_link(Port) ->
42 |     gen_server:start_link({local, ?MODULE}, ?MODULE, [Port], []).
43 | 
44 | %% gen_server callbacks
45 | init([Port]) ->
46 |     {ok, Listener} = gen_tcp:listen(Port, ?TCP_OPTIONS),
47 | 
48 |     prepare_accept(),
49 | 
50 |     lager:info("exp_barrier_peer_service_server initialized"),
51 |     {ok, #state{listener=Listener}}.
52 | 
53 | handle_call(Msg, _From, State) ->
54 |     lager:warning("Unhandled call message: ~p", [Msg]),
55 |     {noreply, State}.
56 | 
57 | handle_cast(accept, #state{listener=Listener}=State) ->
58 |     {ok, Socket} = gen_tcp:accept(Listener),
59 | 
60 |     {ok, Pid} = exp_barrier_peer_service_client:start_link(Socket),
61 |     gen_tcp:controlling_process(Socket, Pid),
62 | 
63 |     prepare_accept(),
64 | 
65 |     {noreply, State};
66 | 
67 | handle_cast(Msg, State) ->
68 |     lager:warning("Unhandled cast message: ~p", [Msg]),
69 |     {noreply, State}.
70 | 
71 | handle_info(Msg, State) ->
72 |     lager:warning("Unhandled info message: ~p", [Msg]),
73 |     {noreply, State}.
74 | 
75 | terminate(_Reason, _State) ->
76 |     ok.
77 | 
78 | code_change(_OldVsn, State, _Extra) ->
79 |     {ok, State}.
80 | 
81 | %% @private
82 | prepare_accept() ->
83 |     gen_server:cast(?MODULE, accept).
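%% (Editorial note) Casting 'accept' back to ourselves keeps the blocking
%% gen_tcp:accept/1 call out of init/1, and each accepted socket is handed
%% to a dedicated exp_barrier_peer_service_client process via
%% controlling_process/2 above; typical startup, as done by
%% exp_barrier_peer_service:
%%   {ok, _} = exp_barrier_peer_service_server:start_link(?BARRIER_PORT).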
84 | 
-------------------------------------------------------------------------------- /src/exp_config.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_config).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -export([get/1,
26 |          get/2,
27 |          set/2]).
28 | 
29 | -spec get(atom()) -> term().
30 | get(Property) ->
31 |     {ok, Value} = application:get_env(?APP, Property),
32 |     Value.
33 | 
34 | -spec get(atom(), term()) -> term().
35 | get(Property, Default) ->
36 |     application:get_env(?APP, Property, Default).
37 | 
38 | -spec set(atom(), term()) -> ok.
39 | set(Property, Value) ->
40 |     application:set_env(?APP, Property, Value).
41 | 
-------------------------------------------------------------------------------- /src/exp_kube_orchestration.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_kube_orchestration).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(exp_orchestration).
26 | 
27 | -export([get_tasks/3,
28 |          stop_tasks/1]).
29 | 
30 | -spec get_tasks(atom(), node_port(), boolean()) -> [node_spec()].
31 | get_tasks(Tag, Port, FilterByTimestamp) ->
32 |     Path = pods_path() ++ selector(Tag, FilterByTimestamp),
33 |     case http(get, Path) of
34 |         {ok, Nodes} ->
35 |             generate_nodes(Nodes, Port);
36 |         {error, invalid} ->
37 |             []
38 |     end.
39 | 
40 | -spec stop_tasks([atom()]) -> ok.
41 | stop_tasks(Tags) ->
42 |     lists:foreach(
43 |         fun(Tag) ->
44 |             ok = delete_task(Tag)
45 |         end,
46 |         Tags
47 |     ),
48 |     ok.
49 | 
50 | %% @private
51 | delete_task(Tag) ->
52 |     Path = deploy_path() ++ "/" ++ name(Tag),
53 | 
54 |     Result = case http(get, Path) of
55 |         {ok, Body0} ->
56 |             Body1 = set_replicas_as_zero(Body0),
57 |             PR = http(put, Path, Body1),
58 |             DR = http(delete, Path),
59 |             case {PR, DR} of
60 |                 {{ok, _}, {ok, _}} ->
61 |                     ok;
62 |                 _ ->
63 |                     error
64 |             end;
65 |         {error, invalid} ->
66 |             error
67 |     end,
68 | 
69 |     case Result of
70 |         ok ->
71 |             ok;
72 |         error ->
73 |             lager:info("Delete failed. Trying again in 1 second"),
74 |             timer:sleep(1000),
75 |             delete_task(Tag)
76 |     end.
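%% (Editorial sketch) A typical request issued by the helpers below, with
%% hypothetical server, tag and timestamp values:
%%   GET {exp_api_server}/api/v1/pods?labelSelector=tag%3Dexp,timestamp%3D1540000000
%%   Authorization: Bearer {exp_token}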
77 | 78 | %% @private 79 | http(Method, Path) -> 80 | URL = server() ++ Path, 81 | Headers = headers(), 82 | run_http(Method, {URL, Headers}). 83 | 84 | %% @private 85 | http(Method, Path, Body0) -> 86 | URL = server() ++ Path, 87 | Headers = headers(), 88 | ContentType = "application/json", 89 | Body1 = binary_to_list(ldb_json:encode(Body0)), 90 | run_http(Method, {URL, Headers, ContentType, Body1}). 91 | 92 | %% @private 93 | run_http(Method, Request) -> 94 | Options = [{body_format, binary}], 95 | 96 | case httpc:request(Method, Request, [], Options) of 97 | {ok, {{_, 200, _}, _, Body}} -> 98 | {ok, ldb_json:decode(Body)}; 99 | {error, Reason} -> 100 | lager:info("Couldn't process ~p request. Reason ~p", 101 | [Method, Reason]), 102 | {error, invalid} 103 | end. 104 | 105 | %% @private 106 | headers() -> 107 | Token = exp_config:get(exp_token), 108 | [{"Authorization", "Bearer " ++ Token}]. 109 | 110 | %% @private 111 | server() -> 112 | exp_config:get(exp_api_server). 113 | 114 | %% @private 115 | timestamp() -> 116 | integer_to_list(exp_config:get(exp_timestamp)). 117 | 118 | %% @private 119 | pods_path() -> 120 | "/api/v1/pods". 121 | 122 | %% @private 123 | selector(Tag, FilterByTimestamp) -> 124 | Selector = "?labelSelector=" ++ "tag%3D" ++ atom_to_list(Tag), 125 | 126 | case FilterByTimestamp of 127 | true -> 128 | Selector ++ ",timestamp%3D" ++ timestamp(); 129 | false -> 130 | Selector 131 | end. 132 | 133 | %% @private 134 | name(Tag) -> 135 | atom_to_list(Tag) ++ "-" ++ timestamp(). 136 | 137 | %% @private 138 | prefix() -> 139 | "/apis/extensions/v1beta1/namespaces/default". 140 | 141 | %% @private 142 | deploy_path() -> 143 | prefix() ++ "/deployments". 144 | 145 | %% @private 146 | generate_nodes(Map, Port) -> 147 | Items = maps:get(items, Map), 148 | lists:foldl( 149 | fun(Item, Nodes) -> 150 | %% find ip 151 | Status = maps:get(status, Item), 152 | 153 | case maps:is_key(podIP, Status) of 154 | true -> 155 | IP = binary_to_list( 156 | maps:get(podIP, Status) 157 | ), 158 | Node = exp_util:generate_spec(IP, Port), 159 | [Node | Nodes]; 160 | false -> 161 | Nodes 162 | end 163 | end, 164 | [], 165 | Items 166 | ). 167 | 168 | %% @private 169 | set_replicas_as_zero(Map) -> 170 | Spec0 = maps:get(spec, Map), 171 | Spec1 = maps:put(replicas, 0, Spec0), 172 | maps:put(spec, Spec1, Map). 173 | -------------------------------------------------------------------------------- /src/exp_local_simulations_support.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 3 | %% Copyright (c) 2016 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(exp_local_simulations_support). 
22 | -author("Vitor Enes").
23 | 
24 | -include("exp.hrl").
25 | 
26 | -export([run/1]).
27 | 
28 | run(Options) ->
29 |     IdToNode = start(Options),
30 |     construct_overlay(Options, IdToNode),
31 |     start_experiment(IdToNode),
32 |     wait_for_completion(IdToNode),
33 |     stop(IdToNode).
34 | 
35 | %% @private
36 | start_experiment(IdToNode) ->
37 |     %% wait for connectedness
38 |     timer:sleep(5000),
39 |     lists:foreach(
40 |         fun({_Id, Node}) ->
41 |             ok = rpc:call(Node, exp_simulation_runner, start_simulation, [])
42 |         end,
43 |         IdToNode
44 |     ).
45 | 
46 | %% @private Start nodes.
47 | start(Options) ->
48 |     ok = start_erlang_distribution(),
49 |     NodeNumber = proplists:get_value(node_number, Options),
50 | 
51 |     InitializerFun = fun(I, Acc) ->
52 |         %ct:pal("Starting node: ~p", [I]),
53 | 
54 |         %% Start node
55 |         Config = [{monitor_master, true},
56 |                   {startup_functions, [{code, set_path, [codepath()]}]}],
57 | 
58 |         Name = get_node_name(I),
59 |         case ct_slave:start(Name, Config) of
60 |             {ok, Node} ->
61 |                 orddict:store(I, Node, Acc);
62 |             Error ->
63 |                 ct:fail(Error)
64 |         end
65 |     end,
66 | 
67 |     IdToNode = lists:foldl(InitializerFun,
68 |                            orddict:new(),
69 |                            lists:seq(0, NodeNumber - 1)),
70 | 
71 |     LoaderFun = fun({_Id, Node}) ->
72 |         %% Load ldb
73 |         ok = rpc:call(Node, application, load, [ldb]),
74 | 
75 |         %% Load exp
76 |         ok = rpc:call(Node, application, load, [?APP]),
77 | 
78 |         %% Set lager log dir
79 |         PrivDir = code:priv_dir(?APP),
80 |         NodeDir = filename:join([PrivDir, "lager", Node]),
81 |         ok = rpc:call(Node,
82 |                       application,
83 |                       set_env,
84 |                       [lager, log_root, NodeDir])
85 |     end,
86 |     lists:foreach(LoaderFun, IdToNode),
87 | 
88 |     ConfigureFun = fun({Id, Node}) ->
89 |         %% Configure exp
90 |         LSimSettings0 = proplists:get_value(exp_settings, Options),
91 |         LSimSettings1 = LSimSettings0
92 |                      ++ [{exp_timestamp, timestamp()},
93 |                          {exp_numerical_id, Id}],
94 | 
95 |         lists:foreach(
96 |             fun({Property, Value}) ->
97 |                 ok = rpc:call(Node,
98 |                               exp_config,
99 |                               set,
100 |                               [Property, Value])
101 |             end,
102 |             LSimSettings1
103 |         ),
104 | 
105 |         %% Configure ldb
106 |         LDBSettings = proplists:get_value(ldb_settings, Options),
107 |         lists:foreach(
108 |             fun({Property, Value}) ->
109 |                 ok = rpc:call(Node,
110 |                               ldb_config,
111 |                               set,
112 |                               [Property, Value])
113 |             end,
114 |             [{node_number, NodeNumber},
115 |              {ldb_port, get_port(Id)} | LDBSettings]
116 |         )
117 |     end,
118 |     lists:foreach(ConfigureFun, IdToNode),
119 | 
120 |     StartFun = fun({_Id, Node}) ->
121 |         {ok, _} = rpc:call(Node,
122 |                            application,
123 |                            ensure_all_started,
124 |                            [?APP])
125 |     end,
126 |     lists:foreach(StartFun, IdToNode),
127 | 
128 |     IdToNode.
129 | 
130 | %% @private Connect each node to its peers.
131 | construct_overlay(Options, IdToNode) ->
132 |     Overlay = proplists:get_value(
133 |         exp_overlay,
134 |         proplists:get_value(
135 |             exp_settings,
136 |             Options
137 |         )
138 |     ),
139 | 
140 |     IdToNodeSpec = lists:map(
141 |         fun({Id, Node}) ->
142 |             Spec = rpc:call(Node, ldb_hao, myself, []),
143 |             {Id, Spec}
144 |         end,
145 |         IdToNode
146 |     ),
147 | 
148 |     NodeNumber = orddict:size(IdToNode),
149 |     Graph = exp_overlay:get(Overlay, NodeNumber),
150 | 
151 |     lists:foreach(
152 |         fun({I, Peers}) ->
153 |             Node = orddict:fetch(I, IdToNode),
154 | 
155 |             lists:foreach(
156 |                 fun(Peer) ->
157 |                     PeerSpec = orddict:fetch(Peer, IdToNodeSpec),
158 | 
159 |                     ok = rpc:call(Node,
160 |                                   ldb_hao,
161 |                                   join,
162 |                                   [PeerSpec])
163 |                 end,
164 |                 Peers
165 |             )
166 |         end,
167 |         Graph
168 |     ).
169 | 
170 | %% @private Poll nodes to see if the simulation has ended.
171 | wait_for_completion(IdToNode) ->
172 |     ct:pal("Waiting for simulation to end"),
173 | 
174 |     NodeNumber = length(IdToNode),
175 | 
176 |     Result = wait_until(
177 |         fun() ->
178 |             Ended = lists:foldl(
179 |                 fun({_Id, Node}, Acc) ->
180 |                     SimulationEnd = rpc:call(Node,
181 |                                              exp_config,
182 |                                              get,
183 |                                              [exp_simulation_end,
184 |                                               false],
185 |                                              infinity),
186 | 
187 |                     case SimulationEnd of
188 |                         true ->
189 |                             Acc + 1;
190 |                         false ->
191 |                             Acc
192 |                     end
193 |                 end,
194 |                 0,
195 |                 IdToNode
196 |             ),
197 | 
198 |             %ct:pal("~p of ~p with simulation as true", [Ended, NodeNumber]),
199 | 
200 |             Ended == NodeNumber
201 |         end,
202 |         100,      %% 100 retries
203 |         10 * 1000 %% every 10 seconds
204 |     ),
205 | 
206 |     case Result of
207 |         ok ->
208 |             ct:pal("Simulation ended with success");
209 |         fail ->
210 |             ct:fail("Simulation failed")
211 |     end.
212 | 
213 | %% @private Stop nodes.
214 | stop(IdToNode) ->
215 |     StopFun = fun({I, _Node}) ->
216 |         Name = get_node_name(I),
217 |         case ct_slave:stop(Name) of
218 |             {ok, _} ->
219 |                 ok;
220 |             Error ->
221 |                 ct:fail(Error)
222 |         end
223 |     end,
224 |     lists:foreach(StopFun, IdToNode).
225 | 
226 | %% @private Start erlang distribution.
227 | start_erlang_distribution() ->
228 |     os:cmd(os:find_executable("epmd") ++ " -daemon"),
229 |     {ok, Hostname} = inet:gethostname(),
230 |     case net_kernel:start([list_to_atom("runner@" ++ Hostname), shortnames]) of
231 |         {ok, _} ->
232 |             ok;
233 |         {error, {already_started, _}} ->
234 |             ok
235 |     end.
236 | 
237 | %% @private
238 | codepath() ->
239 |     lists:filter(fun filelib:is_dir/1, code:get_path()).
240 | 
241 | %% @private
242 | get_node_name(I) ->
243 |     list_to_atom("n" ++ integer_to_list(I)).
244 | 
245 | %% @private
246 | timestamp() ->
247 |     erlang:system_time(microsecond).
248 | 
249 | %% @doc Wait until `Fun' returns true or `Retry' reaches 0.
250 | %%      The sleep time between retries is `Delay'.
251 | wait_until(_Fun, 0, _Delay) ->
252 |     fail;
253 | wait_until(Fun, Retry, Delay) when Retry > 0 ->
254 |     case Fun() of
255 |         true ->
256 |             ok;
257 |         _ ->
258 |             timer:sleep(Delay),
259 |             wait_until(Fun, Retry - 1, Delay)
260 |     end.
261 | 
262 | %% @private
263 | get_port(Id) ->
264 |     5000 + Id.
265 | 
-------------------------------------------------------------------------------- /src/exp_orchestration.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_orchestration).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -export([get_task/3,
26 |          get_tasks/3,
27 |          stop_tasks/1]).
28 | 
29 | 
30 | %% @doc Get tasks, given a tag
31 | -callback get_tasks(atom(), node_port(), boolean()) -> [node_spec()].
32 | 
33 | %% @doc Stop tasks, given a list of tags
34 | -callback stop_tasks([atom()]) -> ok.
35 | 
36 | 
37 | -spec get_task(atom(), node_port(), boolean()) ->
38 |     {ok, node_spec()} | {error, not_connected}.
39 | get_task(Tag, Port, FilterByTimestamp) ->
40 |     Nodes = get_tasks(Tag, Port, FilterByTimestamp),
41 | 
42 |     case Nodes of
43 |         [] ->
44 |             {error, not_connected};
45 |         [Task|_] ->
46 |             {ok, Task}
47 |     end.
48 | 
49 | -spec get_tasks(atom(), node_port(), boolean()) -> [node_spec()].
50 | get_tasks(Tag, Port, FilterByTimestamp) ->
51 |     do(get_tasks, [Tag, Port, FilterByTimestamp]).
52 | 
53 | -spec stop_tasks([atom()]) -> ok.
54 | stop_tasks(Tags) ->
55 |     do(stop_tasks, [Tags]).
56 | 
57 | %% @private
58 | do(Function, Args) ->
59 |     Orchestration = exp_config:get(exp_orchestration),
60 |     case Orchestration of
61 |         kubernetes ->
62 |             erlang:apply(exp_kube_orchestration, Function, Args)
63 |     end.
-------------------------------------------------------------------------------- /src/exp_overlay.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_overlay).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -export([get/2,
26 |          numerical_id_and_neighbors/3]).
27 | 
28 | 
29 | %% @doc Given an overlay name and the number of nodes,
30 | %%      return a dict that maps each numerical node id
31 | %%      (from 0 to N - 1) to the list of ids of its
32 | %%      peers in that overlay.
33 | -spec get(atom(), non_neg_integer()) -> orddict:orddict().
34 | get(_, 1) ->
35 |     [];
36 | get(fullmesh, N) ->
37 |     All = lists:seq(0, N - 1),
38 |     lists:foldl(
39 |         fun(I, Acc) ->
40 |             orddict:store(I, All -- [I], Acc)
41 |         end,
42 |         orddict:new(),
43 |         All
44 |     );
45 | get(tree, 14) ->
46 |     %% automatically generated by topologies.py
47 |     [{0, [6]}, {1, [5, 11, 10]}, {2, [7]}, {3, [7]}, {4, [6]}, {5, [1]}, {6, [0, 4, 11]}, {7, [3, 2, 12]}, {8, [9]}, {9, [12, 13, 8]}, {10, [1]}, {11, [1, 12, 6]}, {12, [9, 7, 11]}, {13, [9]}];
48 | get(tree, 15) ->
49 |     %% automatically generated by topologies.py
50 |     [{0, [1, 8, 13]}, {1, [0, 6]}, {2, [5]}, {3, [10]}, {4, [5]}, {5, [4, 6, 2]}, {6, [1, 10, 5]}, {7, [13]}, {8, [9, 0, 14]}, {9, [8]}, {10, [12, 3, 6]}, {11, [13]}, {12, [10]}, {13, [11, 0, 7]}, {14, [8]}];
51 | get(partialmesh, 8) ->
52 |     [{0, [1, 7, 4]}, {1, [0, 2, 5]}, {2, [1, 6, 3]}, {3, [2, 7, 4]}, {4, [5, 0, 3]}, {5, [6, 4, 1]}, {6, [5, 2, 7]}, {7, [6, 0, 3]}];
53 | get(partialmesh, 15) ->
54 |     [{0, [14, 11, 4, 1]}, {1, [2, 5, 0, 12]}, {2, [1, 13, 6, 3]}, {3, [7, 14, 2, 4]}, {4, [8, 0, 5, 3]}, {5, [9, 6, 1, 4]}, {6, [5, 7, 10, 2]}, {7, [3, 11, 6, 8]}, {8, [4, 9, 12, 7]}, {9, [5, 8, 10, 13]}, {10, [11, 6, 9, 14]}, {11, [10, 7, 0, 12]}, {12, [13, 11, 1, 8]}, {13, [12, 2, 9, 14]}, {14, [0, 3, 13, 10]}];
55 | get(partialmesh, 16) ->
56 |     %% automatically generated by topologies.py
57 |     [{0, [15, 4, 12, 1]}, {1, [2, 5, 13, 0]}, {2, [1, 6, 3, 14]}, {3, [15, 7, 2, 4]}, {4, [8, 0, 5, 3]}, {5, [6, 1, 4, 9]}, {6, [5, 7, 10, 2]}, {7, [3, 11, 6, 8]}, {8, [4, 9, 12, 7]}, {9, [8, 13, 5, 10]}, {10, [11, 6, 9, 14]}, {11, [10, 7, 12, 15]}, {12, [13, 0, 11, 8]}, {13, [12, 9, 1, 14]}, {14, [15, 2, 13, 10]}, {15, [3, 0, 14, 11]}];
58 | get(partialmesh, 32) ->
59 |     [{0, [28, 4, 1, 31]}, {1, [2, 5, 29, 0]}, {2, [1, 30, 6, 3]}, {3, [7, 31, 2,
4]}, {4, [8, 0, 5, 3]}, {5, [9, 6, 1, 4]}, {6, [10, 5, 7, 2]}, {7, [11, 3, 6, 8]}, {8, [4, 9, 12, 7]}, {9, [5, 8, 13, 10]}, {10, [6, 11, 9, 14]}, {11, [10, 7, 12, 15]}, {12, [16, 13, 11, 8]}, {13, [17, 12, 9, 14]}, {14, [15, 13, 18, 10]}, {15, [14, 19, 16, 11]}, {16, [12, 17, 15, 20]}, {17, [21, 13, 16, 18]}, {18, [22, 19, 17, 14]}, {19, [18, 20, 15, 23]}, {20, [19, 21, 24, 16]}, {21, [17, 20, 22, 25]}, {22, [18, 26, 21, 23]}, {23, [24, 27, 22, 19]}, {24, [23, 28, 20, 25]}, {25, [26, 21, 24, 29]}, {26, [25, 22, 30, 27]}, {27, [23, 28, 31, 26]}, {28, [29, 0, 27, 24]}, {29, [28, 30, 1, 25]}, {30, [2, 29, 26, 31]}, {31, [3, 30, 27, 0]}]; 60 | get(partialmesh, 49) -> 61 | [{0, [4, 45, 1, 48]}, {1, [5, 46, 2, 0]}, {2, [6, 47, 3, 1]}, {3, [7, 2, 4, 48]}, {4, [0, 8, 3, 5]}, {5, [1, 9, 6, 4]}, {6, [7, 2, 10, 5]}, {7, [8, 3, 6, 11]}, {8, [7, 12, 9, 4]}, {9, [8, 13, 5, 10]}, {10, [14, 6, 11, 9]}, {11, [15, 7, 12, 10]}, {12, [8, 13, 11, 16]}, {13, [17, 12, 9, 14]}, {14, [18, 15, 10, 13]}, {15, [14, 16, 19, 11]}, {16, [17, 15, 12, 20]}, {17, [13, 16, 21, 18]}, {18, [19, 14, 17, 22]}, {19, [18, 23, 15, 20]}, {20, [24, 16, 19, 21]}, {21, [17, 25, 22, 20]}, {22, [23, 26, 18, 21]}, {23, [19, 22, 24, 27]}, {24, [28, 23, 25, 20]}, {25, [26, 24, 29, 21]}, {26, [25, 22, 30, 27]}, {27, [31, 28, 23, 26]}, {28, [32, 24, 29, 27]}, {29, [30, 33, 28, 25]}, {30, [29, 26, 31, 34]}, {31, [35, 27, 30, 32]}, {32, [28, 36, 33, 31]}, {33, [34, 29, 32, 37]}, {34, [38, 33, 35, 30]}, {35, [31, 36, 34, 39]}, {36, [37, 35, 32, 40]}, {37, [36, 41, 38, 33]}, {38, [34, 37, 39, 42]}, {39, [38, 35, 40, 43]}, {40, [41, 39, 36, 44]}, {41, [40, 37, 45, 42]}, {42, [38, 43, 41, 46]}, {43, [42, 44, 39, 47]}, {44, [45, 43, 48, 40]}, {45, [44, 0, 41, 46]}, {46, [47, 45, 1, 42]}, {47, [2, 46, 48, 43]}, {48, [3, 47, 44, 0]}]; 62 | get(partialmesh, 50) -> 63 | [{0, [4, 49, 46, 1]}, {1, [5, 47, 2, 0]}, {2, [48, 6, 3, 1]}, {3, [7, 49, 2, 4]}, {4, [0, 8, 3, 5]}, {5, [1, 9, 6, 4]}, {6, [7, 2, 10, 5]}, {7, [8, 3, 6, 11]}, {8, [7, 12, 4, 9]}, {9, [13, 8, 5, 10]}, {10, [14, 6, 11, 9]}, {11, [15, 7, 12, 10]}, {12, [8, 13, 11, 16]}, {13, [17, 12, 9, 14]}, {14, [18, 15, 10, 13]}, {15, [14, 16, 19, 11]}, {16, [17, 15, 12, 20]}, {17, [13, 16, 21, 18]}, {18, [19, 14, 17, 22]}, {19, [18, 23, 15, 20]}, {20, [24, 16, 19, 21]}, {21, [17, 25, 22, 20]}, {22, [23, 26, 18, 21]}, {23, [19, 22, 24, 27]}, {24, [28, 23, 25, 20]}, {25, [26, 24, 29, 21]}, {26, [25, 22, 30, 27]}, {27, [31, 28, 23, 26]}, {28, [32, 24, 29, 27]}, {29, [30, 33, 28, 25]}, {30, [29, 26, 31, 34]}, {31, [35, 27, 30, 32]}, {32, [28, 36, 33, 31]}, {33, [34, 29, 32, 37]}, {34, [38, 33, 35, 30]}, {35, [31, 36, 34, 39]}, {36, [37, 35, 32, 40]}, {37, [36, 41, 38, 33]}, {38, [34, 37, 39, 42]}, {39, [38, 35, 40, 43]}, {40, [41, 39, 36, 44]}, {41, [40, 37, 45, 42]}, {42, [38, 43, 41, 46]}, {43, [42, 44, 39, 47]}, {44, [45, 43, 48, 40]}, {45, [44, 41, 46, 49]}, {46, [47, 45, 0, 42]}, {47, [46, 1, 48, 43]}, {48, [2, 47, 44, 49]}, {49, [0, 3, 45, 48]}]; 64 | get(partialmesh, 64) -> 65 | [{0, [4, 63, 60, 1]}, {1, [5, 2, 0, 61]}, {2, [6, 3, 62, 1]}, {3, [7, 2, 4, 63]}, {4, [0, 5, 8, 3]}, {5, [4, 1, 9, 6]}, {6, [7, 2, 10, 5]}, {7, [8, 3, 6, 11]}, {8, [7, 12, 9, 4]}, {9, [8, 13, 5, 10]}, {10, [14, 6, 11, 9]}, {11, [15, 7, 12, 10]}, {12, [8, 13, 11, 16]}, {13, [17, 12, 9, 14]}, {14, [18, 15, 10, 13]}, {15, [14, 11, 16, 19]}, {16, [17, 15, 12, 20]}, {17, [13, 16, 21, 18]}, {18, [19, 14, 17, 22]}, {19, [18, 15, 23, 20]}, {20, [24, 16, 19, 21]}, {21, [17, 25, 22, 20]}, {22, [23, 26, 18, 21]}, {23, [22, 19, 24, 
27]}, {24, [28, 23, 25, 20]}, {25, [26, 24, 29, 21]}, {26, [25, 22, 30, 27]}, {27, [31, 28, 23, 26]}, {28, [32, 24, 29, 27]}, {29, [30, 33, 28, 25]}, {30, [29, 26, 31, 34]}, {31, [35, 27, 30, 32]}, {32, [28, 36, 33, 31]}, {33, [34, 29, 32, 37]}, {34, [38, 33, 35, 30]}, {35, [31, 36, 34, 39]}, {36, [37, 35, 32, 40]}, {37, [36, 41, 33, 38]}, {38, [34, 39, 42, 37]}, {39, [38, 35, 40, 43]}, {40, [41, 39, 36, 44]}, {41, [40, 37, 45, 42]}, {42, [38, 43, 41, 46]}, {43, [42, 44, 47, 39]}, {44, [45, 43, 48, 40]}, {45, [44, 41, 49, 46]}, {46, [47, 45, 50, 42]}, {47, [51, 46, 48, 43]}, {48, [52, 47, 44, 49]}, {49, [45, 50, 53, 48]}, {50, [51, 49, 46, 54]}, {51, [52, 47, 50, 55]}, {52, [51, 48, 56, 53]}, {53, [54, 52, 49, 57]}, {54, [55, 58, 53, 50]}, {55, [54, 56, 59, 51]}, {56, [55, 52, 60, 57]}, {57, [61, 53, 58, 56]}, {58, [54, 59, 62, 57]}, {59, [58, 55, 60, 63]}, {60, [59, 0, 56, 61]}, {61, [57, 62, 60, 1]}, {62, [61, 2, 58, 63]}, {63, [0, 3, 59, 62]}]; 66 | get(ring, N) -> 67 | lists:foldl( 68 | fun(I, Acc) -> 69 | Peers = [ 70 | previous(I, N), 71 | next(I, N) 72 | ], 73 | orddict:store(I, Peers, Acc) 74 | end, 75 | orddict:new(), 76 | lists:seq(0, N - 1) 77 | ); 78 | get(line, N) -> 79 | T0 = get(ring, N), 80 | First = 0, 81 | Last = N - 1, 82 | T1 = lists:keyreplace( 83 | First, 84 | 1, 85 | T0, 86 | {First, [next(First, N)]} 87 | ), 88 | T2 = lists:keyreplace( 89 | Last, 90 | 1, 91 | T1, 92 | {Last, [previous(Last, N)]} 93 | ), 94 | T2. 95 | 96 | 97 | %% @doc The first argument is my node spec, 98 | %% the second argument is a list of node specs, 99 | %% and the third argument is the overlay. 100 | -spec numerical_id_and_neighbors(ldb_node_id(), list(node_spec()), atom()) -> 101 | {non_neg_integer(), list(node_spec())}. 102 | numerical_id_and_neighbors(MyName, Nodes, Overlay) -> 103 | NodeNumber = length(Nodes), 104 | Sorted = lists:sort(Nodes), 105 | 106 | NumericalId = numerical_id(MyName, Sorted), 107 | 108 | %% id -> [id] 109 | Topology = get(Overlay, NodeNumber), 110 | 111 | {NumericalId, [lists:nth(I + 1, Sorted) || I <- orddict:fetch(NumericalId, Topology)]}. 112 | 113 | %% @private Get numerical id, given the name a list of sorted specs by name. 114 | -spec numerical_id(ldb_node_id(), list(node_spec())) -> non_neg_integer(). 115 | numerical_id(MyName, Sorted) -> 116 | %% compute id 117 | lists:foldl( 118 | fun({Name, _, _}, Acc) -> 119 | case Name < MyName of 120 | true -> 121 | Acc + 1; 122 | false -> 123 | Acc 124 | end 125 | end, 126 | 0, 127 | Sorted 128 | ). 129 | 130 | %% @private 131 | previous(I, N) -> 132 | First = 0, 133 | case I of 134 | First -> 135 | N - 1; 136 | _ -> 137 | I - 1 138 | end. 139 | 140 | %% @private 141 | next(I, N) -> 142 | Last = N - 1, 143 | case I of 144 | Last -> 145 | 0; 146 | _ -> 147 | I + 1 148 | end. 149 | -------------------------------------------------------------------------------- /src/exp_redis_metrics_store.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 3 | %% 4 | %% This file is provided to you under the Apache License, 5 | %% Version 2.0 (the "License"); you may not use this file 6 | %% except in compliance with the License. 
You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_redis_metrics_store).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_redis_metrics_store callbacks
28 | -export([start_link/0,
29 |          put/2]).
30 | 
31 | %% gen_server callbacks
32 | -export([init/1,
33 |          handle_call/3,
34 |          handle_cast/2,
35 |          handle_info/2,
36 |          terminate/2,
37 |          code_change/3]).
38 | 
39 | -record(state, {redis :: pid()}).
40 | 
41 | -spec start_link() -> {ok, pid()} | ignore | {error, term()}.
42 | start_link() ->
43 |     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
44 | 
45 | -spec put(key(), value()) -> ok.
46 | put(Key, Value) ->
47 |     gen_server:call(?MODULE, {put, Key, Value}, infinity).
48 | 
49 | %% gen_server callbacks
50 | init([]) ->
51 |     {Host, Port} = get_redis_config(),
52 |     {ok, Redis} = eredis:start_link(Host, Port),
53 |     lager:info("exp_redis_metrics_store initialized"),
54 |     {ok, #state{redis=Redis}}.
55 | 
56 | handle_call({put, Key, Value}, _From, #state{redis=Redis}=State) ->
57 |     {ok, <<"OK">>} = eredis:q(Redis, ["SET", Key, Value]),
58 |     {reply, ok, State};
59 | 
60 | handle_call(Msg, _From, State) ->
61 |     lager:warning("Unhandled call message: ~p", [Msg]),
62 |     {noreply, State}.
63 | 
64 | handle_cast(Msg, State) ->
65 |     lager:warning("Unhandled cast message: ~p", [Msg]),
66 |     {noreply, State}.
67 | 
68 | handle_info(Msg, State) ->
69 |     lager:warning("Unhandled info message: ~p", [Msg]),
70 |     {noreply, State}.
71 | 
72 | terminate(_Reason, _State) ->
73 |     ok.
74 | 
75 | code_change(_OldVsn, State, _Extra) ->
76 |     {ok, State}.
77 | 
78 | %% @private
79 | get_redis_config() ->
80 |     case exp_orchestration:get_task(redis, ?REDIS_PORT, false) of
81 |         {ok, {_, IpAddress, Port}} ->
82 |             Ip = inet_parse:ntoa(IpAddress),
83 |             {Ip, Port};
84 |         {error, not_connected} ->
85 |             lager:info("Redis not connected. Trying again in 5 seconds."),
86 |             timer:sleep(5000),
87 |             get_redis_config()
88 |     end.
89 | 
-------------------------------------------------------------------------------- /src/exp_resource.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_resource).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_resource callbacks
28 | -export([start_link/0,
29 |          loop/1]).
30 | 
31 | %% gen_server callbacks
32 | -export([init/1,
33 |          handle_call/3,
34 |          handle_cast/2,
35 |          handle_info/2,
36 |          terminate/2,
37 |          code_change/3]).
38 | 
39 | -record(state, {}).
40 | 
41 | %% @doc Web resource that serves cluster membership
42 | %%      at the /membership endpoint.
43 | -spec start_link() -> {ok, pid()} | ignore | {error, term()}.
44 | start_link() ->
45 |     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
46 | 
47 | %% gen_server callbacks
48 | init([]) ->
49 |     lager:info("exp_resource initialized"),
50 | 
51 |     Loop = fun(Req) ->
52 |         ?MODULE:loop(Req)
53 |     end,
54 |     mochiweb_http:start([{loop, Loop} | ?WEB_CONFIG]),
55 | 
56 |     {ok, #state{}}.
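%% (Editorial sketch) The only route served by loop/1 below; with the
%% defaults in exp.hrl this is:
%%   GET http://0.0.0.0:8080/membership
%% which returns the current ldb_hao membership encoded as JSON.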
57 | 
58 | handle_call(Msg, _From, State) ->
59 |     lager:warning("Unhandled call message: ~p", [Msg]),
60 |     {noreply, State}.
61 | 
62 | handle_cast(Msg, State) ->
63 |     lager:warning("Unhandled cast message: ~p", [Msg]),
64 |     {noreply, State}.
65 | 
66 | handle_info(Msg, State) ->
67 |     lager:warning("Unhandled info message: ~p", [Msg]),
68 |     {noreply, State}.
69 | 
70 | terminate(_Reason, _State) ->
71 |     ok.
72 | 
73 | code_change(_OldVsn, State, _Extra) ->
74 |     {ok, State}.
75 | 
76 | %% mochiweb
77 | loop(Req) ->
78 |     Path = Req:get(path),
79 | 
80 |     case string:tokens(Path, "/") of
81 |         ["membership"] ->
82 |             {ok, Names} = ldb_hao:members(),
83 | 
84 |             Req:ok({
85 |                 _ContentType = "application/javascript",
86 |                 ldb_json:encode(Names)
87 |             });
88 |         _ ->
89 |             Req:not_found()
90 |     end.
91 | 
-------------------------------------------------------------------------------- /src/exp_rsg.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_rsg).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_rsg callbacks
28 | -export([start_link/0,
29 |          simulation_end/0]).
30 | 
31 | %% gen_server callbacks
32 | -export([init/1,
33 |          handle_call/3,
34 |          handle_cast/2,
35 |          handle_info/2,
36 |          terminate/2,
37 |          code_change/3]).
38 | 
39 | -define(INTERVAL, 5000). %% ms between retries
40 | 
41 | -record(state, {node_number :: non_neg_integer()}).
42 | 
43 | -spec start_link() -> {ok, pid()} | ignore | {error, term()}.
44 | start_link() ->
45 |     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
46 | 
47 | -spec simulation_end() -> ok.
48 | simulation_end() ->
49 |     gen_server:call(?MODULE, simulation_end, infinity).
50 | 
51 | %% gen_server callbacks
52 | init([]) ->
53 |     schedule_create_barrier(),
54 | 
55 |     NodeNumber = exp_config:get(exp_node_number),
56 | 
57 |     lager:info("exp_rsg initialized"),
58 |     {ok, #state{node_number=NodeNumber}}.
59 | 
60 | handle_call(simulation_end, _From, State) ->
61 |     tell({sim_done, ldb_config:id()}),
62 |     {reply, ok, State};
63 | 
64 | handle_call(Msg, _From, State) ->
65 |     lager:warning("Unhandled call message: ~p", [Msg]),
66 |     {noreply, State}.
67 | 
68 | handle_cast(sim_go, State) ->
69 |     lager:info("Received SIM GO. Starting simulation."),
70 |     exp_simulation_runner:start_simulation(),
71 |     {noreply, State};
72 | 
73 | handle_cast(metrics_go, State) ->
74 |     lager:info("Received METRICS GO. Pushing metrics."),
75 |     exp_simulations_support:push_ldb_metrics(),
76 |     tell({metrics_done, ldb_config:id()}),
77 |     {noreply, State};
78 | 
79 | handle_cast(Msg, State) ->
80 |     lager:warning("Unhandled cast message: ~p", [Msg]),
81 |     {noreply, State}.
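%% (Editorial sketch) Message flow between this worker and exp_rsg_master,
%% as driven by the casts above and the barrier handling below:
%%   connect_done -> sim_go -> sim_done -> metrics_go -> metrics_done
%% Each *_done is reported via tell/1; each *_go arrives as a cast from
%% the master through the barrier peer service.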
82 | 
83 | handle_info(create_barrier, State) ->
84 |     case exp_orchestration:get_task(rsg, ?BARRIER_PORT, true) of
85 |         {ok, RSG} ->
86 |             ok = connect([RSG], exp_barrier_peer_service),
87 |             schedule_join_peers();
88 |         {error, not_connected} ->
89 |             schedule_create_barrier()
90 |     end,
91 | 
92 |     {noreply, State};
93 | 
94 | handle_info(join_peers, #state{node_number=NodeNumber}=State) ->
95 |     MyName = ldb_config:id(),
96 |     Nodes = exp_orchestration:get_tasks(exp, ?PORT, true),
97 |     Overlay = exp_config:get(exp_overlay),
98 | 
99 |     case length(Nodes) == NodeNumber of
100 |         true ->
101 |             %% if all nodes are connected
102 |             {NumericalId, ToConnect} = exp_overlay:numerical_id_and_neighbors(MyName,
103 |                                                                              Nodes,
104 |                                                                              Overlay),
105 |             %% set numerical id
106 |             exp_config:set(exp_numerical_id, NumericalId),
107 |             %% and connect to neighbors
108 |             ok = connect(ToConnect, ldb_hao),
109 |             tell({connect_done, ldb_config:id()});
110 |         _ ->
111 |             schedule_join_peers()
112 |     end,
113 |     {noreply, State};
114 | 
115 | handle_info(Msg, State) ->
116 |     lager:warning("Unhandled info message: ~p", [Msg]),
117 |     {noreply, State}.
118 | 
119 | terminate(_Reason, _State) ->
120 |     ok.
121 | 
122 | code_change(_OldVsn, State, _Extra) ->
123 |     {ok, State}.
124 | 
125 | %% @private
126 | schedule_create_barrier() ->
127 |     timer:send_after(?INTERVAL, create_barrier).
128 | 
129 | %% @private
130 | schedule_join_peers() ->
131 |     timer:send_after(?INTERVAL, join_peers).
132 | 
133 | %% @private
134 | connect([], _) ->
135 |     ok;
136 | connect([Node|Rest]=All, Manager) ->
137 |     case Manager:join(Node) of
138 |         ok ->
139 |             connect(Rest, Manager);
140 |         Error ->
141 |             lager:info("Couldn't connect to ~p. Reason ~p. Will try again in ~p ms",
142 |                        [Node, Error, ?INTERVAL]),
143 |             timer:sleep(?INTERVAL),
144 |             connect(All, Manager)
145 |     end.
146 | 
147 | %% @private
148 | tell(Msg) ->
149 |     {ok, Members} = exp_barrier_peer_service:members(),
150 |     lists:foreach(
151 |         fun(Peer) ->
152 |             exp_barrier_peer_service:forward_message(
153 |                 Peer,
154 |                 exp_rsg_master,
155 |                 Msg
156 |             )
157 |         end,
158 |         without_me(Members)
159 |     ).
160 | 
161 | %% @private
162 | without_me(Members) ->
163 |     Members -- [ldb_config:id()].
164 | 
-------------------------------------------------------------------------------- /src/exp_rsg_master.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_rsg_master).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_rsg_master callbacks
28 | -export([start_link/0]).
29 | 
30 | %% gen_server callbacks
31 | -export([init/1,
32 |          handle_call/3,
33 |          handle_cast/2,
34 |          handle_info/2,
35 |          terminate/2,
36 |          code_change/3]).
37 | 
38 | -define(INTERVAL, 5000). %% ms between retries
39 | 
40 | -record(state, {nodes :: [node_spec()] | undefined,
41 |                 connect_done :: ordsets:ordset(ldb_node_id()),
42 |                 sim_done :: ordsets:ordset(ldb_node_id()),
43 |                 metrics_done :: ordsets:ordset(ldb_node_id()),
44 |                 start_time :: timestamp() | undefined}).
45 | 
46 | -spec start_link() -> {ok, pid()} | ignore | {error, term()}.
47 | start_link() ->
48 |     gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
49 | 50 | %% gen_server callbacks 51 | init([]) -> 52 | schedule_create_barrier(), 53 | lager:info("exp_rsg_master initialized"), 54 | 55 | {ok, #state{nodes=undefined, 56 | connect_done=ordsets:new(), 57 | sim_done=ordsets:new(), 58 | metrics_done=ordsets:new(), 59 | start_time=undefined}}. 60 | 61 | handle_call(Msg, _From, State) -> 62 | lager:warning("Unhandled call message: ~p", [Msg]), 63 | {noreply, State}. 64 | 65 | handle_cast({connect_done, NodeName}, 66 | #state{connect_done=ConnectDone0}=State) -> 67 | 68 | lager:info("Received CONNECT DONE from ~p", [NodeName]), 69 | 70 | ConnectDone1 = ordsets:add_element(NodeName, ConnectDone0), 71 | 72 | T1 = case ordsets:size(ConnectDone1) == node_number() of 73 | true -> 74 | lager:info("Everyone is CONNECT DONE. SIM GO!"), 75 | T0 = ldb_util:unix_timestamp(), 76 | tell(sim_go), 77 | T0; 78 | false -> 79 | undefined 80 | end, 81 | 82 | {noreply, State#state{connect_done=ConnectDone1, 83 | start_time=T1}}; 84 | 85 | handle_cast({sim_done, NodeName}, 86 | #state{sim_done=SimDone0}=State) -> 87 | 88 | lager:info("Received SIM DONE from ~p", [NodeName]), 89 | 90 | SimDone1 = ordsets:add_element(NodeName, SimDone0), 91 | 92 | case ordsets:size(SimDone1) == node_number() of 93 | true -> 94 | lager:info("Everyone is SIM DONE. METRICS GO!"), 95 | tell(metrics_go); 96 | false -> 97 | ok 98 | end, 99 | 100 | {noreply, State#state{sim_done=SimDone1}}; 101 | 102 | handle_cast({metrics_done, NodeName}, 103 | #state{metrics_done=MetricsDone0, 104 | start_time=StartTime}=State) -> 105 | 106 | lager:info("Received METRICS DONE from ~p", [NodeName]), 107 | 108 | MetricsDone1 = ordsets:add_element(NodeName, MetricsDone0), 109 | 110 | case ordsets:size(MetricsDone1) == node_number() of 111 | true -> 112 | lager:info("Everyone is METRICS DONE. STOP!!!"), 113 | exp_simulations_support:push_exp_metrics(StartTime), 114 | exp_orchestration:stop_tasks([exp, rsg]); 115 | false -> 116 | ok 117 | end, 118 | 119 | {noreply, State#state{metrics_done=MetricsDone1}}; 120 | 121 | handle_cast(Msg, State) -> 122 | lager:warning("Unhandled cast message: ~p", [Msg]), 123 | {noreply, State}. 124 | 125 | handle_info(create_barrier, State) -> 126 | Nodes = exp_orchestration:get_tasks(exp, ?BARRIER_PORT, true), 127 | 128 | case length(Nodes) == node_number() of 129 | true -> 130 | ok = connect(Nodes); 131 | false -> 132 | schedule_create_barrier() 133 | end, 134 | {noreply, State#state{nodes=Nodes}}; 135 | 136 | handle_info(Msg, State) -> 137 | lager:warning("Unhandled info message: ~p", [Msg]), 138 | {noreply, State}. 139 | 140 | terminate(_Reason, _State) -> 141 | ok. 142 | 143 | code_change(_OldVsn, State, _Extra) -> 144 | {ok, State}. 145 | 146 | %% @private 147 | node_number() -> 148 | exp_config:get(exp_node_number). 149 | 150 | %% @private 151 | schedule_create_barrier() -> 152 | timer:send_after(?INTERVAL, create_barrier). 153 | 154 | %% @private 155 | connect([]) -> 156 | ok; 157 | connect([Node|Rest]=All) -> 158 | case exp_barrier_peer_service:join(Node) of 159 | ok -> 160 | connect(Rest); 161 | Error -> 162 | lager:info("Couldn't connect to ~p. Reason ~p. Will try again in ~p ms", 163 | [Node, Error, ?INTERVAL]), 164 | timer:sleep(?INTERVAL), 165 | connect(All) 166 | end. 167 | 168 | %% @private send to all 169 | tell(Msg) -> 170 | tell(Msg, rsgs()). 
171 | 
172 | %% @private send to some
173 | tell(Msg, Peers) ->
174 |     lists:foreach(
175 |         fun(Peer) ->
176 |             exp_barrier_peer_service:forward_message(
177 |                 Peer,
178 |                 exp_rsg,
179 |                 Msg
180 |             )
181 |         end,
182 |         Peers
183 |     ).
184 | 
185 | %% @private
186 | rsgs() ->
187 |     {ok, Members} = exp_barrier_peer_service:members(),
188 |     without_me(Members).
189 | 
190 | %% @private
191 | without_me(Members) ->
192 |     Members -- [ldb_config:id()].
193 | 
-------------------------------------------------------------------------------- /src/exp_simulation_runner.erl: --------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License. You may obtain
7 | %% a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing,
12 | %% software distributed under the License is distributed on an
13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | %% KIND, either express or implied. See the License for the
15 | %% specific language governing permissions and limitations
16 | %% under the License.
17 | %%
18 | %% -------------------------------------------------------------------
19 | 
20 | -module(exp_simulation_runner).
21 | -author("Vitor Enes").
22 | 
23 | -include("exp.hrl").
24 | 
25 | -behaviour(gen_server).
26 | 
27 | %% exp_simulation_runner callbacks
28 | -export([start_link/1,
29 |          start_simulation/0,
30 |          get_metrics/0]).
31 | 
32 | %% gen_server callbacks
33 | -export([init/1,
34 |          handle_call/3,
35 |          handle_cast/2,
36 |          handle_info/2,
37 |          terminate/2,
38 |          code_change/3]).
39 | 
40 | -define(SIMULATION_END_INTERVAL, 10000). %% ms between termination checks
41 | 
42 | -record(state, {event_count :: non_neg_integer(),
43 |                 event_fun :: function(),
44 |                 total_events_fun :: function(),
45 |                 check_end_fun :: function(),
46 |                 node_number :: non_neg_integer(),
47 |                 node_event_number :: non_neg_integer(),
48 |                 event_interval :: non_neg_integer(),
49 |                 metrics_st :: ldb_metrics:st(),
50 |                 simulation_st :: term()}).
51 | 
52 | -spec start_link([function()]) ->
53 |     {ok, pid()} | ignore | {error, term()}.
54 | start_link(Funs) ->
55 |     gen_server:start_link({local, ?MODULE}, ?MODULE, Funs, []).
56 | 
57 | -spec start_simulation() -> ok.
58 | start_simulation() ->
59 |     gen_server:call(?MODULE, start_simulation, infinity).
60 | 
61 | -spec get_metrics() -> ldb_metrics:st().
62 | get_metrics() ->
63 |     gen_server:call(?MODULE, get_metrics, infinity).
64 | 
65 | %% gen_server callbacks
66 | init([StartFun, EventFun, TotalEventsFun, CheckEndFun]) ->
67 |     %% start fun is called here,
68 |     %% and start simulation schedules the first event
69 |     SimulationSt = StartFun(),
70 | 
71 |     lager:info("exp_simulation_runner initialized with state ~p", [SimulationSt]),
72 | 
73 |     %% get node number and node event number
74 |     NodeNumber = exp_config:get(exp_node_number),
75 |     NodeEventNumber = exp_config:get(exp_node_event_number),
76 |     EventInterval = exp_config:get(exp_event_interval),
77 | 
78 |     %% metrics
79 |     MetricsSt = ldb_metrics:new(),
80 | 
81 |     {ok, #state{event_count=0,
82 |                 event_fun=EventFun,
83 |                 total_events_fun=TotalEventsFun,
84 |                 check_end_fun=CheckEndFun,
85 |                 node_number=NodeNumber,
86 |                 node_event_number=NodeEventNumber,
87 |                 event_interval=EventInterval,
88 |                 metrics_st=MetricsSt,
89 |                 simulation_st=SimulationSt}}.
90 | 
91 | handle_call(start_simulation, _From, #state{event_interval=EventInterval}=State) ->
92 |     schedule_event(EventInterval),
93 |     {reply, ok, State};
94 | 
95 | handle_call(get_metrics, _From, #state{metrics_st=MetricsSt}=State) ->
96 |     {reply, MetricsSt, State};
97 | 
98 | handle_call(Msg, _From, State) ->
99 |     lager:warning("Unhandled call message: ~p", [Msg]),
100 |     {noreply, State}.
101 | 
102 | handle_cast(Msg, State) ->
103 |     lager:warning("Unhandled cast message: ~p", [Msg]),
104 |     {noreply, State}.
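%% (Editorial sketch) With hypothetical settings exp_node_event_number =
%% 100 and exp_event_interval = 1000, the event loop below fires roughly
%% one event per second for 100 seconds on each node, then switches to
%% polling CheckEndFun every ?SIMULATION_END_INTERVAL ms until all nodes
%% have observed all events.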
105 | 106 | handle_info(event, #state{event_count=Events0, 107 | event_fun=EventFun, 108 | total_events_fun=TotalEventsFun, 109 | node_number=NodeNumber, 110 | node_event_number=NodeEventNumber, 111 | event_interval=EventInterval, 112 | metrics_st=MetricsSt0, 113 | simulation_st=SimulationSt0}=State) -> 114 | Events = Events0 + 1, 115 | {MetricsSt, SimulationSt} = EventFun(Events, NodeNumber, NodeEventNumber, 116 | {MetricsSt0, SimulationSt0}), 117 | TotalEvents = TotalEventsFun(), 118 | case Events rem 10 of 119 | 0 -> lager:info("Event ~p | Observed ~p | Node ~p", 120 | [Events, TotalEvents, ldb_config:id()]); 121 | _ -> ok 122 | end, 123 | 124 | case Events == NodeEventNumber of 125 | true -> 126 | %% If I did all the events I should do 127 | schedule_simulation_end(); 128 | false -> 129 | schedule_event(EventInterval) 130 | end, 131 | 132 | {noreply, State#state{event_count=Events, 133 | metrics_st=MetricsSt, 134 | simulation_st=SimulationSt}}; 135 | 136 | handle_info(simulation_end, #state{total_events_fun=TotalEventsFun, 137 | check_end_fun=CheckEndFun, 138 | node_number=NodeNumber, 139 | node_event_number=NodeEventNumber}=State) -> 140 | TotalEvents = TotalEventsFun(), 141 | lager:info("Events observed ~p | Node ~p", [TotalEvents, ldb_config:id()]), 142 | 143 | case CheckEndFun(NodeNumber, NodeEventNumber) of 144 | true -> 145 | %% If everyone did all the events they should do 146 | lager:info("All events have been observed"), 147 | end_simulation(); 148 | false -> 149 | schedule_simulation_end() 150 | end, 151 | 152 | {noreply, State}; 153 | 154 | handle_info(Msg, State) -> 155 | lager:warning("Unhandled info message: ~p", [Msg]), 156 | {noreply, State}. 157 | 158 | terminate(_Reason, _State) -> 159 | ok. 160 | 161 | code_change(_OldVsn, State, _Extra) -> 162 | {ok, State}. 163 | 164 | %% @private 165 | schedule_event(EventInterval) -> 166 | timer:send_after(EventInterval, event). 167 | 168 | %% @private 169 | schedule_simulation_end() -> 170 | timer:send_after(?SIMULATION_END_INTERVAL, simulation_end). 171 | 172 | %% @private 173 | end_simulation() -> 174 | case exp_config:get(exp_orchestration) of 175 | undefined -> 176 | exp_config:set(exp_simulation_end, true); 177 | _ -> 178 | exp_rsg:simulation_end() 179 | end. 180 | -------------------------------------------------------------------------------- /src/exp_simulations.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 3 | %% 4 | %% This file is provided to you under the Apache License, 5 | %% Version 2.0 (the "License"); you may not use this file 6 | %% except in compliance with the License. You may obtain 7 | %% a copy of the License at 8 | %% 9 | %% http://www.apache.org/licenses/LICENSE-2.0 10 | %% 11 | %% Unless required by applicable law or agreed to in writing, 12 | %% software distributed under the License is distributed on an 13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | %% KIND, either express or implied. See the License for the 15 | %% specific language governing permissions and limitations 16 | %% under the License. 17 | %% 18 | %% ------------------------------------------------------------------- 19 | 20 | -module(exp_simulations). 21 | -author("Vitor Enes [term()]. 
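%% Note (inferred from exp_simulation_runner:init/1, not in the
%% original source): every non-empty list built below has the shape
%% [StartFun, EventFun, TotalEventsFun, CheckEndFun], where
%%
%%   StartFun       :: fun(() -> SimulationSt)
%%   EventFun       :: fun((EventNumber, NodeNumber, NodeEventNumber,
%%                          {MetricsSt, SimulationSt}) ->
%%                              {MetricsSt, SimulationSt})
%%   TotalEventsFun :: fun(() -> non_neg_integer())
%%   CheckEndFun    :: fun((NodeNumber, NodeEventNumber) -> boolean())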
33 | get_specs(Simulation) -> 34 | Funs = case Simulation of 35 | undefined -> 36 | []; 37 | 38 | awset -> 39 | StartFun = fun() -> 40 | ldb:create(?KEY, awset) 41 | end, 42 | EventFun = fun(EventNumber, _NodeNumber, NodeEventNumber, St) -> 43 | Addition = EventNumber rem 4 /= 0, 44 | LastEvent = EventNumber == NodeEventNumber, 45 | 46 | %% if it's the last event, 47 | %% do an addition always, 48 | %% so that we have a way to 49 | %% detect when a node has 50 | %% observed all events 51 | case Addition orelse LastEvent of 52 | true -> 53 | Element = create_element(EventNumber), 54 | ldb:update(?KEY, {add, Element}); 55 | false -> 56 | %% remove an element added by me 57 | {ok, Query} = ldb:query(?KEY), 58 | ByMe = sets:to_list( 59 | sets:filter( 60 | fun(E) -> 61 | string:str(E, atom_to_list(ldb_config:id())) > 0 62 | end, 63 | Query 64 | ) 65 | ), 66 | Element = lists:nth( 67 | rand:uniform(length(ByMe)), 68 | ByMe 69 | ), 70 | ldb:update(?KEY, {rmv, Element}) 71 | end, 72 | St 73 | end, 74 | TotalEventsFun = fun() -> 75 | {ok, Value} = ldb:query(?KEY), 76 | sets:size(Value) 77 | end, 78 | CheckEndFun = fun(NodeNumber, NodeEventNumber) -> 79 | {ok, Query} = ldb:query(?KEY), 80 | %% a node has observed all events 81 | %% if it has in the set 82 | %% `NodeNumber` elements ending in 83 | %% `NodeEventNumber` 84 | LastElements = sets:filter( 85 | fun(E) -> 86 | string:str(E, element_sufix(NodeEventNumber)) > 0 87 | end, 88 | Query 89 | ), 90 | sets:size(LastElements) == NodeNumber 91 | end, 92 | [StartFun, 93 | EventFun, 94 | TotalEventsFun, 95 | CheckEndFun]; 96 | 97 | gcounter -> 98 | StartFun = fun() -> 99 | ldb:create(?KEY, gcounter) 100 | end, 101 | EventFun = fun(_EventNumber, _NodeNumber, _NodeEventNumber, St) -> 102 | ldb:update(?KEY, increment), 103 | St 104 | end, 105 | TotalEventsFun = fun() -> 106 | {ok, Value} = ldb:query(?KEY), 107 | Value 108 | end, 109 | CheckEndFun = fun(NodeNumber, NodeEventNumber) -> 110 | TotalEventsFun() == NodeNumber * NodeEventNumber 111 | end, 112 | [StartFun, 113 | EventFun, 114 | TotalEventsFun, 115 | CheckEndFun]; 116 | 117 | gset -> 118 | StartFun = fun() -> 119 | ldb:create(?KEY, gset) 120 | end, 121 | EventFun = fun(EventNumber, _NodeNumber, _NodeEventNumber, St) -> 122 | Element = create_element(EventNumber), 123 | ldb:update(?KEY, {add, Element}), 124 | St 125 | end, 126 | TotalEventsFun = fun() -> 127 | {ok, Value} = ldb:query(?KEY), 128 | sets:size(Value) 129 | end, 130 | CheckEndFun = fun(NodeNumber, NodeEventNumber) -> 131 | TotalEventsFun() == NodeNumber * NodeEventNumber 132 | end, 133 | [StartFun, 134 | EventFun, 135 | TotalEventsFun, 136 | CheckEndFun]; 137 | 138 | gmap -> 139 | StartFun = fun() -> 140 | ldb:create(?KEY, lwwmap), 141 | ldb:create("gmap_events", gcounter), 142 | ldb_forward:update_ignore_keys(sets:from_list(["gmap_events"])) 143 | end, 144 | EventFun = fun(_EventNumber, NodeNumber, _NodeEventNumber, St) -> 145 | Percentage = exp_config:get(exp_gmap_simulation_key_percentage), 146 | KeysPerNode = round_up(?GMAP_KEY_NUMBER / NodeNumber), 147 | 148 | %% node with id i has keys in 149 | %% [i * KeysPerNode, ((i + 1) * KeysPerNode) - 1] 150 | NumericalId = exp_config:get(exp_numerical_id), 151 | Start = NumericalId * KeysPerNode + 1, 152 | End0 = ((NumericalId + 1) * KeysPerNode), 153 | %% since `End0' can be bigger than `?GMAP_KEY_NUMBER': 154 | End = min(?GMAP_KEY_NUMBER, End0), 155 | 156 | %% create my keys 157 | MyKeys0 = lists:seq(Start, End), 158 | 159 | %% shuffle keys 160 | MyKeys = exp_util:shuffle_list(MyKeys0), 
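%% Worked example (hypothetical numbers, not in the original source):
%% if ?GMAP_KEY_NUMBER were 1000 and NodeNumber 3, then
%% KeysPerNode = round_up(1000 / 3) = 334, so node 0 owns keys
%% 1..334, node 1 owns 335..668, and node 2 owns 669..1000
%% (End0 = 1002, clamped by the min/2 above).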
161 | 
162 |             %% take the first `KeysPerIteration'
163 |             KeysPerIteration = round_up((Percentage * KeysPerNode) / 100),
164 |             Keys = lists:sublist(MyKeys, KeysPerIteration),
165 | 
166 |             Ops = lists:map(
167 |                 fun(Key) ->
168 |                     Timestamp = erlang:system_time(nanosecond),
169 |                     Value = <<>>,
170 |                     {set, Key, Timestamp, Value}
171 |                 end,
172 |                 Keys
173 |             ),
174 |             ldb:update(?KEY, Ops),
175 |             ldb:update("gmap_events", increment),
176 |             St
177 |         end,
178 |         TotalEventsFun = fun() ->
179 |             {ok, Value} = ldb:query("gmap_events"),
180 |             Value
181 |         end,
182 |         CheckEndFun = fun(NodeNumber, NodeEventNumber) ->
183 |             TotalEventsFun() == NodeNumber * NodeEventNumber
184 |         end,
185 |         [StartFun,
186 |          EventFun,
187 |          TotalEventsFun,
188 |          CheckEndFun];
189 | 
190 |     retwis ->
191 |         StartFun = fun() ->
192 |             SimulationSt = retwis_init(),
193 |             ldb:create("retwis_events", gcounter),
194 |             ldb_forward:update_ignore_keys(sets:from_list(["retwis_events"])),
195 |             %% simulation state for this experiment:
196 |             SimulationSt
197 |         end,
198 |         EventFun = fun(_EventNumber, _NodeNumber, _NodeEventNumber, St0) ->
199 |             St = retwis_event(St0),
200 |             ldb:update("retwis_events", increment),
201 |             St
202 |         end,
203 |         TotalEventsFun = fun() ->
204 |             {ok, Value} = ldb:query("retwis_events"),
205 |             Value
206 |         end,
207 |         CheckEndFun = fun(NodeNumber, NodeEventNumber) ->
208 |             TotalEventsFun() == NodeNumber * NodeEventNumber
209 |         end,
210 |         [StartFun,
211 |          EventFun,
212 |          TotalEventsFun,
213 |          CheckEndFun]
214 | 
215 |     end,
216 | 
217 |     create_spec(Funs).
218 | 
219 | %% @private
220 | create_spec(Funs) ->
221 |     case Funs of
222 |         [] ->
223 |             [];
224 |         _ ->
225 |             [{exp_simulation_runner,
226 |               {exp_simulation_runner, start_link, [Funs]},
227 |               permanent, 5000, worker, [exp_simulation_runner]}]
228 |     end.
229 | 
230 | %% @private Create a unique element to be added to the set.
231 | create_element(EventNumber) ->
232 |     MyName = ldb_config:id(),
233 |     atom_to_list(MyName) ++ element_sufix(EventNumber).
234 | 
235 | %% @private Create the element suffix for an event number.
236 | element_sufix(EventNumber) ->
237 |     "#" ++ integer_to_list(EventNumber).
238 | 
239 | %% @private Round up. Note that it always adds 1, so exact values overshoot (e.g. round_up(4.0) =:= 5); callers use the result only as a generous upper bound.
240 | round_up(A) ->
241 |     trunc(A) + 1.
242 | 
243 | -define(USER_NUMBER, 10000).
244 | -define(POST_SIZE, 270).
245 | -define(POST_ID_SIZE, 31).
246 | -define(TIMELINE_POSTS, 10).
247 | 
248 | %% @private
249 | %% Sizes:
250 | %% - Post: 270 bytes
251 | %% - *Id: 31 bytes
252 | %%
253 | %% Data structures:
254 | %% - UserId_followers: GSet
255 | %% - UserId_posts: LWWMap
256 | %% - UserId_timeline: LWWMap
257 | %%
258 | %% Initial numbers:
259 | %% - ?USER_NUMBER users
260 | %%
261 | retwis_init() ->
262 |     %% create all the keys that will ever exist
263 |     lists:foreach(
264 |         fun(UserId) ->
265 |             ldb:create(followers_key(UserId), gset),
266 |             ldb:create(posts_key(UserId), lwwmap),
267 |             ldb:create(timeline_key(UserId), lwwmap)
268 |         end,
269 |         all_users()
270 |     ),
271 |     generate_experiment_ids().
272 | 
273 | %% @private
274 | -spec generate_experiment_ids() -> list(integer()).
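%% Worked example (not in the original source): per 100 events the
%% expected id consumption is 15 follows * 2 ids + 35 posts * 1 id =
%% 65 ids, so ImpossibleCase = 130; with NodeEventNumber = 1000 this
%% pre-generates round_up(1000 * 130 / 100) = 1301 ids.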
275 | generate_experiment_ids() ->
276 |     Zipf = exp_config:get(exp_retwis_zipf),
277 |     NodeEventNumber = exp_config:get(exp_node_event_number),
278 |     %% on average, per 100 events: each follow (15%) requires 2 user ids
279 |     %% and each post (35%) requires 1 user id; double that as a generous margin
280 |     Per100 = 15 * 2 + 35,
281 |     ImpossibleCase = Per100 * 2,
282 |     IdsRequiredEstimation = round_up(NodeEventNumber * ImpossibleCase / 100),
283 | 
284 |     case Zipf of
285 |         0 ->
286 |             %% uniform
287 |             [random_user() || _ <- lists:seq(1, IdsRequiredEstimation)];
288 |         _ ->
289 |             CMD = "python3 bin/zipf.py "
290 |                 ++ integer_to_list(?USER_NUMBER) ++ " "
291 |                 ++ float_to_list(Zipf / 100, [{decimals, 1}]) ++ " "
292 |                 ++ integer_to_list(IdsRequiredEstimation),
293 |             Result = os:cmd(CMD),
294 |             [list_to_integer(Id) || Id <- string:lexemes(Result, "\n")]
295 |     end.
296 | 
297 | %% @private
298 | %% for timeline events, ?TIMELINE_POSTS posts are read
299 | %% TODO: does the choice of this number matter?
300 | -spec retwis_event({ldb_metrics:st(), list(integer())}) -> {ldb_metrics:st(), list(integer())}.
301 | retwis_event({MetricsSt0, NextIds0}) ->
302 | 
303 |     %% the repetition below is deliberate: each branch is written out in
304 |     %% full, instead of abstracting over the event type, so that
305 |     %% timer:tc/2 measures the operation itself as closely as possible
306 |     case event_type() of
307 |         follow ->
308 |             {MicroSeconds, NextIds1} = timer:tc(fun retwis_follow/1, [NextIds0]),
309 |             MetricsSt1 = ldb_metrics:record_latency(follow, MicroSeconds, MetricsSt0),
310 |             {MetricsSt1, NextIds1};
311 |         post ->
312 |             {MicroSeconds, NextIds1} = timer:tc(fun retwis_post/1, [NextIds0]),
313 |             MetricsSt1 = ldb_metrics:record_latency(post, MicroSeconds, MetricsSt0),
314 |             {MetricsSt1, NextIds1};
315 |         timeline ->
316 |             {MicroSeconds, _} = timer:tc(fun retwis_timeline/0, []),
317 |             MetricsSt1 = ldb_metrics:record_latency(timeline, MicroSeconds, MetricsSt0),
318 |             {MetricsSt1, NextIds0}
319 |     end.
320 | 
321 | %% @private
322 | retwis_follow([User, NewFollowee | NextIds1]) ->
323 |     ldb:update(followers_key(NewFollowee), {add, User}),
324 |     NextIds1.
325 | 
326 | %% @private
327 | retwis_post([User | NextIds1]) ->
328 |     %% post data
329 |     Post = create_post(),
330 |     PostId = create_post_id(),
331 |     Timestamp = erlang:system_time(nanosecond),
332 | 
333 |     %% create post
334 |     ldb:update(posts_key(User), {set, PostId, Timestamp, Post}),
335 | 
336 |     %% get followers
337 |     {ok, Followers} = ldb:query(followers_key(User)),
338 | 
339 |     %% add post to each follower timeline
340 |     Op = {set, Timestamp, Timestamp, PostId},
341 |     sets:fold(
342 |         fun(Follower, _) -> ldb:update(timeline_key(Follower), Op) end,
343 |         undefined,
344 |         Followers
345 |     ),
346 |     NextIds1.
347 | 
348 | %% @private
349 | retwis_timeline() ->
350 |     User = random_user(),
351 |     %% read ?TIMELINE_POSTS posts from the timeline
352 |     {ok, _} = ldb:query(timeline_key(User), [?TIMELINE_POSTS]).
353 | 
354 | %% @private
355 | %% Events:
356 | %% - follow user: 15%
357 | %% - post tweet: 35%
358 | %% - load timeline: 50%
359 | -spec event_type() -> follow | post | timeline.
360 | event_type() ->
361 |     Random = rand:uniform(100),
362 |     case Random =< 15 of
363 |         true -> follow;
364 |         false ->
365 |             case Random =< 50 of
366 |                 true -> post;
367 |                 false -> timeline
368 |             end
369 |     end.
370 | 
371 | %% @private
372 | -spec timeline_key(non_neg_integer()) -> string().
373 | timeline_key(UserId) ->
374 |     append_to_id(UserId, "_timeline").
375 | 
376 | %% @private
377 | -spec followers_key(non_neg_integer()) -> string().
378 | followers_key(UserId) ->
379 |     append_to_id(UserId, "_followers").
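%% Key-naming sketch (derived from append_to_id/2 below, not in the
%% original source):
%%   followers_key(42) -> "42_followers"
%%   posts_key(42)     -> "42_posts"
%%   timeline_key(42)  -> "42_timeline"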
380 | 381 | %% @private 382 | -spec posts_key(non_neg_integer()) -> string(). 383 | posts_key(UserId) -> 384 | append_to_id(UserId, "_posts"). 385 | 386 | %% @private 387 | -spec append_to_id(non_neg_integer(), string()) -> string(). 388 | append_to_id(UserId, End) -> 389 | integer_to_list(UserId) ++ End. 390 | 391 | -spec all_users() -> list(non_neg_integer()). 392 | all_users() -> 393 | lists:seq(0, ?USER_NUMBER + 1). 394 | 395 | -spec random_user() -> non_neg_integer(). 396 | random_user() -> 397 | rand:uniform(?USER_NUMBER). 398 | 399 | -spec create_post() -> binary(). 400 | create_post() -> 401 | random_bytes(?POST_SIZE). 402 | 403 | -spec create_post_id() -> binary(). 404 | create_post_id() -> 405 | random_bytes(?POST_ID_SIZE). 406 | 407 | %% @doc Generate a random byte array. 408 | -spec random_bytes(non_neg_integer()) -> binary(). 409 | random_bytes(Length) -> 410 | crypto:strong_rand_bytes(Length). 411 | -------------------------------------------------------------------------------- /src/exp_simulations_support.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 3 | %% 4 | %% This file is provided to you under the Apache License, 5 | %% Version 2.0 (the "License"); you may not use this file 6 | %% except in compliance with the License. You may obtain 7 | %% a copy of the License at 8 | %% 9 | %% http://www.apache.org/licenses/LICENSE-2.0 10 | %% 11 | %% Unless required by applicable law or agreed to in writing, 12 | %% software distributed under the License is distributed on an 13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | %% KIND, either express or implied. See the License for the 15 | %% specific language governing permissions and limitations 16 | %% under the License. 17 | %% 18 | %% ------------------------------------------------------------------- 19 | 20 | -module(exp_simulations_support). 21 | -author("Vitor Enes ok. 31 | push_exp_metrics(StartTime) -> 32 | LDBVars = [ldb_mode, 33 | ldb_state_sync_interval, 34 | ldb_redundant_dgroups, 35 | ldb_dgroup_back_propagation, 36 | ldb_scuttlebutt_gc, 37 | ldb_op_ii], 38 | LDBConfigs = get_configs(ldb, LDBVars), 39 | 40 | LSimVars = [exp_overlay, 41 | exp_node_number, 42 | exp_simulation, 43 | exp_node_event_number, 44 | exp_gmap_simulation_key_percentage, 45 | exp_retwis_zipf], 46 | LSimConfigs = get_configs(exp, LSimVars), 47 | 48 | All = [{start_time, StartTime}] 49 | ++ LDBConfigs 50 | ++ LSimConfigs, 51 | 52 | FilePath = file_path(rsg), 53 | File = ldb_json:encode(All), 54 | 55 | store(FilePath, File), 56 | ok. 57 | 58 | -spec push_ldb_metrics() -> ok. 
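%% Sketch of the encoded document (field names read off the folds
%% below; the exact JSON layout produced by ldb_json:encode/1 is an
%% assumption):
%%   {"transmission": [{"ts": T, "size": [A, B], "term_size": C}, ...],
%%    "memory":       [{"ts": T, "size": [A, B], "term_size": C}, ...],
%%    "latency":      {...},
%%    "processing":   ...}
%% stored through exp_redis_metrics_store under "<timestamp>/<name>.json".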
59 | push_ldb_metrics() -> 60 | RunnerMetrics = exp_simulation_runner:get_metrics(), 61 | LDBMetrics = ldb_forward:get_metrics(), 62 | {Transmission0, Memory0, Latency0, Processing} = ldb_metrics:merge_all([RunnerMetrics | LDBMetrics]), 63 | 64 | %% process transmission 65 | Transmission = maps:fold( 66 | fun(Timestamp, {{A, B}, C}, Acc) -> 67 | V = [{ts, Timestamp}, 68 | {size, [A, B]}, 69 | {term_size, C}], 70 | [V | Acc] 71 | end, 72 | [], 73 | Transmission0 74 | ), 75 | 76 | %% process memory 77 | Memory = maps:fold( 78 | fun(Timestamp, {{A, B}, C}, Acc) -> 79 | V = [{ts, Timestamp}, 80 | {size, [A, B]}, 81 | {term_size, C}], 82 | [V | Acc] 83 | end, 84 | [], 85 | Memory0 86 | ), 87 | 88 | %% process latency 89 | Latency = maps:to_list(Latency0), 90 | 91 | All = [ 92 | {transmission, Transmission}, 93 | {memory, Memory}, 94 | {latency, Latency}, 95 | {processing, Processing} 96 | ], 97 | 98 | FilePath = file_path(ldb_config:id()), 99 | File = ldb_json:encode(All), 100 | 101 | store(FilePath, File), 102 | ok. 103 | 104 | %% @private 105 | file_path(Name) -> 106 | Timestamp = exp_config:get(exp_timestamp), 107 | Filename = str(Timestamp) ++ "/" 108 | ++ str(Name) ++ ".json", 109 | Filename. 110 | 111 | %% @private 112 | get_configs(App, Vars) -> 113 | Mod = case App of 114 | ldb -> ldb_config; 115 | exp -> exp_config 116 | end, 117 | lists:map( 118 | fun(Var) -> {Var, Mod:get(Var)} end, 119 | Vars 120 | ). 121 | 122 | %% @private 123 | str(V) when is_atom(V) -> 124 | atom_to_list(V); 125 | str(V) when is_integer(V) -> 126 | integer_to_list(V). 127 | 128 | %% @private 129 | store(FilePath, File) -> 130 | ok = exp_redis_metrics_store:put(FilePath, File). 131 | -------------------------------------------------------------------------------- /src/exp_sup.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 3 | %% 4 | %% This file is provided to you under the Apache License, 5 | %% Version 2.0 (the "License"); you may not use this file 6 | %% except in compliance with the License. You may obtain 7 | %% a copy of the License at 8 | %% 9 | %% http://www.apache.org/licenses/LICENSE-2.0 10 | %% 11 | %% Unless required by applicable law or agreed to in writing, 12 | %% software distributed under the License is distributed on an 13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | %% KIND, either express or implied. See the License for the 15 | %% specific language governing permissions and limitations 16 | %% under the License. 17 | %% 18 | %% ------------------------------------------------------------------- 19 | 20 | -module(exp_sup). 21 | -author("Vitor Enes 37 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 38 | 39 | init([]) -> 40 | configure_peer_service(), 41 | {Simulation, Orchestration, RSG} = configure(), 42 | 43 | Children = exp_specs(Simulation, Orchestration, RSG), 44 | 45 | lager:info("exp_sup initialized!"), 46 | RestartStrategy = {one_for_one, 10, 10}, 47 | {ok, {RestartStrategy, Children}}. 48 | 49 | %% @private 50 | configure_peer_service() -> 51 | %% configure exp overlay 52 | configure_var("OVERLAY", 53 | exp_overlay, 54 | ?DEFAULT_OVERLAY). 
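%% Usage sketch (hypothetical shell session, not in the original
%% source): configure_var/3, defined below, reads an OS environment
%% variable, falls back to the current exp_config value or the given
%% default, and stores the result. So the call above behaves like
%% exp_config:set(exp_overlay, ring) when the node is started as:
%%
%%   OVERLAY=ring NODE_NUMBER=3 SIMULATION=gcounter rebar3 shell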
55 | 
56 | %% @private
57 | configure() ->
58 |     %% configure exp simulation
59 |     Simulation = configure_var("SIMULATION",
60 |                                exp_simulation,
61 |                                undefined),
62 | 
63 |     %% configure node number
64 |     configure_int("NODE_NUMBER",
65 |                   exp_node_number,
66 |                   1),
67 | 
68 |     %% configure node event number
69 |     configure_int("NODE_EVENT_NUMBER",
70 |                   exp_node_event_number,
71 |                   30),
72 | 
73 |     %% configure event interval
74 |     configure_int("EVENT_INTERVAL",
75 |                   exp_event_interval,
76 |                   1000), %% milliseconds
77 | 
78 |     %% configure unique simulation timestamp
79 |     configure_int("TIMESTAMP",
80 |                   exp_timestamp,
81 |                   0),
82 | 
83 |     %% configure api server
84 |     configure_str("APISERVER",
85 |                   exp_api_server,
86 |                   undefined),
87 | 
88 |     %% configure auth token
89 |     configure_str("TOKEN",
90 |                   exp_token,
91 |                   undefined),
92 | 
93 |     %% configure orchestration
94 |     Orchestration = configure_var("ORCHESTRATION",
95 |                                   exp_orchestration,
96 |                                   undefined),
97 | 
98 |     %% configure rsg master
99 |     RSG = configure_var("RSG",
100 |                         exp_rsg,
101 |                         false),
102 | 
103 |     %% configure gmap simulation key percentage
104 |     configure_int("GMAP_SIMULATION_KEY_PERCENTAGE",
105 |                   exp_gmap_simulation_key_percentage,
106 |                   100),
107 | 
108 |     %% configure retwis zipf parameter (divided by 100 where used)
109 |     configure_int("RETWIS_ZIPF",
110 |                   exp_retwis_zipf,
111 |                   0),
112 | 
113 |     {Simulation, Orchestration, RSG}.
114 | 
115 | %% @private
116 | exp_specs(Simulation, Orchestration, RSG) ->
117 |     SimulationSpecs = exp_simulations:get_specs(Simulation),
118 | 
119 |     OrchestrationSpecs = case Orchestration of
120 |         undefined ->
121 |             [];
122 |         _ ->
123 |             BarrierPeerServiceSpecs = [?CHILD(exp_barrier_peer_service)],
124 |             Store = [?CHILD(exp_redis_metrics_store)],
125 | 
126 |             RSGSpecs = case RSG of
127 |                 true ->
128 |                     [?CHILD(exp_rsg_master)];
129 |                 false ->
130 |                     [?CHILD(exp_rsg)]
131 |             end,
132 | 
133 |             HTTPSpecs = case RSG of
134 |                 true ->
135 |                     [];
136 |                 false ->
137 |                     [?CHILD(exp_resource)]
138 |             end,
139 | 
140 |             BarrierPeerServiceSpecs ++ Store ++ RSGSpecs ++ HTTPSpecs
141 |     end,
142 | 
143 |     SimulationSpecs ++ OrchestrationSpecs.
144 | 
145 | %% @private
146 | configure_var(Env, Var, Default) ->
147 |     To = fun(V) -> atom_to_list(V) end,
148 |     From = fun(V) -> list_to_atom(V) end,
149 |     configure(Env, Var, Default, To, From).
150 | 
151 | %% @private
152 | configure_str(Env, Var, Default) ->
153 |     F = fun(V) -> V end,
154 |     configure(Env, Var, Default, F, F).
155 | 
156 | %% @private
157 | configure_int(Env, Var, Default) ->
158 |     To = fun(V) -> integer_to_list(V) end,
159 |     From = fun(V) -> list_to_integer(V) end,
160 |     configure(Env, Var, Default, To, From).
161 | 
162 | %% @private
163 | configure(Env, Var, Default, To, From) ->
164 |     Current = exp_config:get(Var, Default),
165 |     Val = From(
166 |         os:getenv(Env, To(Current))
167 |     ),
168 |     exp_config:set(Var, Val),
169 |     Val.
170 | --------------------------------------------------------------------------------
/src/exp_util.erl:
--------------------------------------------------------------------------------
1 | %%
2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved.
3 | %%
4 | %% This file is provided to you under the Apache License,
5 | %% Version 2.0 (the "License"); you may not use this file
6 | %% except in compliance with the License.
You may obtain 7 | %% a copy of the License at 8 | %% 9 | %% http://www.apache.org/licenses/LICENSE-2.0 10 | %% 11 | %% Unless required by applicable law or agreed to in writing, 12 | %% software distributed under the License is distributed on an 13 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | %% KIND, either express or implied. See the License for the 15 | %% specific language governing permissions and limitations 16 | %% under the License. 17 | %% 18 | %% ------------------------------------------------------------------- 19 | 20 | -module(exp_util). 21 | -author("Vitor Enes node_spec(). 31 | generate_spec(IpStr, Port) -> 32 | NameStr = "exp-" ++ integer_to_list(?PORT) ++ "@" ++ IpStr, 33 | 34 | ParsedName = list_to_atom(NameStr), 35 | {ok, ParsedIp} = inet_parse:address(IpStr), 36 | 37 | {ParsedName, ParsedIp, Port}. 38 | 39 | %% @doc Shuffle a list. 40 | -spec shuffle_list(list()) -> list(). 41 | shuffle_list(L) -> 42 | rand:seed(exsplus, erlang:timestamp()), 43 | lists:map( 44 | fun({_, E}) -> E end, 45 | lists:sort( 46 | lists:map( 47 | fun(E) -> {rand:uniform(), E} end, L 48 | ) 49 | ) 50 | ). 51 | -------------------------------------------------------------------------------- /test/exp_modes_SUITE.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 3 | %% Copyright (c) 2016 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | %% 21 | 22 | -module(exp_modes_SUITE). 23 | -author("Vitor Enes "). 24 | 25 | %% common_test callbacks 26 | -export([%% suite/0, 27 | init_per_suite/1, 28 | end_per_suite/1, 29 | init_per_testcase/2, 30 | end_per_testcase/2, 31 | all/0]). 32 | 33 | %% tests 34 | -compile([export_all, nowarn_export_all]). 35 | 36 | -include("exp.hrl"). 37 | 38 | -include_lib("common_test/include/ct.hrl"). 39 | -include_lib("eunit/include/eunit.hrl"). 40 | -include_lib("kernel/include/inet.hrl"). 41 | 42 | -define(NODE_NUMBER, 3). 43 | -define(EVENT_NUMBER, 5). 44 | -define(SIMULATION, gcounter). 45 | 46 | %% =================================================================== 47 | %% common_test callbacks 48 | %% =================================================================== 49 | 50 | suite() -> 51 | [{timetrap, {hours, 1}}]. 52 | 53 | init_per_suite(Config) -> 54 | Config. 55 | 56 | end_per_suite(Config) -> 57 | Config. 58 | 59 | init_per_testcase(Case, Config) -> 60 | ct:pal("Beginning test case: ~p", [Case]), 61 | Config. 62 | 63 | end_per_testcase(Case, Config) -> 64 | ct:pal("Ending test case: ~p", [Case]), 65 | Config. 66 | 67 | all() -> 68 | [ 69 | state_based_ring_test, 70 | delta_based_ring_test, 71 | delta_based_revisited_ring_test, 72 | scuttlebutt_ring_test, 73 | scuttlebutt_gc_ring_test, 74 | op_based_ring_test 75 | ]. 
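%% Note (editorial, not in the original suite): each mode follows the
%% same pattern -- an entry in all/0 above, a one-line test below that
%% calls run(Mode, ring), and a get_config/1 clause mapping the mode to
%% {LdbMode, RedundantDgroups, BackPropagation, ScuttlebuttGC}.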
76 | 77 | %% =================================================================== 78 | %% tests 79 | %% =================================================================== 80 | 81 | state_based_ring_test(_Config) -> 82 | run(state_based, ring). 83 | 84 | delta_based_ring_test(_Config) -> 85 | run(delta_based, ring). 86 | 87 | delta_based_revisited_ring_test(_Config) -> 88 | run(delta_based_revisited, ring). 89 | 90 | scuttlebutt_ring_test(_Config) -> 91 | run(scuttlebutt, ring). 92 | 93 | scuttlebutt_gc_ring_test(_Config) -> 94 | run(scuttlebutt_gc, ring). 95 | 96 | op_based_ring_test(_Config) -> 97 | run(op_based, ring). 98 | 99 | %% @private 100 | run(Evaluation, Overlay) -> 101 | {Mode, Redundant, BackPropagation, GC} = get_config(Evaluation), 102 | 103 | Options = [{node_number, ?NODE_NUMBER}, 104 | {exp_settings, 105 | [{exp_overlay, Overlay}, 106 | {exp_simulation, ?SIMULATION}, 107 | {exp_node_number, ?NODE_NUMBER}, 108 | {exp_node_event_number, ?EVENT_NUMBER}]}, 109 | {ldb_settings, 110 | [{ldb_mode, Mode}, 111 | {ldb_redundant_dgroups, Redundant}, 112 | {ldb_dgroup_back_propagation, BackPropagation}, 113 | {ldb_scuttlebutt_gc, GC}]}], 114 | 115 | exp_local_simulations_support:run(Options). 116 | 117 | %% @private 118 | get_config(state_based) -> 119 | {state_based, false, false, false}; 120 | get_config(delta_based) -> 121 | {delta_based, false, false, false}; 122 | get_config(delta_based_revisited) -> 123 | {delta_based, true, true, false}; 124 | get_config(scuttlebutt) -> 125 | {scuttlebutt, false, false, false}; 126 | get_config(scuttlebutt_gc) -> 127 | {scuttlebutt, false, false, true}; 128 | get_config(op_based) -> 129 | {op_based, false, false, false}. 130 | -------------------------------------------------------------------------------- /test/exp_simulations_SUITE.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% Copyright (c) 2018 Vitor Enes. All Rights Reserved. 3 | %% Copyright (c) 2016 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | %% 21 | 22 | -module(exp_simulations_SUITE). 23 | -author("Vitor Enes "). 24 | 25 | %% common_test callbacks 26 | -export([%% suite/0, 27 | init_per_suite/1, 28 | end_per_suite/1, 29 | init_per_testcase/2, 30 | end_per_testcase/2, 31 | all/0]). 32 | 33 | %% tests 34 | -compile([export_all, nowarn_export_all]). 35 | 36 | -include("exp.hrl"). 37 | 38 | -include_lib("common_test/include/ct.hrl"). 39 | -include_lib("eunit/include/eunit.hrl"). 40 | -include_lib("kernel/include/inet.hrl"). 41 | 42 | -define(NODE_NUMBER, 3). 43 | -define(EVENT_NUMBER, 5). 
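%% Note (editorial, not in the original suite): with ?NODE_NUMBER = 3
%% and ?EVENT_NUMBER = 5, every simulation's CheckEndFun waits for
%% 3 * 5 = 15 events in total -- e.g. the gcounter run only ends once
%% each node observes the counter at 15.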
44 | 45 | %% =================================================================== 46 | %% common_test callbacks 47 | %% =================================================================== 48 | 49 | suite() -> 50 | [{timetrap, {hours, 1}}]. 51 | 52 | init_per_suite(Config) -> 53 | Config. 54 | 55 | end_per_suite(Config) -> 56 | Config. 57 | 58 | init_per_testcase(Case, Config) -> 59 | ct:pal("Beginning test case: ~p", [Case]), 60 | Config. 61 | 62 | end_per_testcase(Case, Config) -> 63 | ct:pal("Ending test case: ~p", [Case]), 64 | Config. 65 | 66 | all() -> 67 | [ 68 | gset_test, 69 | gcounter_test, 70 | gmap_test 71 | %retwis_test 72 | ]. 73 | 74 | %% =================================================================== 75 | %% tests 76 | %% =================================================================== 77 | 78 | gset_test(_Config) -> 79 | run(gset). 80 | 81 | gcounter_test(_Config) -> 82 | run(gcounter). 83 | 84 | gmap_test(_Config) -> 85 | run(gmap). 86 | 87 | retwis_test(_Config) -> 88 | run(retwis). 89 | 90 | %% @private 91 | run(Simulation) -> 92 | Overlay = ring, 93 | 94 | Options = [{node_number, ?NODE_NUMBER}, 95 | {exp_settings, 96 | [{exp_overlay, Overlay}, 97 | {exp_simulation, Simulation}, 98 | {exp_node_number, ?NODE_NUMBER}, 99 | {exp_node_event_number, ?EVENT_NUMBER}]}, 100 | {ldb_settings, 101 | [{ldb_mode, delta_based}, 102 | {ldb_redundant_dgroups, true}, 103 | {ldb_dgroup_back_propagation, true}]}], 104 | 105 | exp_local_simulations_support:run(Options). 106 | --------------------------------------------------------------------------------
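These suites run in CI through make test (see .travis.yml). A single suite can also be launched from an Erlang shell with common_test directly; a hypothetical invocation, assuming a "log" directory exists:

    %% run a single suite with common_test
    ct:run_test([{suite, exp_simulations_SUITE}, {dir, "test"}, {logdir, "log"}]).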