├── .gitignore ├── .travis.yml ├── Dockerfile ├── Makefile ├── README.rst ├── Rscripts ├── echo-latency.R ├── graph.R └── partisan-bench-8mb-1ms.R ├── bin ├── .gitkeep ├── bench ├── clone-deps ├── gist-results ├── kube └── perf-suite.sh ├── config ├── admin_bin ├── advanced.config ├── test.config ├── vars.config ├── vars_dev1.config ├── vars_dev2.config └── vars_dev3.config ├── eprof └── .gitkeep ├── examples ├── 10mb_object.config ├── 1kb_object.config ├── 1mb_object.config ├── 2mb_object.config ├── 32kb_object.config ├── 32mb_object.config ├── 4mb_object.config ├── 512kb_object.config ├── 64kb_object.config ├── 64mb_object.config ├── 8mb_object.config ├── 96kb_object.config ├── default.config ├── echo.config ├── ping.config └── sync_ping.config ├── output.txt ├── priv └── 01-unir.schema ├── rebar.config ├── rebar.lock ├── rebar3 ├── results-scale.csv ├── src ├── testable_vnode.erl ├── unir.app.src ├── unir.erl ├── unir_app.erl ├── unir_console.erl ├── unir_failure_fsm.erl ├── unir_failure_fsm_sup.erl ├── unir_get_fsm.erl ├── unir_get_fsm_sup.erl ├── unir_ping_fsm.erl ├── unir_ping_fsm_sup.erl ├── unir_put_fsm.erl ├── unir_put_fsm_sup.erl ├── unir_sup.erl └── unir_vnode.erl └── test ├── functionality_SUITE.erl ├── partisan_support.erl ├── prop_unir.erl ├── prop_unir_vnode.erl ├── proper-regressions.consult ├── support.erl └── throughput_SUITE.erl /.gitignore: -------------------------------------------------------------------------------- 1 | _build/ 2 | deps/ 3 | .vscode/ 4 | _checkouts/ 5 | log/ 6 | results/ 7 | data/ 8 | _checkouts* 9 | results.csv 10 | .rebar3/ 11 | rebar3.crashdump 12 | results-* 13 | dets/ 14 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: erlang 2 | otp_release: 3 | - 19.3 4 | - 20.3 5 | install: 6 | - make 7 | - ./rebar3 update 8 | - make release 9 | script: 10 | - make test 11 | - make without-partisan-test 12 | - make with-partisan-test 13 | - make scale-test 14 | - make partisan-scale-test 15 | - make partisan-with-binary-padding-test 16 | - make partisan-with-parallelism-test 17 | notifications: 18 | email: christopher.meiklejohn@gmail.com 19 | slack: lasp-lang:hiPRNnbUa3zdGrrXZfGRAF7D 20 | irc: "irc.freenode.org#lasp-lang" 21 | sudo: false 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM erlang:20.3 2 | 3 | MAINTAINER Christopher S. 
Meiklejohn 4 | 5 | RUN cd /tmp && \ 6 | apt-get update && \ 7 | apt-get -y install wget build-essential make gcc ruby-dev git expect gnuplot tmux strace && \ 8 | gem install gist && \ 9 | cd /opt && \ 10 | git clone https://github.com/lasp-lang/unir.git && \ 11 | cd unir && \ 12 | make release 13 | 14 | CMD echo "${GIST_TOKEN}" > /root/.gist && \ 15 | echo "kube running for ${HOSTNAME}" | gist && \ 16 | export LC_ALL=en_US.UTF-8 && \ 17 | export LANG=en_US.UTF-8 && \ 18 | cd /opt/unir && \ 19 | git pull && \ 20 | make && \ 21 | (ulimit -n 65534; ./rebar3 proper -m prop_unir -p prop_sequential; exit 0) | tee output-proper.txt && \ 22 | (make proper-logs; exit 0) | tee output-logs.txt && \ 23 | chmod 755 bin/gist-results && \ 24 | bin/gist-results -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BASEDIR = $(shell pwd) 2 | REBAR = ./rebar3 3 | RELPATH = _build/default/rel/unir 4 | PRODRELPATH = _build/prod/rel/unir 5 | APPNAME = unir 6 | SHELL = /bin/bash 7 | CONCURRENCY ?= 4 8 | LATENCY ?= 0 9 | SIZE ?= 1024 10 | PROJECT_ID ?= partisan-203021 11 | 12 | compile: 13 | $(REBAR) compile 14 | 15 | test-workflow: kill 16 | ./rebar3 proper -m prop_unir -p prop_sequential --noshrink 17 | 18 | gcloud-build: 19 | docker build --no-cache -t gcr.io/$(PROJECT_ID)/unir:v1 . 20 | gcloud docker -- push gcr.io/$(PROJECT_ID)/unir:v1 21 | 22 | gcloud-deploy: 23 | @read -s -p "Please make sure you've run gcloud-build and then hit enter to continue..." 24 | @echo 25 | yes | gcloud container clusters delete unir; exit 0 26 | gcloud container clusters create unir --machine-type n1-highcpu-16 27 | gcloud container clusters get-credentials unir 28 | sleep 60 29 | bin/kube 30 | 31 | gcloud-redeploy: 32 | @read -s -p "Please make sure you've run gcloud-build and then hit enter to continue..." 33 | @echo 34 | kubectl delete -f /tmp/unir.yaml 35 | bin/kube 36 | 37 | kube-deploy: 38 | bin/kube 39 | 40 | gcloud-delete: 41 | yes | gcloud container clusters delete unir; exit 0 42 | 43 | release: compile 44 | $(REBAR) release 45 | mkdir -p $(RELPATH)/../unir_config 46 | [ -f $(RELPATH)/../unir_config/unir.conf ] || cp $(RELPATH)/etc/unir.conf $(RELPATH)/../unir_config/unir.conf 47 | [ -f $(RELPATH)/../unir_config/advanced.config ] || cp $(RELPATH)/etc/advanced.config $(RELPATH)/../unir_config/advanced.config 48 | 49 | proper: release kill 50 | $(REBAR) proper -m prop_unir -p prop_sequential 51 | 52 | kill: 53 | pkill -9 beam.smp; pkill -9 epmd; exit 0 54 | 55 | console: 56 | cd $(RELPATH) && ./bin/unir console 57 | 58 | clear-logs: 59 | rm -rf _build/test/logs 60 | 61 | proper-logs: 62 | find . -name console.log | grep `ls -d ./undefined* | tail -1` | xargs cat 63 | 64 | tail-proper-logs: 65 | find . -name console.log | grep `ls -d ./undefined* | tail -1` | xargs tail -F 66 | 67 | logs: 68 | find . -name console.log | grep `ls -d ./_build/test/logs/ct_run* | tail -1` | xargs cat 69 | 70 | tail-logs: 71 | find . 
-name console.log | grep `ls -d ./_build/test/logs/ct_run* | tail -1` | xargs tail -F 72 | 73 | echo-bench: 74 | pkill -9 beam.smp; pkill -9 epmd; exit 0 75 | BENCH_CONFIG=echo.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 76 | pkill -9 beam.smp; pkill -9 epmd; exit 0 77 | BENCH_CONFIG=echo.config $(REBAR) ct --suite=throughput_SUITE --group=partisan --case=bench_test --readable=false -v 78 | pkill -9 beam.smp; pkill -9 epmd; exit 0 79 | BENCH_CONFIG=echo.config $(REBAR) ct --suite=throughput_SUITE --group=partisan_with_parallelism --case=bench_test --readable=false -v 80 | 81 | fsm-perf: release 82 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=disterl 83 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=partisan 84 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=partisan_with_channels 85 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=partisan_with_parallelism 86 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=partisan_with_monotonic_channels 87 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=partisan_with_partitioned_parallelism 88 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=partisan_with_partitioned_parallelism_and_channels 89 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=fsm_performance_test --group=partisan_with_partitioned_parallelism_and_channels_and_monotonic_channels 90 | 91 | echo-perf: release 92 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=disterl 93 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=partisan 94 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=partisan_with_channels 95 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=partisan_with_monotonic_channels 96 | pkill -9 
beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=partisan_with_parallelism 97 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=partisan_with_partitioned_parallelism 98 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=partisan_with_partitioned_parallelism_and_channels 99 | pkill -9 beam.smp; pkill -9 epmd; SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=echo_performance_test --group=partisan_with_partitioned_parallelism_and_channels_and_monotonic_channels 100 | 101 | partisan-perf: release 102 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=disterl 103 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan 104 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_channels 105 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_parallelism 106 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_partitioned_parallelism 107 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_partitioned_parallelism_and_channels 108 | 109 | profile: release 110 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=false PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=disterl 111 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=false PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan 112 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=false PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_channels 113 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=false 
PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_parallelism 114 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=false PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_partitioned_parallelism 115 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=false PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_partitioned_parallelism_and_channels 116 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=disterl 117 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan 118 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_channels 119 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_parallelism 120 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_partitioned_parallelism 121 | pkill -9 beam.smp; pkill -9 epmd; PARTISAN_INIT=true PROFILE=true SIZE=${SIZE} LATENCY=${LATENCY} CONCURRENCY=${CONCURRENCY} PARALLELISM=${CONCURRENCY} ${REBAR} ct --readable=false -v --suite=throughput_SUITE --case=partisan_performance_test --group=partisan_with_partitioned_parallelism_and_channels 122 | 123 | single-bench: 124 | pkill -9 beam.smp; pkill -9 epmd; exit 0 125 | BENCH_CONFIG=32kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=disterl --case=bench_test 126 | pkill -9 beam.smp; pkill -9 epmd; exit 0 127 | BENCH_CONFIG=32kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=partisan --case=bench_test 128 | pkill -9 beam.smp; pkill -9 epmd; exit 0 129 | BENCH_CONFIG=32kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=partisan_with_binary_padding --case=bench_test 130 | pkill -9 beam.smp; pkill -9 epmd; exit 0 131 | BENCH_CONFIG=64kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=disterl --case=bench_test 132 | pkill -9 beam.smp; pkill -9 epmd; exit 0 133 | BENCH_CONFIG=64kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=partisan --case=bench_test 134 | pkill -9 beam.smp; pkill -9 epmd; exit 0 135 | BENCH_CONFIG=64kb_object.config $(REBAR) ct 
--readable=false -v --suite=throughput_SUITE --group=partisan_with_binary_padding --case=bench_test 136 | pkill -9 beam.smp; pkill -9 epmd; exit 0 137 | BENCH_CONFIG=96kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=disterl --case=bench_test 138 | pkill -9 beam.smp; pkill -9 epmd; exit 0 139 | BENCH_CONFIG=96kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=partisan --case=bench_test 140 | pkill -9 beam.smp; pkill -9 epmd; exit 0 141 | BENCH_CONFIG=96kb_object.config $(REBAR) ct --readable=false -v --suite=throughput_SUITE --group=partisan_with_binary_padding --case=bench_test 142 | 143 | busy-port-bench: 144 | pkill -9 beam.smp; pkill -9 epmd; exit 0 145 | BENCH_CONFIG=default.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 146 | pkill -9 beam.smp; pkill -9 epmd; exit 0 147 | BENCH_CONFIG=32kb_object.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 148 | pkill -9 beam.smp; pkill -9 epmd; exit 0 149 | BENCH_CONFIG=64kb_object.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 150 | pkill -9 beam.smp; pkill -9 epmd; exit 0 151 | BENCH_CONFIG=96kb_object.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 152 | pkill -9 beam.smp; pkill -9 epmd; exit 0 153 | BENCH_CONFIG=512kb_object.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 154 | pkill -9 beam.smp; pkill -9 epmd; exit 0 155 | BENCH_CONFIG=1mb_object.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 156 | pkill -9 beam.smp; pkill -9 epmd; exit 0 157 | BENCH_CONFIG=2mb_object.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 158 | pkill -9 beam.smp; pkill -9 epmd; exit 0 159 | BENCH_CONFIG=4mb_object.config $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test --readable=false -v 160 | 161 | bench: kill 162 | @echo "Running Distributed Erlang benchmark with configuration $(BENCH_CONFIG)..." 163 | BENCH_CONFIG=$(BENCH_CONFIG) $(REBAR) ct --suite=throughput_SUITE --group=disterl --case=bench_test 164 | @echo "Running Partisan benchmark with configuration $(BENCH_CONFIG)..." 165 | BENCH_CONFIG=$(BENCH_CONFIG) $(REBAR) ct --suite=throughput_SUITE --group=partisan --case=bench_test 166 | 167 | extended-bench: kill bench 168 | @echo "Running Partisan (parallel) benchmark with configuration $(BENCH_CONFIG)..." 169 | BENCH_CONFIG=$(BENCH_CONFIG) $(REBAR) ct --suite=throughput_SUITE --group=partisan_with_parallelism --case=bench_test 170 | @echo "Running Partisan (binary padding) benchmark with configuration $(BENCH_CONFIG)..." 171 | BENCH_CONFIG=$(BENCH_CONFIG) $(REBAR) ct --suite=throughput_SUITE --group=partisan_with_binary_padding --case=bench_test 172 | @echo "Running Partisan (vnode partitioning) benchmark with configuration $(BENCH_CONFIG)..." 
173 | BENCH_CONFIG=$(BENCH_CONFIG) $(REBAR) ct --suite=throughput_SUITE --group=partisan_with_vnode_partitioning --case=bench_test 174 | 175 | without-partisan-test: kill release 176 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=disterl 177 | 178 | with-partisan-test: kill release 179 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=partisan 180 | 181 | scale-test: clear-logs kill release 182 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=scale 183 | 184 | large-scale-test: kill release 185 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=large_scale 186 | 187 | partisan-scale-test: clear-logs kill release 188 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=partisan_scale 189 | 190 | partisan-large-scale-test: kill release 191 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=partisan_large_scale 192 | 193 | partisan-with-binary-padding-test: kill release 194 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=partisan_with_binary_padding 195 | 196 | partisan-with-parallelism-test: kill release 197 | $(REBAR) ct -v --readable=false --suite=functionality_SUITE --group=partisan_with_parallelism 198 | 199 | prod-release: 200 | $(REBAR) as prod release 201 | mkdir -p $(PRODRELPATH)/../unir_config 202 | [ -f $(PRODRELPATH)/../unir_config/unir.conf ] || cp $(PRODRELPATH)/etc/unir.conf $(PRODRELPATH)/../unir_config/unir.conf 203 | [ -f $(PRODRELPATH)/../unir_config/advanced.config ] || cp $(PRODRELPATH)/etc/advanced.config $(PRODRELPATH)/../unir_config/advanced.config 204 | 205 | prod-console: 206 | cd $(PRODRELPATH) && ./bin/unir console 207 | 208 | clean: 209 | $(REBAR) clean 210 | 211 | dialyzer: 212 | $(REBAR) dialyzer 213 | 214 | functionality-test: kill release 215 | $(REBAR) ct --readable=false -v --suite=functionality_SUITE --case=vnode_test 216 | 217 | test: kill release 218 | $(REBAR) ct --readable=false -v 219 | 220 | devrel1: 221 | $(REBAR) as dev1 release 222 | 223 | devrel2: 224 | $(REBAR) as dev2 release 225 | 226 | devrel3: 227 | $(REBAR) as dev3 release 228 | 229 | devrel: devrel1 devrel2 devrel3 230 | 231 | dev1-console: 232 | $(BASEDIR)/_build/dev1/rel/unir/bin/$(APPNAME) console 233 | 234 | dev2-console: 235 | $(BASEDIR)/_build/dev2/rel/unir/bin/$(APPNAME) console 236 | 237 | dev3-console: 238 | $(BASEDIR)/_build/dev3/rel/unir/bin/$(APPNAME) console 239 | 240 | devrel-start: 241 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/unir/bin/$(APPNAME) start; done 242 | 243 | devrel-join: 244 | for d in $(BASEDIR)/_build/dev{2,3}; do $$d/rel/unir/bin/$(APPNAME)-admin cluster join unir1@127.0.0.1; done 245 | 246 | devrel-cluster-plan: 247 | $(BASEDIR)/_build/dev1/rel/unir/bin/$(APPNAME)-admin cluster plan 248 | 249 | devrel-cluster-commit: 250 | $(BASEDIR)/_build/dev1/rel/unir/bin/$(APPNAME)-admin cluster commit 251 | 252 | devrel-status: 253 | $(BASEDIR)/_build/dev1/rel/unir/bin/$(APPNAME)-admin member-status 254 | 255 | devrel-ping: 256 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/unir/bin/$(APPNAME) ping; done 257 | 258 | devrel-stop: 259 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/unir/bin/$(APPNAME) stop; done 260 | 261 | start: 262 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) start 263 | 264 | stop: 265 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) stop 266 | 267 | attach: 268 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) attach 269 | -------------------------------------------------------------------------------- /README.rst: 
-------------------------------------------------------------------------------- 1 | unir 2 | =========== 3 | 4 | A riak_core application 5 | 6 | Build 7 | ----- 8 | 9 | :: 10 | 11 | rebar3 release 12 | 13 | Test 14 | ---- 15 | 16 | :: 17 | 18 | rebar3 ct 19 | 20 | Run 21 | --- 22 | 23 | :: 24 | 25 | rebar3 run 26 | 27 | Try 28 | --- 29 | 30 | :: 31 | 32 | 1> unir:ping(). 33 | {pong,753586781748746817198774991869333432010090217472} 34 | 35 | Quit 36 | ---- 37 | 38 | :: 39 | 40 | 2> q(). 41 | 42 | Play with Clustering 43 | -------------------- 44 | 45 | Build 3 releases that can run on the same machine:: 46 | 47 | make devrel 48 | 49 | Start them in different consoles:: 50 | 51 | make dev1-console 52 | make dev2-console 53 | make dev3-console 54 | 55 | Join 2 nodes to the first one:: 56 | 57 | make devrel-join 58 | 59 | Check the status of the cluster:: 60 | 61 | make devrel-status 62 | 63 | You should see something like this:: 64 | 65 | ================================= Membership ================================== 66 | Status Ring Pending Node 67 | ------------------------------------------------------------------------------- 68 | joining 0.0% -- 'unir2@127.0.0.1' 69 | joining 0.0% -- 'unir3@127.0.0.1' 70 | valid 100.0% -- 'unir1@127.0.0.1' 71 | ------------------------------------------------------------------------------- 72 | Valid:1 / Leaving:0 / Exiting:0 / Joining:2 / Down:0 73 | 74 | It should show that 2 nodes are joining; now check the cluster plan:: 75 | 76 | make devrel-cluster-plan 77 | 78 | It should display the cluster plan; now commit the plan:: 79 | 80 | make devrel-cluster-commit 81 | 82 | Check the status of the cluster again:: 83 | 84 | make devrel-status 85 | 86 | You should see the vnodes transferring:: 87 | 88 | ================================= Membership ================================== 89 | Status Ring Pending Node 90 | ------------------------------------------------------------------------------- 91 | valid 75.0% 25.0% 'unir1@127.0.0.1' 92 | valid 9.4% 25.0% 'unir2@127.0.0.1' 93 | valid 7.8% 25.0% 'unir3@127.0.0.1' 94 | ------------------------------------------------------------------------------- 95 | Valid:3 / Leaving:0 / Exiting:0 / Joining:0 / Down:0 96 | 97 | At some point you should see something like this:: 98 | 99 | ================================= Membership ================================== 100 | Status Ring Pending Node 101 | ------------------------------------------------------------------------------- 102 | valid 33.3% -- 'unir1@127.0.0.1' 103 | valid 33.3% -- 'unir2@127.0.0.1' 104 | valid 33.3% -- 'unir3@127.0.0.1' 105 | ------------------------------------------------------------------------------- 106 | Valid:3 / Leaving:0 / Exiting:0 / Joining:0 / Down:0 107 | 108 | When you are done you can stop them:: 109 | 110 | make devrel-stop 111 | 112 | 113 | TODO 114 | ---- 115 | 116 | * define license and create LICENSE file 117 | 118 | License 119 | ------- 120 | 121 | TODO -------------------------------------------------------------------------------- /Rscripts/echo-latency.R: -------------------------------------------------------------------------------- 1 | # library 2 | library(ggplot2) 3 | 4 | # Read in the input information 5 | data <- read.csv(file="C:\\Users\\chris\\OneDrive\\Desktop\\Results\\partisan-bench.csv", 6 | head=FALSE, sep=",") 7 | 8 | # Rename the columns 9 | colnames(data)[1] <- "Backend" 10 | colnames(data)[2] <- "Concurrency" 11 | colnames(data)[3] <- "Connections" 12 | colnames(data)[4] <- "Size" 13 |
colnames(data)[5] <- "NumMessages" 14 | colnames(data)[6] <- "Latency" 15 | colnames(data)[7] <- "Time" 16 | 17 | data[8] <- paste(data$Backend, data$Latency, "ms", "RTT latency") 18 | colnames(data)[8] <- "Configuration" 19 | 20 | # Select optimal Partisan configuration and base Disterl configuration 21 | df = data[data$Size == 8388608 & data$Latency != 100 & 22 | ( 23 | (data$Backend == "disterl" & data$Connections == 1) | 24 | (data$Backend == "partisan" & 25 | (data$Concurrency == data$Connections))),] 26 | 27 | # Plot performance 28 | ggplot(aes(y = (Time / 1000 / 1000), x = Concurrency, colour = Configuration), data = df, stat="identity") + 29 | geom_point(aes(shape=Configuration)) + 30 | geom_line(aes(linetype=Backend)) + 31 | xlab("Concurrent Processes") + 32 | ylab("Time (ms)") + 33 | theme(legend.justification = c(1, 1), legend.position = c(0.3, 0.9)) + 34 | theme(axis.text.x = element_text(angle = 0, hjust = 1)) + 35 | ggtitle("echo request/reply with 8MB object") -------------------------------------------------------------------------------- /Rscripts/graph.R: -------------------------------------------------------------------------------- 1 | # library 2 | library(ggplot2) 3 | 4 | # Read in the input information 5 | data <- read.csv(file="C:\\Users\\chris\\OneDrive\\Desktop\\Results\\partisan-bench.csv", 6 | head=FALSE, sep=",") 7 | 8 | # Rename the columns 9 | colnames(data)[1] <- "Backend" 10 | colnames(data)[2] <- "Concurrency" 11 | colnames(data)[3] <- "Connections" 12 | colnames(data)[4] <- "Size" 13 | colnames(data)[5] <- "NumMessages" 14 | colnames(data)[6] <- "Latency" 15 | colnames(data)[7] <- "Time" 16 | 17 | # Select optimal Partisan configuration and base Disterl configuration 18 | df = data[data$Size == 8388608 & data$Latency == 1 & 19 | ( 20 | (data$Backend == "disterl" & data$Connections == 1) | 21 | (data$Backend == "partisan" & 22 | (data$Concurrency == data$Connections))),] 23 | 24 | # Plot performance 25 | ggplot(aes(y = (Time / 1000 / 1000), x = Concurrency, colour = Backend), data = df, stat="identity") + 26 | geom_point(aes(shape=Backend)) + 27 | geom_line(aes(linetype=Backend)) + 28 | xlab("Concurrent Processes") + 29 | ylab("Time (ms)") + 30 | theme(legend.justification = c(1, 1), legend.position = c(1, 0.2)) + 31 | theme(axis.text.x = element_text(angle = 0, hjust = 1)) + 32 | ggtitle("echo request/reply with 8MB object and 1ms RTT latency") -------------------------------------------------------------------------------- /Rscripts/partisan-bench-8mb-1ms.R: -------------------------------------------------------------------------------- 1 | # library 2 | library(ggplot2) 3 | 4 | # Read in the input information 5 | data <- read.csv(file="C:\\Users\\chris\\OneDrive\\Desktop\\Results\\partisan-bench.csv", 6 | head=FALSE, sep=",") 7 | 8 | # Rename the columns 9 | colnames(data)[1] <- "Backend" 10 | colnames(data)[2] <- "Concurrency" 11 | colnames(data)[3] <- "Connections" 12 | colnames(data)[4] <- "Size" 13 | colnames(data)[5] <- "NumMessages" 14 | colnames(data)[6] <- "Latency" 15 | colnames(data)[7] <- "Time" 16 | 17 | # Select optimal Partisan configuration and base Disterl configuration 18 | df = data[data$Size == 8388608 & data$Latency == 1 & 19 | ( 20 | (data$Backend == "disterl" & data$Connections == 1) | 21 | (data$Backend == "partisan" & 22 | (data$Concurrency == data$Connections))),] 23 | 24 | # Plot performance 25 | ggplot(aes(y = (Time / 1000 / 1000), x = Concurrency, colour = Backend), data = df, stat="identity") + 26 | 
geom_point(aes(shape=Backend)) + 27 | geom_line(aes(linetype=Backend)) + 28 | xlab("Concurrent Processes") + 29 | ylab("Time (ms)") + 30 | theme(legend.justification = c(1, 1), legend.position = c(1, 0.2)) + 31 | theme(axis.text.x = element_text(angle = 0, hjust = 1)) + 32 | ggtitle("echo request/reply with 8MB object and 1ms RTT latency") -------------------------------------------------------------------------------- /bin/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lasp-lang/unir/5f86eca18ff2d20bf80a4f879226acdc9d7bd70d/bin/.gitkeep -------------------------------------------------------------------------------- /bin/bench: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for i in {1..10} 4 | do 5 | BENCH_CONFIG=unir_1kb_object.config make bench 6 | done 7 | -------------------------------------------------------------------------------- /bin/clone-deps: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | rm -rf _checkouts 4 | git clone git@github.com:lasp-lang/partisan _checkouts/partisan 5 | git clone git@github.com:lasp-lang/riak_core -b partisan-support _checkouts/riak_core 6 | git clone git@github.com:lasp-lang/riak_core_partisan_utils _checkouts/riak_core_partisan_utils 7 | -------------------------------------------------------------------------------- /bin/gist-results: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILE=_build/test/rebar3_proper-counterexamples.consult 4 | if [ -f $FILE ]; then 5 | echo "Gisting output files and consult file..." 6 | gist -d "results for ${HOSTNAME}" output-*.txt _build/test/rebar3_proper-counterexamples.consult 7 | else 8 | echo "Gisting output files..." 9 | gist -d "results for ${HOSTNAME}" output-*.txt 10 | fi -------------------------------------------------------------------------------- /bin/kube: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat > /tmp/unir.yaml < /dev/null | tail -n 1 | cut -d = -f 2` 113 | if [ -z "$NODENAME" ]; then 114 | echo "vm.args needs to have a -name parameter." 115 | echo " -sname is not supported." 116 | exit 1 117 | else 118 | NAME_TYPE="-name" 119 | NAME="${NODENAME# *}" 120 | fi 121 | fi 122 | 123 | PIPE_DIR="${PIPE_DIR:-/tmp/erl_pipes/$NAME/}" 124 | 125 | # Extract the target cookie 126 | #COOKIE_ARG=`grep -e '-setcookie' $RUNNER_ETC_DIR/vm.args` 127 | if [ -z "$COOKIE_ARG" ]; then 128 | COOKIE=`egrep '^[ \t]*distributed_cookie[ \t]*=[ \t]*' $RUNNER_ETC_DIR/$CUTTLEFISH_CONF 2> /dev/null | cut -d = -f 2 | tr -d ' '` 129 | if [ -z "$COOKIE" ]; then 130 | echo "vm.args needs to have a -setcookie parameter." 131 | exit 1 132 | else 133 | COOKIE_ARG="-setcookie $COOKIE" 134 | fi 135 | fi 136 | 137 | find_erts_dir 138 | export ROOTDIR="$RELEASE_ROOT_DIR" 139 | export BINDIR="$ERTS_DIR/bin" 140 | export EMU="beam" 141 | export PROGNAME="erl" 142 | export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH" 143 | ERTS_LIB_DIR="$ERTS_DIR/../lib" 144 | CUTTLEFISHCMD="$ERTS_DIR/bin/escript $RUNNER_BASE_DIR/bin/cuttlefish" 145 | 146 | cd "$ROOTDIR" 147 | 148 | if CUTTLEFISH_CONFIG=$($CUTTLEFISHCMD -e $RUNNER_ETC_DIR -d $RUNNER_BASE_DIR/generated.conf -s $RUNNER_BASE_DIR/share/schema/ -c $RUNNER_ETC_DIR/$CUTTLEFISH_CONF) 149 | then 150 | CONFIG_FILES="$CUTTLEFISH_CONFIG" 151 | else 152 | echo "Cuttlefish failed! Oh no!" 
153 | exit 1 154 | fi 155 | 156 | 157 | # Parse out release and erts info 158 | START_ERL=`cat $RUNNER_BASE_DIR/releases/start_erl.data` 159 | ERTS_VSN=${START_ERL% *} 160 | APP_VSN=${START_ERL#* } 161 | 162 | # TODO: look in the release otherwise use which 163 | ESCRIPT=escript 164 | NODETOOL_PATH=$RUNNER_BASE_DIR/bin 165 | NODETOOL=$NODETOOL_PATH/nodetool 166 | # Setup command to control the node 167 | NODETOOL="$ESCRIPT $NODETOOL $NAME_ARG $COOKIE_ARG" 168 | 169 | ensure_node_running() 170 | { 171 | # Make sure the local node IS running 172 | if ! relx_nodetool "ping"; then 173 | echo "Node is not running!" 174 | exit 1 175 | fi 176 | } 177 | 178 | cluster_admin() 179 | { 180 | case "$1" in 181 | join) 182 | if [ $# -ne 2 ]; then 183 | echo "Usage: $SCRIPT cluster join " 184 | exit 1 185 | fi 186 | ensure_node_running 187 | relx_nodetool rpc {{ release_name }}_console staged_join "$2" 188 | ;; 189 | leave) 190 | if [ $# -eq 1 ]; then 191 | ensure_node_running 192 | relx_nodetool rpc riak_core_console stage_leave 193 | elif [ $# -eq 2 ]; then 194 | ensure_node_running 195 | relx_nodetool rpc riak_core_console stage_leave "$2" 196 | else 197 | echo "Usage: $SCRIPT cluster leave []" 198 | exit 1 199 | fi 200 | ;; 201 | force-remove) 202 | if [ $# -ne 2 ]; then 203 | echo "Usage: $SCRIPT cluster force-remove " 204 | exit 1 205 | fi 206 | ensure_node_running 207 | relx_nodetool rpc riak_core_console stage_remove "$2" 208 | ;; 209 | replace) 210 | if [ $# -ne 3 ]; then 211 | echo "Usage: $SCRIPT cluster replace " 212 | exit 1 213 | fi 214 | ensure_node_running 215 | relx_nodetool rpc riak_core_console stage_replace "$2" "$3" 216 | ;; 217 | force-replace) 218 | if [ $# -ne 3 ]; then 219 | echo "Usage: $SCRIPT cluster force-replace " 220 | exit 1 221 | fi 222 | ensure_node_running 223 | relx_nodetool rpc riak_core_console stage_force_replace "$2" "$3" 224 | ;; 225 | plan) 226 | ensure_node_running 227 | relx_nodetool rpc riak_core_console print_staged 228 | ;; 229 | commit) 230 | ensure_node_running 231 | relx_nodetool rpc riak_core_console commit_staged 232 | ;; 233 | clear) 234 | ensure_node_running 235 | relx_nodetool rpc riak_core_console clear_staged 236 | ;; 237 | *) 238 | echo "\ 239 | Usage: $SCRIPT cluster 240 | 241 | The following commands stage changes to cluster membership. These commands 242 | do not take effect immediately. After staging a set of changes, the staged 243 | plan must be committed to take effect: 244 | 245 | join Join node to the cluster containing 246 | leave Have this node leave the cluster and shutdown 247 | leave Have leave the cluster and shutdown 248 | 249 | force-remove Remove from the cluster without 250 | first handing off data. Designed for 251 | crashed, unrecoverable nodes 252 | 253 | replace Have transfer all data to , 254 | and then leave the cluster and shutdown 255 | 256 | force-replace Reassign all partitions owned by to 257 | without first handing off data, and 258 | remove from the cluster. 
259 | 260 | Staging commands: 261 | plan Display the staged changes to the cluster 262 | commit Commit the staged changes 263 | clear Clear the staged changes 264 | " 265 | esac 266 | } 267 | 268 | # Check the first argument for instructions 269 | case "$1" in 270 | down) 271 | if [ $# -ne 2 ]; then 272 | echo "Usage: $SCRIPT down " 273 | exit 1 274 | fi 275 | 276 | ensure_node_running 277 | relx_nodetool rpc {{ release_name }}_console down $@ 278 | ;; 279 | 280 | ringready) 281 | if [ $# -ne 1 ]; then 282 | echo "Usage: $SCRIPT ringready" 283 | exit 1 284 | fi 285 | 286 | ensure_node_running 287 | relx_nodetool rpc {{ release_name }}_console ringready '' 288 | ;; 289 | 290 | member[_-]status) 291 | if [ $# -ne 1 ]; then 292 | echo "Usage: $SCRIPT $1" 293 | exit 1 294 | fi 295 | 296 | ensure_node_running 297 | relx_nodetool rpc riak_core_console member_status '' 298 | ;; 299 | 300 | ring[_-]status) 301 | if [ $# -ne 1 ]; then 302 | echo "Usage: $SCRIPT $1" 303 | exit 1 304 | fi 305 | 306 | ensure_node_running 307 | relx_nodetool rpc riak_core_console ring_status '' 308 | ;; 309 | 310 | services) 311 | relx_nodetool rpcterms riak_core_node_watcher services '' 312 | ;; 313 | 314 | wait[_-]for[_-]service) 315 | SVC=$2 316 | TARGETNODE=$3 317 | if [ $# -lt 3 ]; then 318 | echo "Usage: $SCRIPT $1 " 319 | exit 1 320 | fi 321 | 322 | while (true); do 323 | # Make sure riak_core_node_watcher is up and running locally before trying to query it 324 | # to avoid ugly (but harmless) error messages 325 | NODEWATCHER=`$NODETOOL rpcterms erlang whereis "'riak_core_node_watcher'."` 326 | if [ "$NODEWATCHER" = "undefined" ]; then 327 | echo "$SVC is not up: node watcher is not running" 328 | continue 329 | fi 330 | 331 | # Get the list of services that are available on the requested node 332 | SERVICES=`$NODETOOL rpcterms riak_core_node_watcher services "'${TARGETNODE}'."` 333 | echo "$SERVICES" | grep "[[,]$SVC[],]" > /dev/null 2>&1 334 | if [ "X$?" = "X0" ]; then 335 | echo "$SVC is up" 336 | exit 0 337 | else 338 | echo "$SVC is not up: $SERVICES" 339 | fi 340 | sleep 3 341 | done 342 | ;; 343 | cluster) 344 | shift 345 | cluster_admin "$@" 346 | ;; 347 | *) 348 | echo "Usage: $SCRIPT { cluster | down | ringready | member-status | " 349 | echo " ring-status | services | wait-for-service " 350 | exit 1 351 | ;; 352 | esac 353 | -------------------------------------------------------------------------------- /config/advanced.config: -------------------------------------------------------------------------------- 1 | [ 2 | { unir, []}, 3 | {riak_core, [ 4 | {schema_dirs, ["share/schema"]} 5 | %% unir valid permissions to grant 6 | % {permissions, [{ unir, [put, get, list, grant, delete]}]} 7 | ]}, 8 | %% SASL config 9 | {sasl, [ 10 | {sasl_error_logger, {file, "log/sasl-error.log"}}, 11 | {errlog_type, error}, 12 | {error_logger_mf_dir, "log/sasl"}, % Log directory 13 | {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size 14 | {error_logger_mf_maxfiles, 5} % 5 files max 15 | ] 16 | }, 17 | 18 | %% Lager config 19 | %% see https://github.com/basho/lager#configuration 20 | %% see https://github.com/basho/lager/blob/master/src/lager.app.src 21 | {lager, [ 22 | {handlers, [ 23 | {lager_console_backend, info}, 24 | {lager_file_backend, [{file, "error.log"}, {level, error}, 25 | {size, 10485760}, {date, "$D0"}, {count, 5}]}, 26 | {lager_file_backend, [{file, "console.log"}, {level, info}, 27 | {size, 10485760}, {date, "$D0"}, {count, 5}]} 28 | ]} 29 | ]} 30 | ]. 
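%% Note: the "share/schema" directory configured above is the same path the
%% generated admin script (config/admin_bin) hands to cuttlefish via
%% "-s $RUNNER_BASE_DIR/share/schema/", so the mappings in priv/01-unir.schema
%% are presumably installed there when the release is built.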
31 | -------------------------------------------------------------------------------- /config/test.config: -------------------------------------------------------------------------------- 1 | {nodes, []}. -------------------------------------------------------------------------------- /config/vars.config: -------------------------------------------------------------------------------- 1 | {cuttlefish_conf, "unir.conf"}. 2 | {rel_name, "unir"}. 3 | {node, "unir@127.0.0.1"}. 4 | 5 | {web_ip, "127.0.0.1"}. 6 | {web_port, 8098}. 7 | {handoff_port, 8099}. 8 | {handoff_ip, "127.0.0.1"}. 9 | {sasl_error_log, "./log/sasl-error.log"}. 10 | {sasl_log_dir, "./log/sasl"}. 11 | 12 | {platform_bin_dir, "./bin"}. 13 | {platform_data_dir, "../unir_data"}. 14 | {platform_etc_dir, "../unir_config"}. 15 | {platform_lib_dir, "./lib"}. 16 | {platform_log_dir, "./log"}. 17 | 18 | {crash_dump, "erl_crash.dump"}. 19 | -------------------------------------------------------------------------------- /config/vars_dev1.config: -------------------------------------------------------------------------------- 1 | {node, "unir1@127.0.0.1"}. 2 | 3 | {web_port, 8198}. 4 | {handoff_port, 8199}. 5 | -------------------------------------------------------------------------------- /config/vars_dev2.config: -------------------------------------------------------------------------------- 1 | {node, "unir2@127.0.0.1"}. 2 | 3 | {web_port, 8298}. 4 | {handoff_port, 8299}. 5 | -------------------------------------------------------------------------------- /config/vars_dev3.config: -------------------------------------------------------------------------------- 1 | {node, "unir3@127.0.0.1"}. 2 | 3 | {web_port, 8398}. 4 | {handoff_port, 8399}. 5 | -------------------------------------------------------------------------------- /eprof/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lasp-lang/unir/5f86eca18ff2d20bf80a4f879226acdc9d7bd70d/eprof/.gitkeep -------------------------------------------------------------------------------- /examples/10mb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 10485760}}. 16 | -------------------------------------------------------------------------------- /examples/1kb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 1024}}. 16 | -------------------------------------------------------------------------------- /examples/1mb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 1048576}}. 
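%% Reading note for the examples/*_object.config files (comment only, ignored
%% by file:consult/1): {operations, [{fsm_get, 10}, {fsm_put, 1}]} weights the
%% lasp_bench operations roughly 10:1, i.e. about ten gets for every put issued
%% through lasp_bench_driver_unir, and value_generator fixes the object size in
%% bytes ({fixed_bin, 1048576} here is a 1 MB value); the other *_object.config
%% files differ only in that size.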
16 | -------------------------------------------------------------------------------- /examples/2mb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 2097152}}. 16 | -------------------------------------------------------------------------------- /examples/32kb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 32768}}. 16 | -------------------------------------------------------------------------------- /examples/32mb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 33554144}}. 16 | -------------------------------------------------------------------------------- /examples/4mb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 4194304}}. 16 | -------------------------------------------------------------------------------- /examples/512kb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 524288}}. 16 | -------------------------------------------------------------------------------- /examples/64kb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 65536}}. -------------------------------------------------------------------------------- /examples/64mb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 67108864}}. 
16 | -------------------------------------------------------------------------------- /examples/8mb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 8388608}}. 16 | -------------------------------------------------------------------------------- /examples/96kb_object.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, object}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{fsm_get, 10}, {fsm_put, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 98304}}. 16 | -------------------------------------------------------------------------------- /examples/default.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, ping}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{ping, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 0}}. -------------------------------------------------------------------------------- /examples/echo.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, echo}. 6 | 7 | {concurrent, 1}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{echo, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 16777216}}. 16 | -------------------------------------------------------------------------------- /examples/ping.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, ping}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{ping, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 0}}. -------------------------------------------------------------------------------- /examples/sync_ping.config: -------------------------------------------------------------------------------- 1 | {mode, max}. 2 | 3 | {duration, 2}. 4 | 5 | {type, sync_ping}. 6 | 7 | {concurrent, 3}. 8 | 9 | {driver, lasp_bench_driver_unir}. 10 | 11 | {operations, [{sync_ping, 1}]}. 12 | 13 | {key_generator, {int_to_bin_bigendian, {uniform_int, 1000}}}. 14 | 15 | {value_generator, {fixed_bin, 0}}. 16 | -------------------------------------------------------------------------------- /output.txt: -------------------------------------------------------------------------------- 1 | a 2 | -------------------------------------------------------------------------------- /priv/01-unir.schema: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | %% ex: ft=erlang ts=4 sw=4 et 3 | 4 | %% @doc Enable/Disable HTTP API 5 | {mapping, "http.enabled", "unir.http_enabled", [ 6 | {datatype, {flag, yes, no}}, 7 | {default, yes} 8 | ]}. 
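%% Illustration (an assumption about the generated configuration, not part of
%% the schema itself): given the mapping above, a unir.conf line such as
%%
%%   http.enabled = yes
%%
%% would be translated by cuttlefish into the unir application environment,
%% roughly {unir, [{http_enabled, true}]}, assuming the {flag, yes, no}
%% datatype maps yes/no to true/false, so the application can read it with
%% application:get_env(unir, http_enabled).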
9 | 10 | %% @doc port to listen to for HTTP API 11 | {mapping, "http.port", "unir.http_port", [ 12 | {datatype, integer}, 13 | {default, 8080} 14 | ]}. 15 | 16 | %% @doc number of acceptors to user for HTTP API 17 | {mapping, "http.acceptors", "unir.http_acceptors", [ 18 | {datatype, integer}, 19 | {default, 100} 20 | ]}. 21 | 22 | %% @doc Enable/Disable HTTPS API 23 | {mapping, "https.enabled", "unir.https_enabled", [ 24 | {datatype, {flag, yes, no}}, 25 | {default, no} 26 | ]}. 27 | 28 | %% @doc port to listen to for HTTPS API 29 | {mapping, "https.port", "unir.https_port", [ 30 | {datatype, integer}, 31 | {default, 8443} 32 | ]}. 33 | 34 | %% @doc number of acceptors to use for HTTPS API 35 | {mapping, "https.acceptors", "unir.https_acceptors", [ 36 | {datatype, integer}, 37 | {default, 100} 38 | ]}. 39 | 40 | %% @doc Enable/Disable HTTP CORS API 41 | {mapping, "http.cors.enabled", "unir.cors_enabled", [ 42 | {datatype, {flag, yes, no}}, 43 | {default, no} 44 | ]}. 45 | 46 | %% @doc HTTP CORS API allowed origins, it can be a comma separated list of 47 | %% origins to accept or * to accept all 48 | {mapping, "http.cors.origins", "unir.cors_origins", [ 49 | {default, "*"} 50 | ]}. 51 | 52 | {translation, "unir.cors_origins", 53 | fun(Conf) -> 54 | Setting = cuttlefish:conf_get("http.cors.origins", Conf), 55 | case Setting of 56 | "*" -> any; 57 | CSVs -> 58 | Tokens = string:tokens(CSVs, ","), 59 | Cleanup = fun (Token) -> 60 | CleanToken = string:strip(Token), 61 | list_to_binary(CleanToken) 62 | end, 63 | FilterEmptyStr = fun ("") -> false; (_) -> true end, 64 | lists:filter(FilterEmptyStr, lists:map(Cleanup, Tokens)) 65 | end 66 | end}. 67 | 68 | %% @doc HTTP CORS API a comma separated list of allowed headers to accept 69 | {mapping, "http.cors.headers", "unir.cors_headers", []}. 70 | 71 | {translation, "unir.cors_headers", 72 | fun(Conf) -> 73 | CSVs = cuttlefish:conf_get("http.cors.headers", Conf), 74 | Tokens = string:tokens(CSVs, ","), 75 | Cleanup = fun (Token) -> 76 | CleanToken = string:strip(Token), 77 | list_to_binary(CleanToken) 78 | end, 79 | FilterEmptyStr = fun ("") -> false; (_) -> true end, 80 | lists:filter(FilterEmptyStr, lists:map(Cleanup, Tokens)) 81 | end}. 82 | 83 | %% @doc HTTP CORS API indicates how long the results of a preflight request can 84 | %% be cached 85 | {mapping, "http.cors.maxage", "unir.cors_max_age_secs", [ 86 | {datatype, {duration, s}}, 87 | {default, "60s"} 88 | ]}. 89 | 90 | %% @doc secret used to encrypt the session token, IMPORTANT: change this 91 | {mapping, "auth.secret", "unir.auth_secret", [ 92 | {default, "changeme"} 93 | ]}. 94 | 95 | {translation, "unir.auth_secret", 96 | fun(Conf) -> 97 | Setting = cuttlefish:conf_get("auth.secret", Conf), 98 | list_to_binary(Setting) 99 | end}. 100 | 101 | %% @doc time a session is valid after login 102 | {mapping, "auth.session.duration", "unir.session_duration_secs", [ 103 | {datatype, {duration, s}}, 104 | {default, "24h"} 105 | ]}. 106 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {erl_opts, [debug_info, {parse_transform, lager_transform}]}. 
2 | 3 | {deps, [ 4 | {pbkdf2, {git, "git://github.com/marianoguerra/erlang-pbkdf2-no-history", {branch, "master"}}}, 5 | {exometer_core, {git, "git://github.com/basho/exometer_core.git", {branch, "th/correct-dependencies"}}}, 6 | {riak_core, {git, "git://github.com/lasp-lang/riak_core", {branch, "partisan-support-r21"}}}, 7 | {riak_core_partisan_utils, {git, "git://github.com/lasp-lang/riak_core_partisan_utils", {branch, "master"}}}, 8 | {partisan, {git, "git://github.com/lasp-lang/partisan", {branch, "master"}}}, 9 | {lasp_bench, {git, "git://github.com/lasp-lang/lasp-bench", {branch, "master"}}}, 10 | {riak_ensemble, {git, "git://github.com/Kyorai/riak_ensemble", {branch, "develop"}}}, 11 | {lager, {git, "https://github.com/basho/lager.git", {tag, "3.2.4"}}}, 12 | {poolboy, {git, "https://github.com/Kyorai/poolboy.git", {branch, "r21"}}} 13 | ]}. 14 | 15 | {relx, [{release, { unir , "0.1.0"}, 16 | [unir, 17 | cuttlefish, 18 | sasl]}, 19 | 20 | {dev_mode, true}, 21 | {include_erts, false}, 22 | 23 | {overlay_vars, "config/vars.config"}, 24 | {overlay, [ 25 | {mkdir, "etc"}, 26 | {mkdir, "bin"}, 27 | {mkdir, "data/ring"}, 28 | {mkdir, "log/sasl"}, 29 | {template, "./config/admin_bin", "bin/unir-admin"}, 30 | {template, "./config/advanced.config", "etc/advanced.config"} 31 | ]} 32 | ]}. 33 | 34 | {provider_hooks, [{pre, [{compile, erl_vsn}]}]}. 35 | 36 | {plugins, [ 37 | rebar_erl_vsn, 38 | {rebar3_proper, "0.10.1"}, 39 | {rebar3_run, {git, "git://github.com/tsloughter/rebar3_run.git", {branch, "master"}}} 40 | ]}. 41 | 42 | {project_plugins, [{rebar3_cuttlefish, {git, "git://github.com/tsloughter/rebar3_cuttlefish.git", {branch, "master"}}}]}. 43 | 44 | {profiles, [ 45 | {prod, [{relx, [{dev_mode, false}, {include_erts, true}]}]}, 46 | {dev1, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev1.config"]}]}]}, 47 | {dev2, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev2.config"]}]}]}, 48 | {dev3, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev3.config"]}]}]}, 49 | {test, [ 50 | {deps, [{proper, "1.2.0"}, recon]} 51 | ]} 52 | ]}. 53 | 54 | {overrides, 55 | [{override, eleveldb, 56 | [ 57 | {artifacts, ["priv/eleveldb.so"]}, 58 | {pre_hooks, [{compile, "c_src/build_deps.sh get-deps"}, 59 | {compile, "c_src/build_deps.sh"}]}, 60 | 61 | {post_hooks, [{clean, "c_src/build_deps.sh clean"}]}, 62 | 63 | {plugins, [pc]}, 64 | 65 | {provider_hooks, [{post, 66 | [{compile, {pc, compile}}, 67 | {clean, {pc, clean}} 68 | ] 69 | }] 70 | } 71 | ] 72 | }, 73 | {override, riak_ensemble, 74 | [ 75 | {artifacts, ["priv/riak_ensemble_drv.so"]}, 76 | {plugins, [pc]}, 77 | {provider_hooks, [{post, 78 | [{compile, {pc, compile}}, 79 | {clean, {pc, clean}} 80 | ]}]}, 81 | {erl_opts, [debug_info, 82 | warn_untyped_record, 83 | {parse_transform, lager_transform}]} 84 | ]}, 85 | {override, riak_core, 86 | [ 87 | {erl_opts, [debug_info, 88 | {parse_transform, lager_transform}, 89 | {platform_define, "^R15", "old_hash"}]} 90 | ]} 91 | ]}. 
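%% Local development note: bin/clone-deps clones partisan, riak_core and
%% riak_core_partisan_utils into _checkouts/, and rebar3's standard _checkouts
%% mechanism then builds those working copies in place of the locked versions
%% of the deps declared above, which is why .gitignore lists _checkouts*.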
92 | -------------------------------------------------------------------------------- /rebar.lock: -------------------------------------------------------------------------------- 1 | {"1.1.0", 2 | [{<<"acceptor_pool">>,{pkg,<<"acceptor_pool">>,<<"1.0.0-rc.0">>},1}, 3 | {<<"basho_stats">>,{pkg,<<"basho_stats">>,<<"1.0.3">>},1}, 4 | {<<"bear">>, 5 | {git,"git://github.com/basho/bear.git", 6 | {ref,"da820a13c607c3f816ee8b83c587266da5389761"}}, 7 | 2}, 8 | {<<"blume">>,{pkg,<<"blume">>,<<"0.1.1">>},1}, 9 | {<<"chash">>,{pkg,<<"chash">>,<<"0.1.2">>},1}, 10 | {<<"clique">>, 11 | {git,"https://github.com/Kyorai/clique.git", 12 | {ref,"57ae3909329db8724c3bed283dd930282f9bec2b"}}, 13 | 1}, 14 | {<<"cuttlefish">>,{pkg,<<"cuttlefish">>,<<"2.0.12">>},1}, 15 | {<<"edown">>, 16 | {git,"git://github.com/uwiger/edown.git", 17 | {ref,"b7c8eb0ac1859f8fce11cbfe3526f5ec83194776"}}, 18 | 2}, 19 | {<<"eleveldb">>,{pkg,<<"eleveldb">>,<<"2.2.20">>},1}, 20 | {<<"exometer_core">>, 21 | {git,"git://github.com/basho/exometer_core.git", 22 | {ref,"3e1516837e15918a7514a78d5126199e3deac989"}}, 23 | 0}, 24 | {<<"folsom">>, 25 | {git,"git://github.com/basho/folsom.git", 26 | {ref,"72944523b6467c9f7add5f1c96dd5020424a2681"}}, 27 | 1}, 28 | {<<"gen_fsm_compat">>,{pkg,<<"gen_fsm_compat">>,<<"0.3.0">>},1}, 29 | {<<"getopt">>,{pkg,<<"getopt">>,<<"0.8.2">>},1}, 30 | {<<"goldrush">>, 31 | {git,"https://github.com/basho/goldrush.git", 32 | {ref,"8f1b715d36b650ec1e1f5612c00e28af6ab0de82"}}, 33 | 1}, 34 | {<<"jam">>,{pkg,<<"jam">>,<<"1.0.0">>},1}, 35 | {<<"lager">>, 36 | {git,"https://github.com/basho/lager.git", 37 | {ref,"81eaef0ce98fdbf64ab95665e3bc2ec4b24c7dac"}}, 38 | 0}, 39 | {<<"lasp_bench">>, 40 | {git,"git://github.com/lasp-lang/lasp-bench", 41 | {ref,"d9b3e78d64ea4709ca10ec062c7fc969c1c503d4"}}, 42 | 0}, 43 | {<<"meck">>, 44 | {git,"git://github.com/basho/meck.git", 45 | {ref,"dde759050eff19a1a80fd854d7375174b191665d"}}, 46 | 2}, 47 | {<<"parse_trans">>, 48 | {git,"git://github.com/uwiger/parse_trans.git", 49 | {ref,"a210adafdfbb904d156d8f22abd5fb58fc17de1e"}}, 50 | 1}, 51 | {<<"partisan">>, 52 | {git,"git://github.com/lasp-lang/partisan", 53 | {ref,"7bff9e18c8c94b5b66d0166ff55949a6ff5ecb6c"}}, 54 | 0}, 55 | {<<"pbkdf2">>, 56 | {git,"git://github.com/marianoguerra/erlang-pbkdf2-no-history", 57 | {ref,"b8561cf72b13497ee498ce00bb80e5f2d24f512f"}}, 58 | 0}, 59 | {<<"poolboy">>, 60 | {git,"https://github.com/Kyorai/poolboy.git", 61 | {ref,"a1f00f170c5ec3736b5430433f4055541d4fbdae"}}, 62 | 0}, 63 | {<<"proper">>,{pkg,<<"proper">>,<<"1.2.0">>},0}, 64 | {<<"quickrand">>,{pkg,<<"quickrand">>,<<"1.7.4">>},2}, 65 | {<<"rand_compat">>,{pkg,<<"rand_compat">>,<<"0.0.3">>},1}, 66 | {<<"recon">>,{pkg,<<"recon">>,<<"2.3.4">>},0}, 67 | {<<"riak_core">>, 68 | {git,"git://github.com/lasp-lang/riak_core", 69 | {ref,"ba4818aedef9cf961fe97d7357457266d9d0aab9"}}, 70 | 0}, 71 | {<<"riak_core_partisan_utils">>, 72 | {git,"git://github.com/lasp-lang/riak_core_partisan_utils", 73 | {ref,"c1e4941a60796dcf754c7eb9df61fb17b1c5498e"}}, 74 | 0}, 75 | {<<"riak_ensemble">>, 76 | {git,"git://github.com/Kyorai/riak_ensemble", 77 | {ref,"1147a3f5e1ca164e80e934187d4e4ee2d0d67c53"}}, 78 | 0}, 79 | {<<"riak_sysmon">>, 80 | {git,"https://github.com/Kyorai/riak_sysmon.git", 81 | {ref,"086930e5641b6a0b0ce659f115e35b1f84780ef2"}}, 82 | 1}, 83 | {<<"setup">>, 84 | {git,"git://github.com/basho/setup.git", 85 | {ref,"4878261b8859909569509ad67f6efefb735651f0"}}, 86 | 1}, 87 | {<<"time_compat">>,{pkg,<<"time_compat">>,<<"0.0.1">>},1}, 88 | 
{<<"types">>,{pkg,<<"types">>,<<"0.1.6">>},1}, 89 | {<<"uuid">>,{pkg,<<"uuid_erl">>,<<"1.7.3">>},1}]}. 90 | [ 91 | {pkg_hash,[ 92 | {<<"acceptor_pool">>, <<"679D741DF87FC13599B1AEF2DF8F78F1F880449A6BEFAB7C44FB6FAE0E92A2DE">>}, 93 | {<<"basho_stats">>, <<"7E1174151509C64FCC1934120ED32295E14F84DAAE7F84926BA2C8D3700D146C">>}, 94 | {<<"blume">>, <<"CFB4F43688690BA81C6A79F54E4678CFD5FDEDAB692F277AE740AE4A3897360D">>}, 95 | {<<"chash">>, <<"AF02484F2640C653C4B9A8557A14CA0704989DBEDB27E7CCBC442F1903A3BCA7">>}, 96 | {<<"cuttlefish">>, <<"1441A12BCE207F7FC796A4DA50D47080D21E83E15309AD6496DE27840A54D5FC">>}, 97 | {<<"eleveldb">>, <<"1FFF63A5055BBF4BF821F797EF76065882B193F5E8095F95FCD9287187773B58">>}, 98 | {<<"gen_fsm_compat">>, <<"5903549F67D595F58A7101154CBE0FDD46955FBFBE40813F1E53C23A970FF5F4">>}, 99 | {<<"getopt">>, <<"B17556DB683000BA50370B16C0619DF1337E7AF7ECBF7D64FBF8D1D6BCE3109B">>}, 100 | {<<"jam">>, <<"ED9B180F2F3A775E6A47AC490954976802F0638C19A393F3E86D4BA4CF890582">>}, 101 | {<<"proper">>, <<"1466492385959412A02871505434E72E92765958C60DBA144B43863554B505A4">>}, 102 | {<<"quickrand">>, <<"F91F34469D6AC153B951BB34DD9C78029882426BD61E12B85E5EE465850C08ED">>}, 103 | {<<"rand_compat">>, <<"011646BC1F0B0C432FE101B816F25B9BBB74A085713CEE1DAFD2D62E9415EAD3">>}, 104 | {<<"recon">>, <<"B406C2FCCDEAA0D94E23B5E30AE3D635A2D461E363A5C9C6316897037CF050D2">>}, 105 | {<<"time_compat">>, <<"23FE0AD1FDF3B5B88821B2D04B4B5E865BF587AE66056D671FE0F53514ED8139">>}, 106 | {<<"types">>, <<"03BB7140016C896D3441A77CB0B7D6ACAA583D6D6E9C4A3E1FD3C25123710290">>}, 107 | {<<"uuid">>, <<"C5DF97D1A3D626235C2415E74053C47B2138BB863C5CD802AB5CAECB8ECC019F">>}]} 108 | ]. 109 | -------------------------------------------------------------------------------- /rebar3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lasp-lang/unir/5f86eca18ff2d20bf80a4f879226acdc9d7bd70d/rebar3 -------------------------------------------------------------------------------- /results-scale.csv: -------------------------------------------------------------------------------- 1 | 16142632,3,59900 2 | 22295029,4,60872 3 | 31635989,5,61922 4 | 16203026,3,59900 5 | 57021042,4,60872 6 | 32065029,5,61922 7 | 20507275,3,59900 8 | 22637026,4,60872 9 | 23482090,5,61922 10 | 22673100,3,59900 11 | 20631567,4,60872 12 | 22660729,5,61922 13 | -------------------------------------------------------------------------------- /src/testable_vnode.erl: -------------------------------------------------------------------------------- 1 | -module(testable_vnode). 2 | -author("Christopher S. Meiklejohn "). 3 | 4 | -callback inject_failure(term(), reference(), binary(), binary()) -> ok. -------------------------------------------------------------------------------- /src/unir.app.src: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | {application, unir, 3 | [ 4 | {description, "A Riak Core Application"}, 5 | {vsn, "1"}, 6 | {registered, []}, 7 | {applications, [ 8 | kernel, 9 | stdlib, 10 | sasl, 11 | riak_core, 12 | partisan, 13 | setup 14 | ]}, 15 | {mod, { unir_app, []}}, 16 | {env, []} 17 | ]}. 18 | -------------------------------------------------------------------------------- /src/unir.erl: -------------------------------------------------------------------------------- 1 | -module(unir). 2 | -author("Christopher S. Meiklejohn "). 3 | 4 | -include_lib("riak_core/include/riak_core_vnode.hrl"). 
5 | 6 | -export([ 7 | ping/0, 8 | sync_ping/0, 9 | sync_spawn_ping/0, 10 | fsm_ping/0, 11 | fsm_get/1, 12 | fsm_put/2, 13 | echo/0, 14 | echo/1]). 15 | 16 | -export([inject_failure/2]). 17 | 18 | -ignore_xref([ 19 | ping/0, 20 | sync_ping/0, 21 | sync_spawn_ping/0 22 | ]). 23 | 24 | -export([mk_reqid/0, 25 | wait_for_reqid/2]). 26 | 27 | -define(TIMEOUT, 10000). 28 | 29 | %% Public API 30 | 31 | %% @doc Pings a random vnode to make sure communication is functional 32 | ping() -> 33 | DocIdx = riak_core_util:chash_key({<<"ping">>, term_to_binary(os:timestamp())}), 34 | PrefList = riak_core_apl:get_primary_apl(DocIdx, 1, unir), 35 | [{IndexNode, _Type}] = PrefList, 36 | riak_core_vnode_master:command(IndexNode, ping, unir_vnode_master). 37 | 38 | %% @doc Pings a random vnode to make sure communication is functional 39 | sync_ping() -> 40 | DocIdx = riak_core_util:chash_key({<<"ping">>, term_to_binary(os:timestamp())}), 41 | PrefList = riak_core_apl:get_primary_apl(DocIdx, 1, unir), 42 | [{IndexNode, _Type}] = PrefList, 43 | riak_core_vnode_master:sync_command(IndexNode, ping, unir_vnode_master). 44 | 45 | %% @doc Perform an echo request. 46 | echo() -> 47 | EchoBinary = partisan_config:get(echo_binary, undefined), 48 | echo(EchoBinary). 49 | 50 | %% @doc Perform an echo request. 51 | echo(EchoBinary) -> 52 | DocIdx = riak_core_util:chash_key({<<"echo">>, term_to_binary(rand:uniform(1024))}), 53 | PrefList = riak_core_apl:get_primary_apl(DocIdx, 1, unir), 54 | [{IndexNode, _Type}] = PrefList, 55 | ok = riak_core_vnode_master:command(IndexNode, {echo, EchoBinary, node(), self()}, unir_vnode_master), 56 | receive 57 | {echo, EchoBinary} -> 58 | ok 59 | end, 60 | ok. 61 | 62 | %% @doc Pings a random vnode to make sure communication is functional 63 | sync_spawn_ping() -> 64 | DocIdx = riak_core_util:chash_key({<<"ping">>, term_to_binary(os:timestamp())}), 65 | PrefList = riak_core_apl:get_primary_apl(DocIdx, 1, unir), 66 | [{IndexNode, _Type}] = PrefList, 67 | riak_core_vnode_master:sync_spawn_command(IndexNode, ping, unir_vnode_master). 68 | 69 | %% @doc Pings a random vnode to make sure communication is functional 70 | fsm_ping() -> 71 | {ok, ReqId} = unir_ping_fsm:ping(), 72 | wait_for_reqid(ReqId, ?TIMEOUT). 73 | 74 | %% @doc Make a request through the put FSM. 75 | fsm_put(Key, Value) -> 76 | {ok, ReqId} = unir_put_fsm:put(Key, Value), 77 | wait_for_reqid(ReqId, ?TIMEOUT). 78 | 79 | %% @doc Make a request through the get FSM. 80 | fsm_get(Key) -> 81 | {ok, ReqId} = unir_get_fsm:get(Key), 82 | wait_for_reqid(ReqId, ?TIMEOUT). 83 | 84 | %% @doc Alter state. 85 | inject_failure(Key, Value) -> 86 | {ok, ReqId} = unir_failure_fsm:inject_failure(Key, Value), 87 | wait_for_reqid(ReqId, ?TIMEOUT). 88 | 89 | %%%=================================================================== 90 | %%% Internal Functions 91 | %%%=================================================================== 92 | 93 | %% @doc Generate a request id. 94 | mk_reqid() -> 95 | erlang:phash2(erlang:timestamp()). 96 | 97 | %% @doc Wait for a response. 98 | wait_for_reqid(ReqID, Timeout) -> 99 | receive 100 | {ReqID, ok} -> 101 | ok; 102 | {ReqID, ok, Val} -> 103 | {ok, Val} 104 | after 105 | Timeout -> 106 | {error, timeout} 107 | end. -------------------------------------------------------------------------------- /src/unir_app.erl: -------------------------------------------------------------------------------- 1 | -module(unir_app). 2 | 3 | -behaviour(application). 4 | 5 | %% Application callbacks 6 | -export([start/2, stop/1]). 
7 | 8 | %% =================================================================== 9 | %% Application callbacks 10 | %% =================================================================== 11 | 12 | start(_StartType, _StartArgs) -> 13 | case unir_sup:start_link() of 14 | {ok, Pid} -> 15 | ok = riak_core:register([{vnode_module, unir_vnode}]), 16 | ok = riak_core_node_watcher:service_up(unir, self()), 17 | 18 | {ok, Pid}; 19 | {error, Reason} -> 20 | {error, Reason} 21 | end. 22 | 23 | stop(_State) -> 24 | ok. 25 | -------------------------------------------------------------------------------- /src/unir_console.erl: -------------------------------------------------------------------------------- 1 | %% @doc Interface for riak_searchng-admin commands. 2 | -module(unir_console). 3 | -export([staged_join/1, 4 | down/1, 5 | ringready/1]). 6 | -ignore_xref([join/1, 7 | leave/1, 8 | remove/1, 9 | ringready/1]). 10 | 11 | staged_join([NodeStr]) -> 12 | Node = list_to_atom(NodeStr), 13 | join(NodeStr, fun riak_core:staged_join/1, 14 | "Success: staged join request for ~p to ~p~n", [node(), Node]). 15 | 16 | join(NodeStr, JoinFn, SuccessFmt, SuccessArgs) -> 17 | try 18 | case JoinFn(NodeStr) of 19 | ok -> 20 | io:format(SuccessFmt, SuccessArgs), 21 | ok; 22 | {error, not_reachable} -> 23 | io:format("Node ~s is not reachable!~n", [NodeStr]), 24 | error; 25 | {error, different_ring_sizes} -> 26 | io:format("Failed: ~s has a different ring_creation_size~n", 27 | [NodeStr]), 28 | error; 29 | {error, unable_to_get_join_ring} -> 30 | io:format("Failed: Unable to get ring from ~s~n", [NodeStr]), 31 | error; 32 | {error, not_single_node} -> 33 | io:format("Failed: This node is already a member of a " 34 | "cluster~n"), 35 | error; 36 | {error, self_join} -> 37 | io:format("Failed: This node cannot join itself in a " 38 | "cluster~n"), 39 | error; 40 | {error, _} -> 41 | io:format("Join failed. Try again in a few moments.~n", []), 42 | error 43 | end 44 | catch 45 | Exception:Reason -> 46 | lager:error("Join failed ~p:~p", [Exception, Reason]), 47 | io:format("Join failed, see log for details~n"), 48 | error 49 | end. 50 | 51 | 52 | down([Node]) -> 53 | try 54 | case riak_core:down(list_to_atom(Node)) of 55 | ok -> 56 | io:format("Success: ~p marked as down~n", [Node]), 57 | ok; 58 | {error, is_up} -> 59 | io:format("Failed: ~s is up~n", [Node]), 60 | error; 61 | {error, not_member} -> 62 | io:format("Failed: ~p is not a member of the cluster.~n", 63 | [Node]), 64 | error; 65 | {error, only_member} -> 66 | io:format("Failed: ~p is the only member.~n", [Node]), 67 | error 68 | end 69 | catch 70 | Exception:Reason -> 71 | lager:error("Down failed ~p:~p", [Exception, Reason]), 72 | io:format("Down failed, see log for details~n"), 73 | error 74 | end. 75 | 76 | ringready([]) -> 77 | try 78 | case riak_core_status:ringready() of 79 | {ok, Nodes} -> 80 | io:format("TRUE All nodes agree on the ring ~p\n", [Nodes]); 81 | {error, {different_owners, N1, N2}} -> 82 | io:format("FALSE Node ~p and ~p list different partition owners\n", [N1, N2]), 83 | error; 84 | {error, {nodes_down, Down}} -> 85 | io:format("FALSE ~p down. All nodes need to be up to check.\n", [Down]), 86 | error 87 | end 88 | catch 89 | Exception:Reason -> 90 | lager:error("Ringready failed ~p:~p", [Exception, 91 | Reason]), 92 | io:format("Ringready failed, see log for details~n"), 93 | error 94 | end. 
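%% Usage sketch (assumption: the bin/unir-admin script generated from the
%% config/admin_bin overlay template dispatches its sub-commands to this
%% module; the node name below is hypothetical):
%%
%%   unir_console:staged_join(["unir2@127.0.0.1"]),
%%   unir_console:ringready([]).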
95 | -------------------------------------------------------------------------------- /src/unir_failure_fsm.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_failure_fsm). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(gen_fsm). 25 | 26 | %% API 27 | -export([start_link/4, 28 | inject_failure/2]). 29 | 30 | %% Callbacks 31 | -export([init/1, 32 | code_change/4, 33 | handle_event/3, 34 | handle_info/3, 35 | handle_sync_event/4, 36 | terminate/3]). 37 | 38 | %% States 39 | -export([prepare/2, 40 | execute/2, 41 | waiting/2]). 42 | 43 | -record(state, {preflist, 44 | req_id, 45 | coordinator, 46 | from, 47 | key, 48 | value, 49 | responses}). 50 | 51 | -define(N, 3). 52 | -define(W, 2). 53 | 54 | %%%=================================================================== 55 | %%% API 56 | %%%=================================================================== 57 | 58 | start_link(ReqId, From, Key, Value) -> 59 | gen_fsm:start_link(?MODULE, [ReqId, From, Key, Value], []). 60 | 61 | %% @doc Join a pid to a group. 62 | inject_failure(Key, Value) -> 63 | ReqId = unir:mk_reqid(), 64 | _ = unir_failure_fsm_sup:start_child([ReqId, self(), Key, Value]), 65 | {ok, ReqId}. 66 | 67 | %%%=================================================================== 68 | %%% Callbacks 69 | %%%=================================================================== 70 | 71 | handle_info(_Info, _StateName, StateData) -> 72 | {stop, badmsg, StateData}. 73 | 74 | handle_event(_Event, _StateName, StateData) -> 75 | {stop, badmsg, StateData}. 76 | 77 | handle_sync_event(_Event, _From, _StateName, StateData) -> 78 | {stop, badmsg, StateData}. 79 | 80 | code_change(_OldVsn, StateName, State, _Extra) -> 81 | {ok, StateName, State}. 82 | 83 | terminate(_Reason, _SN, _SD) -> 84 | ok. 85 | 86 | %%%=================================================================== 87 | %%% States 88 | %%%=================================================================== 89 | 90 | %% @doc Initialize the request. 91 | init([ReqId, From, Key, Value]) -> 92 | State = #state{preflist=undefined, 93 | req_id=ReqId, 94 | key=Key, 95 | value=Value, 96 | coordinator=node(), 97 | from=From, 98 | responses=[]}, 99 | {ok, prepare, State, 0}. 100 | 101 | prepare(timeout, #state{key=Key}=State) -> 102 | DocIdx = riak_core_util:chash_key({<<"objects">>, term_to_binary(Key)}), 103 | Preflist = riak_core_apl:get_primary_apl(DocIdx, 1, unir), 104 | Preflist2 = [{Index, Node} || {{Index, Node}, _Type} <- Preflist], 105 | {next_state, execute, State#state{preflist=Preflist2}, 0}. 
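%% Note: unlike the get/put FSMs, prepare/2 above resolves a single primary
%% (riak_core_apl:get_primary_apl(DocIdx, 1, unir)), so the failure is
%% injected at exactly one replica of the key's preference list.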
106 | 107 | execute(timeout, #state{preflist=Preflist, 108 | req_id=ReqId, 109 | key=Key, 110 | value=Value, 111 | coordinator=Coordinator}=State) -> 112 | unir_vnode:inject_failure(Preflist, {ReqId, Coordinator}, Key, Value), 113 | {next_state, waiting, State}. 114 | 115 | waiting({ok, ReqId}, #state{from=From}=State) -> 116 | From ! {ReqId, ok}, 117 | {stop, normal, State}. 118 | 119 | %%%=================================================================== 120 | %%% Internal Functions 121 | %%%=================================================================== -------------------------------------------------------------------------------- /src/unir_failure_fsm_sup.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_failure_fsm_sup). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(supervisor). 25 | 26 | %% API 27 | -export([start_link/0, 28 | start_child/1, 29 | terminate_child/2]). 30 | 31 | %% Supervisor callbacks 32 | -export([init/1]). 33 | 34 | %% =================================================================== 35 | %% API functions 36 | %% =================================================================== 37 | 38 | %% @doc API for starting the supervisor. 39 | start_link() -> 40 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 41 | 42 | %% @doc Start a child. 43 | start_child(Args) -> 44 | supervisor:start_child(?MODULE, Args). 45 | 46 | %% @doc Stop a child immediately 47 | terminate_child(Supervisor, Pid) -> 48 | supervisor:terminate_child(Supervisor, Pid). 49 | 50 | %% =================================================================== 51 | %% Supervisor callbacks 52 | %% =================================================================== 53 | 54 | %% @doc supervisor callback. 55 | init([]) -> 56 | Spec = {unir_failure_fsm, 57 | {unir_failure_fsm, start_link, []}, 58 | temporary, 5000, worker, [unir_failure_fsm]}, 59 | 60 | {ok, {{simple_one_for_one, 10, 10}, [Spec]}}. -------------------------------------------------------------------------------- /src/unir_get_fsm.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. 
You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_get_fsm). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(gen_fsm). 25 | 26 | %% API 27 | -export([start_link/3, 28 | get/1]). 29 | 30 | %% Callbacks 31 | -export([init/1, 32 | code_change/4, 33 | handle_event/3, 34 | handle_info/3, 35 | handle_sync_event/4, 36 | terminate/3]). 37 | 38 | %% States 39 | -export([prepare/2, 40 | execute/2, 41 | waiting/2]). 42 | 43 | -record(state, {preflist, 44 | req_id, 45 | coordinator, 46 | from, 47 | key, 48 | responses}). 49 | 50 | -define(N, 3). 51 | -define(W, 2). 52 | 53 | %%%=================================================================== 54 | %%% API 55 | %%%=================================================================== 56 | 57 | start_link(ReqId, From, Key) -> 58 | gen_fsm:start_link(?MODULE, [ReqId, From, Key], []). 59 | 60 | %% @doc Join a pid to a group. 61 | get(Key) -> 62 | ReqId = unir:mk_reqid(), 63 | _ = unir_get_fsm_sup:start_child([ReqId, self(), Key]), 64 | {ok, ReqId}. 65 | 66 | %%%=================================================================== 67 | %%% Callbacks 68 | %%%=================================================================== 69 | 70 | handle_info(_Info, _StateName, StateData) -> 71 | {stop, badmsg, StateData}. 72 | 73 | handle_event(_Event, _StateName, StateData) -> 74 | {stop, badmsg, StateData}. 75 | 76 | handle_sync_event(_Event, _From, _StateName, StateData) -> 77 | {stop, badmsg, StateData}. 78 | 79 | code_change(_OldVsn, StateName, State, _Extra) -> 80 | {ok, StateName, State}. 81 | 82 | terminate(_Reason, _SN, _SD) -> 83 | ok. 84 | 85 | %%%=================================================================== 86 | %%% States 87 | %%%=================================================================== 88 | 89 | %% @doc Initialize the request. 90 | init([ReqId, From, Key]) -> 91 | State = #state{preflist=undefined, 92 | req_id=ReqId, 93 | key=Key, 94 | coordinator=node(), 95 | from=From, 96 | responses=[]}, 97 | {ok, prepare, State, 0}. 98 | 99 | prepare(timeout, #state{key=Key}=State) -> 100 | %% lager:info("** get fsm started..."), 101 | 102 | DocIdx = riak_core_util:chash_key({<<"objects">>, term_to_binary(Key)}), 103 | Preflist = riak_core_apl:get_primary_apl(DocIdx, ?N, unir), 104 | Preflist2 = [{Index, Node} || {{Index, Node}, _Type} <- Preflist], 105 | {next_state, execute, State#state{preflist=Preflist2}, 0}. 106 | 107 | execute(timeout, #state{preflist=Preflist, 108 | req_id=ReqId, 109 | key=Key, 110 | coordinator=Coordinator}=State) -> 111 | unir_vnode:get(Preflist, {ReqId, Coordinator}, Key), 112 | {next_state, waiting, State}. 113 | 114 | waiting({ok, ReqId, Value}, #state{responses=Responses0, from=From}=State0) -> 115 | %% lager:info("** got response number ~p: ~p", [length(Responses0) + 1, Value]), 116 | 117 | Responses = [Value|Responses0], 118 | State = State0#state{responses=Responses}, 119 | case length(Responses) =:= ?W of 120 | true -> 121 | From ! 
{ReqId, ok, merge(Responses)}, 122 | {stop, normal, State}; 123 | false -> 124 | {next_state, waiting, State} 125 | end. 126 | 127 | %%%=================================================================== 128 | %%% Internal Functions 129 | %%%=================================================================== 130 | 131 | merge(Values) -> 132 | lists:foldl(fun(Value, Acc) -> 133 | case Value of 134 | 135 | %% Values override not_found, but undefined doesn't. 136 | not_found -> 137 | case Acc of 138 | undefined -> 139 | Value; 140 | Acc -> 141 | Acc 142 | end; 143 | 144 | %% Timestamps that are larger override timestamps that are younger. 145 | {Timestamp, Binary} -> 146 | case Acc of 147 | {LastTimestamp, LastBinary} -> 148 | case timer:now_diff(Timestamp, LastTimestamp) >= 0 of 149 | true -> 150 | {Timestamp, Binary}; 151 | false -> 152 | {LastTimestamp, LastBinary} 153 | end; 154 | _ -> 155 | Value 156 | end; 157 | 158 | %% Otherwise, a value. 159 | Other -> 160 | Other 161 | end 162 | 163 | end, undefined, Values). -------------------------------------------------------------------------------- /src/unir_get_fsm_sup.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_get_fsm_sup). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(supervisor). 25 | 26 | %% API 27 | -export([start_link/0, 28 | start_child/1, 29 | terminate_child/2]). 30 | 31 | %% Supervisor callbacks 32 | -export([init/1]). 33 | 34 | %% =================================================================== 35 | %% API functions 36 | %% =================================================================== 37 | 38 | %% @doc API for starting the supervisor. 39 | start_link() -> 40 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 41 | 42 | %% @doc Start a child. 43 | start_child(Args) -> 44 | supervisor:start_child(?MODULE, Args). 45 | 46 | %% @doc Stop a child immediately 47 | terminate_child(Supervisor, Pid) -> 48 | supervisor:terminate_child(Supervisor, Pid). 49 | 50 | %% =================================================================== 51 | %% Supervisor callbacks 52 | %% =================================================================== 53 | 54 | %% @doc supervisor callback. 55 | init([]) -> 56 | Spec = {unir_get_fsm, 57 | {unir_get_fsm, start_link, []}, 58 | temporary, 5000, worker, [unir_get_fsm]}, 59 | 60 | {ok, {{simple_one_for_one, 10, 10}, [Spec]}}. 
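%% Request-flow sketch (mirrors unir:fsm_get/1 and unir:wait_for_reqid/2; the
%% key below is hypothetical): every request FSM in this application is a
%% temporary child of a simple_one_for_one supervisor like the one above, runs
%% prepare -> execute -> waiting, and answers the caller with a message tagged
%% by the request id:
%%
%%   {ok, ReqId} = unir_get_fsm:get(some_key),
%%   receive
%%       {ReqId, ok, Value} -> {ok, Value}
%%   after 10000 ->
%%       {error, timeout}
%%   end.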
-------------------------------------------------------------------------------- /src/unir_ping_fsm.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_ping_fsm). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(gen_fsm). 25 | 26 | %% API 27 | -export([start_link/2, 28 | ping/0]). 29 | 30 | %% Callbacks 31 | -export([init/1, 32 | code_change/4, 33 | handle_event/3, 34 | handle_info/3, 35 | handle_sync_event/4, 36 | terminate/3]). 37 | 38 | %% States 39 | -export([prepare/2, 40 | execute/2, 41 | waiting/2]). 42 | 43 | -record(state, {preflist, 44 | req_id, 45 | coordinator, 46 | from, 47 | responses}). 48 | 49 | -define(N, 3). 50 | -define(W, 2). 51 | 52 | %%%=================================================================== 53 | %%% API 54 | %%%=================================================================== 55 | 56 | start_link(ReqId, From) -> 57 | gen_fsm:start_link(?MODULE, [ReqId, From], []). 58 | 59 | %% @doc Join a pid to a group. 60 | ping() -> 61 | ReqId = unir:mk_reqid(), 62 | _ = unir_ping_fsm_sup:start_child([ReqId, self()]), 63 | {ok, ReqId}. 64 | 65 | %%%=================================================================== 66 | %%% Callbacks 67 | %%%=================================================================== 68 | 69 | handle_info(_Info, _StateName, StateData) -> 70 | {stop, badmsg, StateData}. 71 | 72 | handle_event(_Event, _StateName, StateData) -> 73 | {stop, badmsg, StateData}. 74 | 75 | handle_sync_event(_Event, _From, _StateName, StateData) -> 76 | {stop, badmsg, StateData}. 77 | 78 | code_change(_OldVsn, StateName, State, _Extra) -> 79 | {ok, StateName, State}. 80 | 81 | terminate(_Reason, _SN, _SD) -> 82 | ok. 83 | 84 | %%%=================================================================== 85 | %%% States 86 | %%%=================================================================== 87 | 88 | %% @doc Initialize the request. 89 | init([ReqId, From]) -> 90 | State = #state{preflist=undefined, 91 | req_id=ReqId, 92 | coordinator=node(), 93 | from=From, 94 | responses=0}, 95 | {ok, prepare, State, 0}. 96 | 97 | prepare(timeout, State) -> 98 | DocIdx = riak_core_util:chash_key({<<"ping">>, term_to_binary(os:timestamp())}), 99 | Preflist = riak_core_apl:get_primary_apl(DocIdx, ?N, unir), 100 | Preflist2 = [{Index, Node} || {{Index, Node}, _Type} <- Preflist], 101 | {next_state, execute, State#state{preflist=Preflist2}, 0}. 102 | 103 | execute(timeout, #state{preflist=Preflist, 104 | req_id=ReqId, 105 | coordinator=Coordinator}=State) -> 106 | unir_vnode:ping(Preflist, {ReqId, Coordinator}), 107 | {next_state, waiting, State}. 
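%% waiting/2 below counts one {ok, ReqId} acknowledgement per vnode and
%% replies to the caller once ?W (2) of the ?N (3) pinged vnodes have
%% answered; the FSM then stops normally, so any later acknowledgements go to
%% a dead process and are discarded.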
108 | 109 | waiting({ok, ReqId}, #state{responses=Responses0, from=From}=State0) -> 110 | Responses = Responses0 + 1, 111 | State = State0#state{responses=Responses}, 112 | case Responses =:= ?W of 113 | true -> 114 | From ! {ReqId, ok}, 115 | {stop, normal, State}; 116 | false -> 117 | {next_state, waiting, State} 118 | end. 119 | 120 | %%%=================================================================== 121 | %%% Internal Functions 122 | %%%=================================================================== -------------------------------------------------------------------------------- /src/unir_ping_fsm_sup.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_ping_fsm_sup). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(supervisor). 25 | 26 | %% API 27 | -export([start_link/0, 28 | start_child/1, 29 | terminate_child/2]). 30 | 31 | %% Supervisor callbacks 32 | -export([init/1]). 33 | 34 | %% =================================================================== 35 | %% API functions 36 | %% =================================================================== 37 | 38 | %% @doc API for starting the supervisor. 39 | start_link() -> 40 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 41 | 42 | %% @doc Start a child. 43 | start_child(Args) -> 44 | supervisor:start_child(?MODULE, Args). 45 | 46 | %% @doc Stop a child immediately 47 | terminate_child(Supervisor, Pid) -> 48 | supervisor:terminate_child(Supervisor, Pid). 49 | 50 | %% =================================================================== 51 | %% Supervisor callbacks 52 | %% =================================================================== 53 | 54 | %% @doc supervisor callback. 55 | init([]) -> 56 | Spec = {unir_ping_fsm, 57 | {unir_ping_fsm, start_link, []}, 58 | temporary, 5000, worker, [unir_ping_fsm]}, 59 | 60 | {ok, {{simple_one_for_one, 10, 10}, [Spec]}}. -------------------------------------------------------------------------------- /src/unir_put_fsm.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. 
You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_put_fsm). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(gen_fsm). 25 | 26 | %% API 27 | -export([start_link/4, 28 | put/2]). 29 | 30 | %% Callbacks 31 | -export([init/1, 32 | code_change/4, 33 | handle_event/3, 34 | handle_info/3, 35 | handle_sync_event/4, 36 | terminate/3]). 37 | 38 | %% States 39 | -export([prepare/2, 40 | execute/2, 41 | waiting/2]). 42 | 43 | -record(state, {preflist, 44 | req_id, 45 | coordinator, 46 | from, 47 | key, 48 | value, 49 | responses}). 50 | 51 | -define(N, 3). 52 | -define(W, 2). 53 | 54 | %%%=================================================================== 55 | %%% API 56 | %%%=================================================================== 57 | 58 | start_link(ReqId, From, Key, Value) -> 59 | gen_fsm:start_link(?MODULE, [ReqId, From, Key, Value], []). 60 | 61 | %% @doc Join a pid to a group. 62 | put(Key, Value) -> 63 | ReqId = unir:mk_reqid(), 64 | _ = unir_put_fsm_sup:start_child([ReqId, self(), Key, Value]), 65 | {ok, ReqId}. 66 | 67 | %%%=================================================================== 68 | %%% Callbacks 69 | %%%=================================================================== 70 | 71 | handle_info(_Info, _StateName, StateData) -> 72 | {stop, badmsg, StateData}. 73 | 74 | handle_event(_Event, _StateName, StateData) -> 75 | {stop, badmsg, StateData}. 76 | 77 | handle_sync_event(_Event, _From, _StateName, StateData) -> 78 | {stop, badmsg, StateData}. 79 | 80 | code_change(_OldVsn, StateName, State, _Extra) -> 81 | {ok, StateName, State}. 82 | 83 | terminate(_Reason, _SN, _SD) -> 84 | ok. 85 | 86 | %%%=================================================================== 87 | %%% States 88 | %%%=================================================================== 89 | 90 | %% @doc Initialize the request. 91 | init([ReqId, From, Key, Value]) -> 92 | State = #state{preflist=undefined, 93 | req_id=ReqId, 94 | key=Key, 95 | value=Value, 96 | coordinator=node(), 97 | from=From, 98 | responses=0}, 99 | {ok, prepare, State, 0}. 100 | 101 | prepare(timeout, #state{key=Key}=State) -> 102 | DocIdx = riak_core_util:chash_key({<<"objects">>, term_to_binary(Key)}), 103 | Preflist = riak_core_apl:get_primary_apl(DocIdx, ?N, unir), 104 | Preflist2 = [{Index, Node} || {{Index, Node}, _Type} <- Preflist], 105 | {next_state, execute, State#state{preflist=Preflist2}, 0}. 106 | 107 | execute(timeout, #state{preflist=Preflist, 108 | req_id=ReqId, 109 | key=Key, 110 | value=Value, 111 | coordinator=Coordinator}=State) -> 112 | %% lager:info("** sending requests to preflist: ~p", [Preflist]), 113 | unir_vnode:put(Preflist, {ReqId, Coordinator}, Key, Value), 114 | {next_state, waiting, State}. 
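%% As in the ping FSM, waiting/2 below acknowledges the caller only after ?W
%% of the ?N replicas have confirmed the write, replying with
%% {ReqId, ok, Value} so the stored value is echoed back.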
115 | 116 | waiting({ok, ReqId, Value}, #state{responses=Responses0, from=From}=State0) -> 117 | %% lager:info("** got response with value ~p", [Value]), 118 | Responses = Responses0 + 1, 119 | State = State0#state{responses=Responses}, 120 | case Responses =:= ?W of 121 | true -> 122 | From ! {ReqId, ok, Value}, 123 | {stop, normal, State}; 124 | false -> 125 | {next_state, waiting, State} 126 | end. 127 | 128 | %%%=================================================================== 129 | %%% Internal Functions 130 | %%%=================================================================== -------------------------------------------------------------------------------- /src/unir_put_fsm_sup.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(unir_put_fsm_sup). 22 | -author('Christopher S. Meiklejohn '). 23 | 24 | -behaviour(supervisor). 25 | 26 | %% API 27 | -export([start_link/0, 28 | start_child/1, 29 | terminate_child/2]). 30 | 31 | %% Supervisor callbacks 32 | -export([init/1]). 33 | 34 | %% =================================================================== 35 | %% API functions 36 | %% =================================================================== 37 | 38 | %% @doc API for starting the supervisor. 39 | start_link() -> 40 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 41 | 42 | %% @doc Start a child. 43 | start_child(Args) -> 44 | supervisor:start_child(?MODULE, Args). 45 | 46 | %% @doc Stop a child immediately 47 | terminate_child(Supervisor, Pid) -> 48 | supervisor:terminate_child(Supervisor, Pid). 49 | 50 | %% =================================================================== 51 | %% Supervisor callbacks 52 | %% =================================================================== 53 | 54 | %% @doc supervisor callback. 55 | init([]) -> 56 | Spec = {unir_put_fsm, 57 | {unir_put_fsm, start_link, []}, 58 | temporary, 5000, worker, [unir_put_fsm]}, 59 | 60 | {ok, {{simple_one_for_one, 10, 10}, [Spec]}}. -------------------------------------------------------------------------------- /src/unir_sup.erl: -------------------------------------------------------------------------------- 1 | -module(unir_sup). 2 | 3 | -behaviour(supervisor). 4 | 5 | %% API 6 | -export([start_link/0]). 7 | 8 | %% Supervisor callbacks 9 | -export([init/1]). 10 | 11 | %% =================================================================== 12 | %% API functions 13 | %% =================================================================== 14 | 15 | start_link() -> 16 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 
17 | 18 | %% =================================================================== 19 | %% Supervisor callbacks 20 | %% =================================================================== 21 | 22 | init(_Args) -> 23 | VMaster = {unir_vnode_master, 24 | {riak_core_vnode_master, start_link, [unir_vnode]}, 25 | permanent, 5000, worker, [riak_core_vnode_master]}, 26 | 27 | PingFSM = {unir_ping_fsm_sup, 28 | {unir_ping_fsm_sup, start_link, []}, 29 | permanent, infinity, supervisor, [unir_ping_fsm_sup]}, 30 | 31 | PutFSM = {unir_put_fsm_sup, 32 | {unir_put_fsm_sup, start_link, []}, 33 | permanent, infinity, supervisor, [unir_put_fsm_sup]}, 34 | 35 | GetFSM = {unir_get_fsm_sup, 36 | {unir_get_fsm_sup, start_link, []}, 37 | permanent, infinity, supervisor, [unir_get_fsm_sup]}, 38 | 39 | FailureFSM = {unir_failure_fsm_sup, 40 | {unir_failure_fsm_sup, start_link, []}, 41 | permanent, infinity, supervisor, [unir_failure_fsm_sup]}, 42 | 43 | {ok, {{one_for_one, 5, 10}, [VMaster, PingFSM, PutFSM, GetFSM, FailureFSM]}}. -------------------------------------------------------------------------------- /src/unir_vnode.erl: -------------------------------------------------------------------------------- 1 | -module(unir_vnode). 2 | -author("Christopher S. Meiklejohn "). 3 | 4 | -behaviour(riak_core_vnode). 5 | -behaviour(testable_vnode). 6 | 7 | -export([start_vnode/1, 8 | init/1, 9 | ping/2, 10 | put/4, 11 | get/3]). 12 | 13 | -export([inject_failure/4]). 14 | 15 | -export([terminate/2, 16 | handle_command/3, 17 | is_empty/1, 18 | delete/1, 19 | handle_handoff_command/3, 20 | handoff_starting/2, 21 | handoff_cancelled/1, 22 | handoff_finished/2, 23 | handle_handoff_data/2, 24 | encode_handoff_item/2, 25 | handle_overload_command/3, 26 | handle_overload_info/2, 27 | handle_coverage/4, 28 | handle_exit/3]). 29 | 30 | -ignore_xref([start_vnode/1]). 31 | 32 | -record(state, {partition, store}). 33 | 34 | -define(MASTER, unir_vnode_master). 35 | 36 | %% API 37 | start_vnode(I) -> 38 | riak_core_vnode_master:get_vnode_pid(I, ?MODULE). 39 | 40 | init([Partition]) -> 41 | Store = dict:new(), 42 | {ok, #state {partition=Partition, store=Store}}. 43 | 44 | put(Preflist, Identity, Key, Value) -> 45 | riak_core_vnode_master:command(Preflist, 46 | {put, Identity, Key, Value}, 47 | {fsm, undefined, self()}, 48 | ?MASTER). 49 | 50 | get(Preflist, Identity, Key) -> 51 | riak_core_vnode_master:command(Preflist, 52 | {get, Identity, Key}, 53 | {fsm, undefined, self()}, 54 | ?MASTER). 55 | 56 | inject_failure(Preflist, Identity, Key, Value) -> 57 | lager:info("*** failure injection is about to call vnode"), 58 | riak_core_vnode_master:command(Preflist, 59 | {inject_failure, Identity, Key, Value}, 60 | {fsm, undefined, self()}, 61 | ?MASTER). 62 | 63 | ping(Preflist, Identity) -> 64 | riak_core_vnode_master:command(Preflist, 65 | {ping, Identity}, 66 | {fsm, undefined, self()}, 67 | ?MASTER). 68 | 69 | handle_command({put, {ReqId, _}, Key, Value}, _Sender, #state{store=Store0}=State) -> 70 | %% Sleep for 10ms, read 1MB from disk. 
71 | %% https://gist.github.com/jboner/2841832 72 | timer:sleep(10), 73 | Store = dict:store(Key, Value, Store0), 74 | {reply, {ok, ReqId, Value}, State#state{store=Store}}; 75 | handle_command({inject_failure, {ReqId, _}, Key, Value}, _Sender, #state{store=Store0}=State) -> 76 | Store = case Value of 77 | undefined -> 78 | dict:erase(Key, Store0); 79 | Value -> 80 | case dict:find(Key, Store0) of 81 | {ok, _} -> 82 | dict:store(Key, Value, Store0); 83 | error -> 84 | Store0 85 | end 86 | end, 87 | {reply, {ok, ReqId}, State#state{store=Store}}; 88 | handle_command({get, {ReqId, _}, Key}, _Sender, #state{store=Store}=State) -> 89 | %% Sleep for 10ms, read 1MB from disk. 90 | %% https://gist.github.com/jboner/2841832 91 | timer:sleep(10), 92 | Value = case dict:find(Key, Store) of 93 | {ok, V} -> 94 | V; 95 | error -> 96 | not_found 97 | end, 98 | {reply, {ok, ReqId, Value}, State}; 99 | handle_command({ping, {ReqId, _}}, _Sender, State) -> 100 | {reply, {ok, ReqId}, State}; 101 | handle_command({echo, EchoBinary, FromNode, FromPid}, _Sender, State) -> 102 | riak_core_partisan_utils:forward(vnode, FromNode, FromPid, {echo, EchoBinary}), 103 | {reply, ok, State}; 104 | handle_command(ping, _Sender, State) -> 105 | {reply, {pong, State#state.partition}, State}; 106 | handle_command(Message, _Sender, State) -> 107 | lager:warning("unhandled_command ~p", [Message]), 108 | {noreply, State}. 109 | 110 | handle_overload_command(_, _, _) -> 111 | ok. 112 | 113 | handle_overload_info(_, _) -> 114 | ok. 115 | 116 | handle_handoff_command(_Message, _Sender, State) -> 117 | {noreply, State}. 118 | 119 | handoff_starting(_TargetNode, State) -> 120 | {true, State}. 121 | 122 | handoff_cancelled(State) -> 123 | {ok, State}. 124 | 125 | handoff_finished(_TargetNode, State) -> 126 | {ok, State}. 127 | 128 | handle_handoff_data(_Data, State) -> 129 | {reply, ok, State}. 130 | 131 | encode_handoff_item(_ObjectName, _ObjectValue) -> 132 | <<>>. 133 | 134 | is_empty(State) -> 135 | {true, State}. 136 | 137 | delete(State) -> 138 | {ok, State}. 139 | 140 | handle_coverage(_Req, _KeySpaces, _Sender, State) -> 141 | {stop, not_implemented, State}. 142 | 143 | handle_exit(_Pid, _Reason, State) -> 144 | {noreply, State}. 145 | 146 | terminate(_Reason, _State) -> 147 | ok. -------------------------------------------------------------------------------- /test/functionality_SUITE.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher S. Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | %% 21 | 22 | -module(functionality_SUITE). 23 | -author("Christopher S. Meiklejohn "). 
24 | 25 | %% common_test callbacks 26 | -export([suite/0, 27 | init_per_suite/1, 28 | end_per_suite/1, 29 | init_per_testcase/2, 30 | end_per_testcase/2, 31 | all/0, 32 | groups/0, 33 | init_per_group/2]). 34 | 35 | %% tests 36 | -compile([export_all]). 37 | 38 | -include_lib("common_test/include/ct.hrl"). 39 | -include_lib("eunit/include/eunit.hrl"). 40 | -include_lib("kernel/include/inet.hrl"). 41 | 42 | -define(APP, unir). 43 | -define(CLIENT_NUMBER, 3). 44 | -define(PEER_PORT, 9000). 45 | -define(GET_REQUEST, fsm_get). 46 | -define(PUT_REQUEST, fsm_put). 47 | 48 | -define(SUPPORT, support). 49 | 50 | %% =================================================================== 51 | %% common_test callbacks 52 | %% =================================================================== 53 | 54 | suite() -> 55 | [{timetrap, {hours, 10}}]. 56 | 57 | init_per_suite(_Config) -> 58 | _Config. 59 | 60 | end_per_suite(_Config) -> 61 | _Config. 62 | 63 | init_per_testcase(Case, Config) -> 64 | lager:info("Beginning test case ~p", [Case]), 65 | [{hash, erlang:phash2({Case, Config})}|Config]. 66 | 67 | end_per_testcase(Case, Config) -> 68 | lager:info("Ending test case ~p", [Case]), 69 | Config. 70 | 71 | init_per_group(disterl, Config) -> 72 | Config; 73 | 74 | init_per_group(partisan, Config) -> 75 | [{partisan_dispatch, true}] ++ Config; 76 | 77 | init_per_group(partisan_races, Config) -> 78 | init_per_group(partisan, Config); 79 | init_per_group(partisan_scale, Config) -> 80 | init_per_group(partisan, Config); 81 | init_per_group(partisan_large_scale, Config) -> 82 | init_per_group(partisan, Config); 83 | 84 | init_per_group(partisan_with_parallelism, Config) -> 85 | [{parallelism, 5}] ++ init_per_group(partisan, Config); 86 | init_per_group(partisan_with_binary_padding, Config) -> 87 | [{binary_padding, true}] ++ init_per_group(partisan, Config); 88 | init_per_group(partisan_with_vnode_partitioning, Config) -> 89 | [{vnode_partitioning, true}] ++ init_per_group(partisan, Config); 90 | init_per_group(partisan_without_fast_forward, Config) -> 91 | [{disable_fast_forward, true}] ++ init_per_group(partisan, Config); 92 | 93 | init_per_group(_, Config) -> 94 | Config. 95 | 96 | end_per_group(_, _Config) -> 97 | ok. 98 | 99 | all() -> 100 | [ 101 | {group, default, []} 102 | ]. 103 | 104 | groups() -> 105 | [ 106 | {basic, [], 107 | [membership_test, 108 | metadata_test, 109 | get_put_test, 110 | vnode_test]}, 111 | 112 | {failures, [], 113 | [large_gossip_test, 114 | transition_test]}, 115 | 116 | {default, [], 117 | [{group, basic}] 118 | }, 119 | 120 | {disterl, [], 121 | [{group, basic}] 122 | }, 123 | 124 | {partisan, [], 125 | [{group, basic}] 126 | }, 127 | 128 | {races, [], 129 | [four_node_membership_test]}, 130 | 131 | {partisan_races, [], 132 | [four_node_membership_test]}, 133 | 134 | {scale, [], 135 | [scale_test]}, 136 | 137 | {partisan_scale, [], 138 | [scale_test]}, 139 | 140 | {large_scale, [], 141 | [large_scale_test]}, 142 | 143 | {partisan_large_scale, [], 144 | [large_scale_test]}, 145 | 146 | {partisan_with_parallelism, [], 147 | [{group, basic}]}, 148 | 149 | {partisan_with_binary_padding, [], 150 | [{group, basic}]}, 151 | 152 | {partisan_with_vnode_partitioning, [], 153 | [{group, basic}]}, 154 | 155 | {partisan_without_fast_forward, [], 156 | [get_put_with_partition_test]} 157 | ]. 158 | 159 | %% =================================================================== 160 | %% Tests. 
161 | %% =================================================================== 162 | 163 | large_scale_test(Config) -> 164 | case os:getenv("TRAVIS") of 165 | "true" -> 166 | lager:info("Skipping test; outside of the travis environment."); 167 | _ -> 168 | Nodes = ?SUPPORT:start(large_scale_test, 169 | Config, 170 | [{partisan_peer_service_manager, 171 | partisan_default_peer_service_manager}, 172 | {num_nodes, 10}, 173 | {cluster_nodes, false}]), 174 | 175 | ?SUPPORT:scale(Nodes, Config) 176 | end, 177 | 178 | ok. 179 | 180 | scale_test(Config) -> 181 | Nodes = ?SUPPORT:start(scale_test, 182 | Config, 183 | [{partisan_peer_service_manager, 184 | partisan_default_peer_service_manager}, 185 | {num_nodes, 5}, 186 | {cluster_nodes, false}]), 187 | 188 | ?SUPPORT:scale(Nodes, Config), 189 | 190 | ok. 191 | 192 | transition_test(Config) -> 193 | Nodes = ?SUPPORT:start(transition_test, 194 | Config, 195 | [{partisan_peer_service_manager, 196 | partisan_default_peer_service_manager}, 197 | {num_nodes, 4}, 198 | {cluster_nodes, false}]), 199 | 200 | %% Get the list of nodes. 201 | [{_, Node1}, {_, Node2}, {_, Node3}, {_, Node4}] = Nodes, 202 | 203 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 204 | 205 | %% Cluster the first two ndoes. 206 | ?assertEqual(ok, ?SUPPORT:join_cluster([Node1, Node2])), 207 | 208 | %% Verify appropriate number of connections. 209 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections([Node1, Node2])), 210 | 211 | %% Perform metadata storage write. 212 | ?assertEqual(ok, ?SUPPORT:perform_metadata_write(Node1)), 213 | 214 | %% Join the third node. 215 | ?assertEqual(ok, ?SUPPORT:staged_join(Node3, Node1)), 216 | 217 | %% Plan will only succeed once the ring has been gossiped. 218 | ?assertEqual(ok, ?SUPPORT:plan_and_commit(Node1)), 219 | 220 | %% Verify appropriate number of connections. 221 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections([Node1, Node2, Node3])), 222 | 223 | %% Join the fourth node. 224 | ?assertEqual(ok, ?SUPPORT:staged_join(Node4, Node1)), 225 | 226 | %% Plan will only succeed once the ring has been gossiped. 227 | ?assertEqual(ok, ?SUPPORT:plan_and_commit(Node1)), 228 | 229 | %% Verify appropriate number of connections. 230 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections([Node1, Node2, Node3, Node4])), 231 | 232 | %% Verify that we can read that value at all nodes. 233 | ?assertEqual(ok, ?SUPPORT:wait_until_metadata_read(SortedNodes)), 234 | 235 | %% Leave a node. 236 | ?assertEqual(ok, ?SUPPORT:leave(Node3)), 237 | 238 | %% Verify appropriate number of connections. 239 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections([Node1, Node2, Node4])), 240 | 241 | ?SUPPORT:stop(Nodes), 242 | 243 | ok. 244 | 245 | metadata_test(Config) -> 246 | Nodes = ?SUPPORT:start(metadata_test, 247 | Config, 248 | [{partisan_peer_service_manager, 249 | partisan_default_peer_service_manager}]), 250 | 251 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 252 | 253 | %% Get the first node. 254 | [{_Name, Node}|_] = Nodes, 255 | 256 | %% Put a value into the metadata system. 257 | ?assertEqual(ok, ?SUPPORT:perform_metadata_write(Node)), 258 | 259 | %% Verify that we can read that value at all nodes. 260 | ?assertEqual(ok, ?SUPPORT:wait_until_metadata_read(SortedNodes)), 261 | 262 | ?SUPPORT:stop(Nodes), 263 | 264 | ok. 
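%% The next test isolates the first node by installing partisan message
%% filters that drop traffic to every peer, then expects both the get and put
%% FSMs to fail with {error, timeout} rather than reach a quorum.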
265 | 266 | get_put_with_partition_test(Config) -> 267 | Nodes = ?SUPPORT:start(get_put_with_partition_test, 268 | Config, 269 | [{partisan_peer_service_manager, 270 | partisan_default_peer_service_manager}]), 271 | 272 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 273 | 274 | Key = key, 275 | Value = <<"binary">>, 276 | 277 | %% Get first node. 278 | Node = hd(SortedNodes), 279 | 280 | %% Partition node from all other nodes. 281 | PartitionFun = fun(N) -> 282 | lager:info("Adding message filter for node ~p", [N]), 283 | 284 | FilterFun = fun({MessageNode, MessageBody}) -> 285 | lager:info("Filter function invoked for message ~p ~p", [MessageNode, MessageBody]), 286 | lager:info("Filter function invoked for message ~p ~p", [MessageNode, MessageBody]), 287 | 288 | case MessageNode of 289 | N -> 290 | false; 291 | _ -> 292 | true 293 | end 294 | end, 295 | ok = rpc:call(Node, partisan_default_peer_service_manager, add_message_filter, [N, FilterFun]) 296 | end, 297 | lists:foreach(PartitionFun, SortedNodes), 298 | 299 | %% Make get request. 300 | case rpc:call(Node, ?APP, ?GET_REQUEST, [Key]) of 301 | {ok, _} -> 302 | ct:fail({error, successful}); 303 | {error, timeout} -> 304 | ok 305 | end, 306 | 307 | %% Make put request. 308 | case rpc:call(Node, ?APP, ?PUT_REQUEST, [Key, Value]) of 309 | {ok, _} -> 310 | ct:fail({error, successful}); 311 | {error, timeout} -> 312 | ok 313 | end, 314 | 315 | ?SUPPORT:stop(Nodes), 316 | 317 | ok. 318 | 319 | get_put_test(Config) -> 320 | Nodes = ?SUPPORT:start(get_put_test, 321 | Config, 322 | [{partisan_peer_service_manager, 323 | partisan_default_peer_service_manager}]), 324 | 325 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 326 | 327 | Key = key, 328 | Value = <<"binary">>, 329 | 330 | %% Get first node. 331 | Node = hd(SortedNodes), 332 | 333 | %% Make get request. 334 | case rpc:call(Node, ?APP, ?GET_REQUEST, [Key]) of 335 | {ok, _} -> 336 | ok; 337 | GetError -> 338 | lager:info("Get failed: ~p", [GetError]), 339 | ct:fail({error, GetError}) 340 | end, 341 | 342 | %% Make put request. 343 | case rpc:call(Node, ?APP, ?PUT_REQUEST, [Key, Value]) of 344 | {ok, _} -> 345 | ok; 346 | PutError -> 347 | lager:info("Put failed: ~p", [PutError]), 348 | ct:fail({error, PutError}) 349 | end, 350 | 351 | ?SUPPORT:stop(Nodes), 352 | 353 | ok. 354 | 355 | four_node_membership_test(Config) -> 356 | Nodes = ?SUPPORT:start(four_node_membership_test, 357 | Config, 358 | [{num_nodes, 4}, 359 | {partisan_peer_service_manager, 360 | partisan_default_peer_service_manager}]), 361 | 362 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 363 | 364 | %% Verify partisan connection is configured with the correct 365 | %% membership information. 366 | lager:info("Waiting for partisan membership..."), 367 | ?assertEqual(ok, ?SUPPORT:wait_until_partisan_membership(SortedNodes)), 368 | 369 | %% Ensure we have the right number of connections. 370 | %% Verify appropriate number of connections. 371 | lager:info("Waiting for partisan connections..."), 372 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections(SortedNodes)), 373 | 374 | ?SUPPORT:stop(Nodes), 375 | 376 | ok. 
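%% large_gossip_test below bloats the ring via riak_core_ring_manager:bloat_ring
%% and then sleeps through several gossip rounds, so the cost of gossiping a
%% large ring can be observed in the node logs.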
377 | 378 | large_gossip_test(Config) -> 379 | Nodes = ?SUPPORT:start(large_gossip_test, 380 | Config, 381 | [{num_nodes, 5}, 382 | {partisan_peer_service_manager, 383 | partisan_default_peer_service_manager}]), 384 | 385 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 386 | 387 | %% Verify partisan connection is configured with the correct 388 | %% membership information. 389 | lager:info("Waiting for partisan membership..."), 390 | ?assertEqual(ok, ?SUPPORT:wait_until_partisan_membership(SortedNodes)), 391 | 392 | %% Ensure we have the right number of connections. 393 | %% Verify appropriate number of connections. 394 | lager:info("Waiting for partisan connections..."), 395 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections(SortedNodes)), 396 | 397 | %% Bloat ring. 398 | lager:info("Attempting to bloat the ring to see performance effect..."), 399 | Node1 = hd(SortedNodes), 400 | ok = rpc:call(Node1, riak_core_ring_manager, bloat_ring, []), 401 | 402 | %% Sleep for gossip rounds. 403 | lager:info("Sleeping for 50 seconds..."), 404 | timer:sleep(50000), 405 | 406 | ?SUPPORT:stop(Nodes), 407 | 408 | ok. 409 | 410 | membership_test(Config) -> 411 | Nodes = ?SUPPORT:start(membership_test, 412 | Config, 413 | [{num_nodes, 3}, 414 | {partisan_peer_service_manager, 415 | partisan_default_peer_service_manager}]), 416 | 417 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 418 | 419 | %% Verify partisan connection is configured with the correct 420 | %% membership information. 421 | lager:info("Waiting for partisan membership..."), 422 | ?assertEqual(ok, ?SUPPORT:wait_until_partisan_membership(SortedNodes)), 423 | 424 | %% Ensure we have the right number of connections. 425 | %% Verify appropriate number of connections. 426 | lager:info("Waiting for partisan connections..."), 427 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections(SortedNodes)), 428 | 429 | ?SUPPORT:stop(Nodes), 430 | 431 | ok. 432 | 433 | join_test(Config) -> 434 | Nodes = ?SUPPORT:start(join_test, 435 | Config, 436 | [{num_nodes, 3}, 437 | {partisan_peer_service_manager, 438 | partisan_default_peer_service_manager}]), 439 | 440 | ?SUPPORT:stop(Nodes), 441 | 442 | ok. 443 | 444 | vnode_test(Config) -> 445 | Nodes = ?SUPPORT:start(vnode_test, 446 | Config, 447 | [{partisan_peer_service_manager, 448 | partisan_default_peer_service_manager}]), 449 | 450 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 451 | 452 | %% Verify partisan connection is configured with the correct 453 | %% membership information. 454 | lager:info("Waiting for partisan membership..."), 455 | ?assertEqual(ok, ?SUPPORT:wait_until_partisan_membership(SortedNodes)), 456 | 457 | %% Ensure we have the right number of connections. 458 | %% Verify appropriate number of connections. 459 | lager:info("Waiting for partisan connections..."), 460 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections(SortedNodes)), 461 | 462 | %% Get the list of nodes. 463 | lager:info("Nodes is: ~p", [Nodes]), 464 | [{_, Node1}, {_, _Node2}, {_, _Node3}] = Nodes, 465 | 466 | %% Attempt to access the vnode request API. 467 | %% This will test command/3 and command/4 behavior. 468 | lager:info("Waiting for response from ping command..."), 469 | CommandResult = rpc:call(Node1, ?APP, ping, []), 470 | ?assertEqual(ok, CommandResult), 471 | 472 | %% Attempt to access the vnode request API. 473 | %% This will test sync_command/3 and sync_command/4 behavior. 
474 | lager:info("Waiting for response from sync_ping command..."), 475 | SyncCommandResult = rpc:call(Node1, ?APP, sync_ping, []), 476 | ?assertMatch({pong, _}, SyncCommandResult), 477 | 478 | %% Attempt to access the vnode request API. 479 | %% This will test sync_spawn_command/3 and sync_spawn_command/4 behavior. 480 | lager:info("Waiting for response from sync_spawn_ping command..."), 481 | SyncSpawnCommandResult = rpc:call(Node1, ?APP, sync_spawn_ping, []), 482 | ?assertMatch({pong, _}, SyncSpawnCommandResult), 483 | 484 | %% Test the echo functionality. 485 | lager:info("Waiting for response from echo command..."), 486 | EchoCommandResult = rpc:call(Node1, ?APP, echo, []), 487 | ?assertMatch(ok, EchoCommandResult), 488 | 489 | %% Attempt to access the vnode request API via FSM. 490 | lager:info("Waiting for response from fsm command..."), 491 | FsmResult = rpc:call(Node1, ?APP, fsm_ping, []), 492 | lager:info("FSM result: ~p", [FsmResult]), 493 | ?assertMatch(ok, FsmResult), 494 | 495 | ?SUPPORT:stop(Nodes), 496 | 497 | ok. -------------------------------------------------------------------------------- /test/partisan_support.erl: -------------------------------------------------------------------------------- 1 | -module(partisan_support). 2 | 3 | -export([start/3, node_list/3]). 4 | 5 | -define(APP, unir). 6 | -define(GOSSIP_INTERVAL, 1000). 7 | -define(PEER_IP, {127, 0, 0, 1}). 8 | -define(PEER_PORT, 9000). 9 | -define(PEER_SERVICE_SERVER, partisan_peer_service_server). 10 | -define(FANOUT, 5). 11 | -define(CACHE, partisan_connection_cache). 12 | -define(PARALLELISM, 1). 13 | -define(DEFAULT_CHANNEL, undefined). 14 | -define(DEFAULT_PARTITION_KEY, undefined). 15 | -define(CHANNELS, [?DEFAULT_CHANNEL]). 16 | -define(CONNECTION_JITTER, 1000). 17 | -define(DEFAULT_PEER_SERVICE_MANAGER, partisan_default_peer_service_manager). 18 | 19 | -define(UTIL, partisan_plumtree_util). 20 | -define(DEFAULT_LAZY_TICK_PERIOD, 1000). 21 | -define(DEFAULT_EXCHANGE_TICK_PERIOD, 10000). 22 | 23 | -include_lib("common_test/include/ct.hrl"). 24 | -include_lib("eunit/include/eunit.hrl"). 25 | -include_lib("kernel/include/inet.hrl"). 26 | 27 | %% @private 28 | start(_Case, Config, Options) -> 29 | %% Launch distribution for the test runner. 30 | ct:pal("Launching Erlang distribution..."), 31 | 32 | {ok, Hostname} = inet:gethostname(), 33 | os:cmd(os:find_executable("epmd") ++ " -daemon"), 34 | case net_kernel:start([list_to_atom("runner@" ++ Hostname), shortnames]) of 35 | {ok, _} -> 36 | ok; 37 | {error, {already_started, _}} -> 38 | ok 39 | end, 40 | 41 | %% Load sasl. 42 | application:load(sasl), 43 | ok = application:set_env(sasl, 44 | sasl_error_logger, 45 | false), 46 | application:start(sasl), 47 | 48 | %% Load lager. 49 | {ok, _} = application:ensure_all_started(lager), 50 | 51 | Servers = proplists:get_value(servers, Options, []), 52 | Clients = proplists:get_value(clients, Options, []), 53 | 54 | NodeNames = lists:flatten(Servers ++ Clients), 55 | 56 | %% Start all nodes. 57 | InitializerFun = fun(Name) -> 58 | ct:pal("Starting node: ~p", [Name]), 59 | 60 | NodeConfig = [{monitor_master, true}, 61 | {startup_functions, [{code, set_path, [codepath()]}]}], 62 | 63 | case ct_slave:start(Name, NodeConfig) of 64 | {ok, Node} -> 65 | {Name, Node}; 66 | Error -> 67 | ct:fail(Error) 68 | end 69 | end, 70 | Nodes = lists:map(InitializerFun, NodeNames), 71 | 72 | %% Load applications on all of the nodes. 
73 | LoaderFun = fun({_Name, Node}) -> 74 | ct:pal("Loading applications on node: ~p", [Node]), 75 | 76 | PrivDir = code:priv_dir(?APP), 77 | NodeDir = filename:join([PrivDir, "lager", Node]), 78 | 79 | %% Manually force sasl loading, and disable the logger. 80 | ok = rpc:call(Node, application, load, [sasl]), 81 | ok = rpc:call(Node, application, set_env, 82 | [sasl, sasl_error_logger, false]), 83 | ok = rpc:call(Node, application, start, [sasl]), 84 | 85 | ok = rpc:call(Node, application, load, [partisan]), 86 | ok = rpc:call(Node, application, load, [lager]), 87 | ok = rpc:call(Node, application, set_env, [sasl, 88 | sasl_error_logger, 89 | false]), 90 | ok = rpc:call(Node, application, set_env, [lager, 91 | log_root, 92 | NodeDir]) 93 | end, 94 | lists:map(LoaderFun, Nodes), 95 | 96 | %% Configure settings. 97 | ConfigureFun = fun({Name, Node}) -> 98 | %% Configure the peer service. 99 | PeerService = proplists:get_value(partisan_peer_service_manager, Options), 100 | ct:pal("Setting peer service manager on node ~p to ~p", [Node, PeerService]), 101 | ok = rpc:call(Node, partisan_config, set, 102 | [partisan_peer_service_manager, PeerService]), 103 | 104 | MaxActiveSize = proplists:get_value(max_active_size, Options, 5), 105 | ok = rpc:call(Node, partisan_config, set, 106 | [max_active_size, MaxActiveSize]), 107 | 108 | ok = rpc:call(Node, partisan_config, set, 109 | [gossip_interval, ?GOSSIP_INTERVAL]), 110 | 111 | ok = rpc:call(Node, application, set_env, [partisan, peer_ip, ?PEER_IP]), 112 | 113 | ForwardOptions = case ?config(forward_options, Config) of 114 | undefined -> 115 | []; 116 | FO -> 117 | FO 118 | end, 119 | ct:pal("Setting forward_options to: ~p", [ForwardOptions]), 120 | ok = rpc:call(Node, partisan_config, set, [forward_options, ForwardOptions]), 121 | 122 | %% If users call partisan directly, and you want to ensure you dispatch 123 | %% using partisan and not riak core, please toggle this option. 
124 | Disterl = case ?config(partisan_dispatch, Config) of 125 | true -> 126 | false; 127 | _ -> 128 | false 129 | end, 130 | ct:pal("Setting disterl to: ~p", [Disterl]), 131 | ok = rpc:call(Node, partisan_config, set, [disterl, Disterl]), 132 | 133 | BinaryPadding = case ?config(binary_padding, Config) of 134 | undefined -> 135 | false; 136 | BP -> 137 | BP 138 | end, 139 | ct:pal("Setting binary_padding to: ~p", [BinaryPadding]), 140 | ok = rpc:call(Node, partisan_config, set, [binary_padding, BinaryPadding]), 141 | 142 | Broadcast = case ?config(broadcast, Config) of 143 | undefined -> 144 | false; 145 | B -> 146 | B 147 | end, 148 | ct:pal("Setting broadcast to: ~p", [Broadcast]), 149 | ok = rpc:call(Node, partisan_config, set, [broadcast, Broadcast]), 150 | 151 | PidEncoding = case ?config(pid_encoding, Config) of 152 | undefined -> 153 | true; 154 | PE -> 155 | PE 156 | end, 157 | ct:pal("Setting pid_encoding to: ~p", [PidEncoding]), 158 | ok = rpc:call(Node, partisan_config, set, [pid_encoding, PidEncoding]), 159 | 160 | Channels = case ?config(channels, Config) of 161 | undefined -> 162 | ?CHANNELS; 163 | C -> 164 | C 165 | end, 166 | ct:pal("Setting channels to: ~p", [Channels]), 167 | ok = rpc:call(Node, partisan_config, set, [channels, Channels]), 168 | 169 | ok = rpc:call(Node, partisan_config, set, [tls, ?config(tls, Config)]), 170 | Parallelism = case ?config(parallelism, Config) of 171 | undefined -> 172 | ?PARALLELISM; 173 | P -> 174 | P 175 | end, 176 | ct:pal("Setting parallelism to: ~p", [Parallelism]), 177 | ok = rpc:call(Node, partisan_config, set, [parallelism, Parallelism]), 178 | 179 | Servers = proplists:get_value(servers, Options, []), 180 | Clients = proplists:get_value(clients, Options, []), 181 | 182 | %% Configure servers. 183 | case lists:member(Name, Servers) of 184 | true -> 185 | ok = rpc:call(Node, partisan_config, set, [tag, server]), 186 | ok = rpc:call(Node, partisan_config, set, [tls_options, ?config(tls_server_opts, Config)]); 187 | false -> 188 | ok 189 | end, 190 | 191 | %% Configure clients. 192 | case lists:member(Name, Clients) of 193 | true -> 194 | ok = rpc:call(Node, partisan_config, set, [tag, client]), 195 | ok = rpc:call(Node, partisan_config, set, [tls_options, ?config(tls_client_opts, Config)]); 196 | false -> 197 | ok 198 | end 199 | end, 200 | lists:foreach(ConfigureFun, Nodes), 201 | 202 | ct:pal("Starting nodes."), 203 | 204 | StartFun = fun({_Name, Node}) -> 205 | %% Start partisan. 206 | {ok, _} = rpc:call(Node, application, ensure_all_started, [partisan]), 207 | %% Start a dummy registered process that saves in the environment 208 | %% whatever message it gets, it will only do this *x* amount of times 209 | %% *x* being the number of nodes present in the cluster 210 | Pid = rpc:call(Node, erlang, spawn, 211 | [fun() -> 212 | lists:foreach(fun(_) -> 213 | receive 214 | {store, N} -> 215 | %% save the number in the environment 216 | application:set_env(partisan, forward_message_test, N) 217 | end 218 | end, lists:seq(1, length(NodeNames))) 219 | end]), 220 | true = rpc:call(Node, erlang, register, [store_proc, Pid]), 221 | ct:pal("registered store_proc on pid ~p, node ~p", 222 | [Pid, Node]) 223 | end, 224 | lists:foreach(StartFun, Nodes), 225 | 226 | ct:pal("Clustering nodes."), 227 | lists:foreach(fun(Node) -> cluster(Node, Nodes, Options, Config) end, Nodes), 228 | 229 | ct:pal("Partisan fully initialized."), 230 | 231 | Nodes. 232 | 233 | %% @private 234 | codepath() -> 235 | lists:filter(fun filelib:is_dir/1, code:get_path()). 
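%% The Options proplist handed to start/3 above also drives the overlay that
%% cluster/4 below computes. A minimal sketch of requesting a client/server
%% topology this way (the case name and node names here are hypothetical,
%% not taken from this suite):
%%
%%   partisan_support:start(example_case, Config,
%%       [{partisan_peer_service_manager,
%%         partisan_client_server_peer_service_manager},
%%        {servers, [server_1]},
%%        {clients, [client_1, client_2]}]).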
236 | 237 | %% @private 238 | %% 239 | %% We have to cluster each node with all other nodes to compute the 240 | %% correct overlay: for instance, sometimes you'll want to establish a 241 | %% client/server topology, which requires all nodes talk to every other 242 | %% node to correctly compute the overlay. 243 | %% 244 | cluster({Name, _Node} = Myself, Nodes, Options, Config) when is_list(Nodes) -> 245 | Manager = proplists:get_value(partisan_peer_service_manager, Options), 246 | 247 | Servers = proplists:get_value(servers, Options, []), 248 | Clients = proplists:get_value(clients, Options, []), 249 | 250 | AmIServer = lists:member(Name, Servers), 251 | AmIClient = lists:member(Name, Clients), 252 | 253 | OtherNodes = case Manager of 254 | partisan_default_peer_service_manager -> 255 | %% Omit just ourselves. 256 | omit([Name], Nodes); 257 | partisan_amqp_peer_service_manager -> 258 | %% Omit just ourselves. 259 | omit([Name], Nodes); 260 | partisan_client_server_peer_service_manager -> 261 | case {AmIServer, AmIClient} of 262 | {true, false} -> 263 | %% If I'm a server, I connect to both 264 | %% clients and servers! 265 | omit([Name], Nodes); 266 | {false, true} -> 267 | %% I'm a client, pick servers. 268 | omit(Clients, Nodes); 269 | {_, _} -> 270 | omit([Name], Nodes) 271 | end; 272 | partisan_hyparview_peer_service_manager -> 273 | case {AmIServer, AmIClient} of 274 | {true, false} -> 275 | %% If I'm a server, I connect to both 276 | %% clients and servers! 277 | omit([Name], Nodes); 278 | {false, true} -> 279 | %% I'm a client, pick servers. 280 | omit(Clients, Nodes); 281 | {_, _} -> 282 | omit([Name], Nodes) 283 | end 284 | end, 285 | lists:map(fun(OtherNode) -> cluster(Myself, OtherNode, Config) end, OtherNodes). 286 | cluster({_, Node}, {_, OtherNode}, Config) -> 287 | PeerPort = rpc:call(OtherNode, 288 | partisan_config, 289 | get, 290 | [peer_port, ?PEER_PORT]), 291 | Parallelism = case ?config(parallelism, Config) of 292 | undefined -> 293 | 1; 294 | P -> 295 | P 296 | end, 297 | Channels = case ?config(channels, Config) of 298 | undefined -> 299 | []; 300 | C -> 301 | C 302 | end, 303 | JoinMethod = case ?config(sync_join, Config) of 304 | undefined -> 305 | join; 306 | true -> 307 | sync_join 308 | end, 309 | ct:pal("Joining node: ~p to ~p at port ~p", [Node, OtherNode, PeerPort]), 310 | ok = rpc:call(Node, 311 | partisan_peer_service, 312 | JoinMethod, 313 | [#{name => OtherNode, 314 | listen_addrs => [#{ip => {127, 0, 0, 1}, port => PeerPort}], 315 | channels => Channels, 316 | parallelism => Parallelism}]). 317 | 318 | omit(OmitNameList, Nodes0) -> 319 | FoldFun = fun({Name, _Node} = N, Nodes) -> 320 | case lists:member(Name, OmitNameList) of 321 | true -> 322 | Nodes; 323 | false -> 324 | Nodes ++ [N] 325 | end 326 | end, 327 | lists:foldl(FoldFun, [], Nodes0). 328 | 329 | %% @private 330 | node_list(0, _Name, _Config) -> 331 | []; 332 | node_list(N, Name, Config) -> 333 | [ list_to_atom(string:join([Name, 334 | integer_to_list(?config(hash, Config)), 335 | integer_to_list(X)], 336 | "_")) || 337 | X <- lists:seq(1, N) ]. -------------------------------------------------------------------------------- /test/prop_unir_vnode.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher S. Meiklejohn. All Rights Reserved. 
4 | %%
5 | %% -------------------------------------------------------------------
6 |
7 | -module(prop_unir_vnode).
8 | -author("Christopher S. Meiklejohn ").
9 |
10 | -include_lib("proper/include/proper.hrl").
11 |
12 | -compile([export_all]).
13 |
14 | %% Application under test.
15 | -define(APP, unir). %% The name of the top-level application that requests
16 | %% should be issued to using the RPC mechanism.
17 |
18 | %% Configuration parameters.
19 | -define(PERFORM_BYZANTINE_NODE_FAULTS, false). %% Whether or not we should use cluster-specific byzantine faults,
20 | %% i.e. data loss bugs, bit flips, etc.
21 | -define(MONOTONIC_READS, false). %% Do we assume the system provides monotonic reads?
22 | -define(STRONG_READS, false). %% Do we assume the system provides strong reads?
23 | -define(NUM_NODES, 3).
24 | -define(NODE_DEBUG, true). %% Should we print out debugging information?
25 |
26 | %% Helpers.
27 | -define(ETS, prop_unir).
28 | -define(NAME, fun(Name) -> [{_, NodeName}] = ets:lookup(?ETS, Name), NodeName end).
29 |
30 | %%%===================================================================
31 | %%% Generators
32 | %%%===================================================================
33 |
34 | key() ->
35 | oneof([<<"key">>]).
36 |
37 | value() ->
38 | ?LET(Binary, binary(),
39 | {erlang:timestamp(), Binary}).
40 |
41 | node_name() ->
42 | ?LET(Names, names(), oneof(Names)).
43 |
44 | names() ->
45 | NameFun = fun(N) ->
46 | list_to_atom("node_" ++ integer_to_list(N))
47 | end,
48 | lists:map(NameFun, lists:seq(1, ?NUM_NODES)).
49 |
50 | %%%===================================================================
51 | %%% Node Functions
52 | %%%===================================================================
53 |
54 | %% What node-specific operations should be called.
55 | node_commands() ->
56 | ByzantineCommands = case ?PERFORM_BYZANTINE_NODE_FAULTS of
57 | true ->
58 | [
59 | {call, ?MODULE, induce_byzantine_disk_loss_fault, [node_name(), key()]},
60 | {call, ?MODULE, induce_byzantine_bit_flip_fault, [node_name(), key(), binary()]}
61 | ];
62 | false ->
63 | []
64 | end,
65 |
66 | [
67 | {call, ?MODULE, read_object, [node_name(), key()]},
68 | {call, ?MODULE, write_object, [node_name(), key(), value()]}
69 | ] ++
70 |
71 | ByzantineCommands.
72 |
73 | %% What should the initial node state be.
74 | node_initial_state() ->
75 | {dict:new(), dict:new()}.
76 |
77 | %% Names of the node functions so we know when we can dispatch to the node
78 | %% pre- and postconditions.
79 | node_functions() ->
80 | lists:map(fun({call, _Mod, Fun, _Args}) -> Fun end, node_commands()).
81 |
82 | %% Postconditions for node commands.
83 | node_postcondition({DatabaseState, ClientState}, {call, ?MODULE, read_object, [_Node, Key]}, {ok, Value}) ->
84 | node_debug("read_object: returned key ~p value ~p", [Key, Value]),
85 | %% Only pass acknowledged reads.
86 | StartingValues = [not_found], 87 | 88 | case dict:find(Key, DatabaseState) of 89 | {ok, KeyValues} -> 90 | ValueList = StartingValues ++ KeyValues, 91 | node_debug("read_object: looking for ~p in ~p", [Value, ValueList]), 92 | ItWasWritten = lists:member(Value, ValueList), 93 | node_debug("read_object: value read in write history: ~p", [ItWasWritten]), 94 | case ItWasWritten of 95 | true -> 96 | case ?MONOTONIC_READS of 97 | true -> 98 | is_monotonic_read(Key, Value, ClientState); 99 | false -> 100 | case ?STRONG_READS of 101 | true -> 102 | MostRecentValue = hd(lists:reverse(ValueList)), 103 | MostRecentValue =:= Value; 104 | false -> 105 | true 106 | end 107 | end; 108 | false -> 109 | false 110 | end; 111 | _ -> 112 | case Value of 113 | not_found -> 114 | node_debug("read_object: object wasn't written yet, not_found might be OK", []), 115 | is_monotonic_read(Key, Value, ClientState); 116 | _ -> 117 | node_debug("read_object: consistency violation, object was not written but was read", []), 118 | false 119 | end 120 | end; 121 | node_postcondition({_DatabaseState, _ClientState}, {call, ?MODULE, read_object, [Node, Key]}, {error, timeout}) -> 122 | node_debug("read_object ~p ~p timeout", [Node, Key]), 123 | %% Consider timeouts as failures for now. 124 | false; 125 | node_postcondition({_DatabaseState, _ClientState}, {call, ?MODULE, induce_byzantine_bit_flip_fault, [Node, Key, Value]}, ok) -> 126 | node_debug("induce_byzantine_bit_flip_fault: ~p ~p ~p", [Node, Key, Value]), 127 | true; 128 | node_postcondition({_DatabaseState, _ClientState}, {call, ?MODULE, induce_byzantine_disk_loss_fault, [Node, Key]}, ok) -> 129 | node_debug("induce_byzantine_disk_loss_fault: ~p ~p", [Node, Key]), 130 | true; 131 | node_postcondition({_DatabaseState, _ClientState}, {call, ?MODULE, write_object, [_Node, _Key, _Value]}, {ok, _Value}) -> 132 | node_debug("write_object returned ok", []), 133 | %% Only pass acknowledged writes. 134 | true; 135 | node_postcondition({_DatabaseState, _ClientState}, {call, ?MODULE, write_object, [Node, Key, _Value]}, {error, timeout}) -> 136 | node_debug("write_object ~p ~p timeout", [Node, Key]), 137 | %% Consider timeouts as failures for now. 138 | false. 139 | 140 | 141 | %% Next state. 142 | 143 | %% Failures don't modify what the state should be. 144 | node_next_state({DatabaseState, ClientState}, _Res, {call, ?MODULE, induce_byzantine_disk_loss_fault, [_Node, _Key]}) -> 145 | {DatabaseState, ClientState}; 146 | 147 | node_next_state({DatabaseState, ClientState}, _Res, {call, ?MODULE, induce_byzantine_bit_flip_fault, [_Node, _Key, _Value]}) -> 148 | {DatabaseState, ClientState}; 149 | 150 | %% Reads don't modify state. 151 | %% TODO: Advance client state on read. 152 | node_next_state({DatabaseState, ClientState}, _Res, {call, ?MODULE, read_object, [_Node, _Key]}) -> 153 | {DatabaseState, ClientState}; 154 | 155 | %% All we know is that the write was potentially acknowledged at some of the nodes. 156 | node_next_state({DatabaseState0, ClientState0}, {error, timeout}, {call, ?MODULE, write_object, [_Node, Key, Value]}) -> 157 | DatabaseState = dict:append_list(Key, [Value], DatabaseState0), 158 | {DatabaseState, ClientState0}; 159 | 160 | %% Write succeeded at all nodes. 161 | node_next_state({DatabaseState0, ClientState0}, _Res, {call, ?MODULE, write_object, [_Node, Key, Value]}) -> 162 | DatabaseState = dict:append_list(Key, [Value], DatabaseState0), 163 | ClientState = dict:store(Key, Value, ClientState0), 164 | {DatabaseState, ClientState}. 
165 | 166 | %% Precondition. 167 | node_precondition({_DatabaseState, _ClientState}, {call, _Mod, induce_byzantine_disk_loss_fault, [_Node, _Key]}) -> 168 | true; 169 | node_precondition({_DatabaseState, _ClientState}, {call, _Mod, induce_byzantine_bit_flip_fault, [_Node, _Key, _Value]}) -> 170 | true; 171 | node_precondition({_DatabaseState, _ClientState}, {call, _Mod, read_object, [_Node, _Key]}) -> 172 | true; 173 | node_precondition({_DatabaseState, _ClientState}, {call, _Mod, write_object, [_Node, _Key, _Value]}) -> 174 | true. 175 | 176 | %%%=================================================================== 177 | %%% Helper Functions 178 | %%%=================================================================== 179 | 180 | %% Should we do node debugging? 181 | node_debug(Line, Args) -> 182 | case ?NODE_DEBUG of 183 | true -> 184 | lager:info(Line, Args); 185 | false -> 186 | ok 187 | end. 188 | 189 | %% Determine if a read is monotonic or not? 190 | is_monotonic_read(Key, not_found, ClientState) -> 191 | case dict:find(Key, ClientState) of 192 | {ok, {_LastReadTimestamp, _LastReadBinary} = LastReadValue} -> 193 | node_debug("got not_found, should have read ~p", [LastReadValue]), 194 | false; 195 | _ -> 196 | true 197 | end; 198 | %% Old style tests. 199 | is_monotonic_read(_Key, Binary, _ClientState) when is_binary(Binary) -> 200 | true; 201 | is_monotonic_read(Key, {ReadTimestamp, _ReadBinary} = ReadValue, ClientState) -> 202 | case dict:find(Key, ClientState) of 203 | {ok, {LastReadTimestamp, _LastReadBinary} = LastReadValue} -> 204 | Result = timer:now_diff(ReadTimestamp, LastReadTimestamp) >= 0, 205 | node_debug("last read ~p now read ~p, result ~p", [LastReadValue, ReadValue, Result]), 206 | Result; 207 | _ -> 208 | true 209 | end. 210 | 211 | induce_byzantine_disk_loss_fault(Node, Key) -> 212 | rpc:call(?NAME(Node), ?APP, inject_failure, [Key, undefined]). 213 | 214 | induce_byzantine_bit_flip_fault(Node, Key, Value) -> 215 | rpc:call(?NAME(Node), ?APP, inject_failure, [Key, Value]). 216 | 217 | write_object(Node, Key, Value) -> 218 | node_debug("write_object: node ~p key ~p value ~p", [Node, Key, Value]), 219 | rpc:call(?NAME(Node), ?APP, fsm_put, [Key, Value]). 220 | 221 | read_object(Node, Key) -> 222 | node_debug("read_object: node ~p key ~p", [Node, Key]), 223 | rpc:call(?NAME(Node), ?APP, fsm_get, [Key]). -------------------------------------------------------------------------------- /test/proper-regressions.consult: -------------------------------------------------------------------------------- 1 | 2 | {prop_unir,prop_sequential, 3 | [[{set,{var,1}, 4 | {call,prop_unir,write_object, 5 | [node_3,<<"key">>,{{1528,254624,355698},<<>>}]}}]]}. 
6 | 7 | {prop_unir,prop_sequential, 8 | [[{set,{var,1},{call,prop_unir,read_object,[node_1,<<"key">>]}}, 9 | {set,{var,2},{call,prop_unir,read_object,[node_2,<<"key">>]}}, 10 | {set,{var,3}, 11 | {call,prop_unir,induce_sync_partition,[node_1,node_3]}}, 12 | {set,{var,4}, 13 | {call,prop_unir,induce_sync_partition,[node_1,node_2]}}, 14 | {set,{var,5}, 15 | {call,prop_unir,write_object, 16 | [node_2,<<"key">>,{{1528,268940,839269},<<215,26>>}]}}, 17 | {set,{var,6},{call,prop_unir,read_object,[node_3,<<"key">>]}}, 18 | {set,{var,7}, 19 | {call,prop_unir,resolve_sync_partition,[node_1,node_3]}}, 20 | {set,{var,8}, 21 | {call,prop_unir,write_object, 22 | [node_2,<<"key">>, 23 | {{1528,268940,841591},<<24,99,88,246,70,37>>}]}}, 24 | {set,{var,9},{call,prop_unir,read_object,[node_3,<<"key">>]}}, 25 | {set,{var,10}, 26 | {call,prop_unir,write_object, 27 | [node_3,<<"key">>, 28 | {{1528,268940,843086}, 29 | <<151,119,205,24,47,253,35,157,34,218,84,177>>}]}}]]}. 30 | 31 | {prop_unir,prop_sequential, 32 | [[{set,{var,1}, 33 | {call,prop_unir,induce_sync_partition,[node_3,node_1]}}, 34 | {set,{var,2}, 35 | {call,prop_unir,write_object, 36 | [node_1,<<"key">>,{{1528,392754,379054},<<>>}]}}, 37 | {set,{var,3}, 38 | {call,prop_unir,induce_byzantine_fault,[node_1,<<"key">>]}}, 39 | {set,{var,4},{call,prop_unir,read_object,[node_3,<<"key">>]}}]]}. 40 | 41 | {prop_unir,prop_sequential, 42 | [[{set,{var,1}, 43 | {call,prop_unir,write_object, 44 | [node_1,<<"key">>, 45 | {{1528,523368,74593}, 46 | <<172,103,177,59,235,214,85,178,131,111,116,157>>}]}}, 47 | {set,{var,2},{call,prop_unir,read_object,[node_2,<<"key">>]}}, 48 | {set,{var,3},{call,prop_unir,read_object,[node_1,<<"key">>]}}, 49 | {set,{var,4}, 50 | {call,prop_unir,induce_byzantine_fault,[node_3,<<"key">>]}}, 51 | {set,{var,5}, 52 | {call,prop_unir,induce_sync_partition,[node_2,node_3]}}, 53 | {set,{var,6}, 54 | {call,prop_unir,resolve_sync_partition,[node_3,node_2]}}, 55 | {set,{var,7},{call,prop_unir,read_object,[node_2,<<"key">>]}}, 56 | {set,{var,8}, 57 | {call,prop_unir,induce_byzantine_fault,[node_1,<<"key">>]}}, 58 | {set,{var,9}, 59 | {call,prop_unir,induce_byzantine_fault,[node_3,<<"key">>]}}, 60 | {set,{var,10}, 61 | {call,prop_unir,induce_sync_partition,[node_3,node_1]}}]]}. 62 | 63 | {prop_unir,prop_sequential, 64 | [[{set,{var,1},{call,prop_unir,read_object,[node_2,<<"key">>]}}, 65 | {set,{var,2}, 66 | {call,prop_unir,induce_byzantine_fault,[node_1,<<"key">>]}}, 67 | {set,{var,3},{call,prop_unir,read_object,[node_2,<<"key">>]}}]]}. 68 | 69 | {prop_unir,prop_sequential, 70 | [[{set,{var,1}, 71 | {call,prop_unir,write_object, 72 | [node_1,<<"key">>,{{1529,728744,503235},<<"`e">>}]}}, 73 | {set,{var,2}, 74 | {call,prop_unir,induce_byzantine_bit_flip_fault, 75 | [node_3,<<"key">>,<<"î">>]}}, 76 | {set,{var,3},{call,prop_unir,read_object,[node_1,<<"key">>]}}]]}. 77 | 78 | {prop_unir,prop_sequential, 79 | [[{set,{var,1},{call,prop_unir,read_object,[node_1,<<"key">>]}}, 80 | {set,{var,2}, 81 | {call,prop_unir,induce_byzantine_message_corruption_fault, 82 | [node_2,node_3,{{1529,737690,533730},<<"`G">>}]}}, 83 | {set,{var,3}, 84 | {call,prop_unir,write_object, 85 | [node_2,<<"key">>, 86 | {{1529,737690,535014},<<"ýoû">>}]}}]]}. 
87 | 88 | {prop_unir,prop_sequential, 89 | [[{set,{var,1}, 90 | {call,prop_unir,induce_byzantine_bit_flip_fault, 91 | [node_2,<<"key">>,<<184,198,165,14>>]}}, 92 | {set,{var,2}, 93 | {call,prop_unir,induce_byzantine_disk_loss_fault, 94 | [node_2,<<"key">>]}}, 95 | {set,{var,3}, 96 | {call,prop_unir,induce_byzantine_bit_flip_fault, 97 | [node_2,<<"key">>,<<150,22,165,102,25>>]}}, 98 | {set,{var,4}, 99 | {call,prop_unir,induce_byzantine_disk_loss_fault, 100 | [node_3,<<"key">>]}}, 101 | {set,{var,5},{call,prop_unir,read_object,[node_2,<<"key">>]}}, 102 | {set,{var,6}, 103 | {call,prop_unir,write_object, 104 | [node_2,<<"key">>, 105 | {{1531,313406,774094}, 106 | <<13,3,139,45,42,154,24,104>>}]}}, 107 | {set,{var,7}, 108 | {call,prop_unir,induce_byzantine_message_corruption_fault, 109 | [node_1,node_2,{{1531,313406,776285},<<220,23>>}]}}, 110 | {set,{var,8},{call,prop_unir,read_object,[node_2,<<"key">>]}}]]}. 111 | 112 | {prop_unir,prop_sequential, 113 | [[{set,{var,1}, 114 | {call,prop_unir,induce_sync_partition,[node_2,node_1]}}, 115 | {set,{var,2}, 116 | {call,prop_unir,induce_byzantine_disk_loss_fault, 117 | [node_1,<<"key">>]}}, 118 | {set,{var,3}, 119 | {call,prop_unir,induce_byzantine_bit_flip_fault, 120 | [node_2,<<"key">>,<<191,127,130,188,67>>]}}, 121 | {set,{var,4}, 122 | {call,prop_unir,induce_byzantine_bit_flip_fault, 123 | [node_2,<<"key">>, 124 | <<169,122,19,124,18,136,241,167,60,219,26,37,40, 125 | 234,217,127,178,225,65,83,85,40,129,236,11,13>>]}}, 126 | {set,{var,5},{call,prop_unir,read_object,[node_1,<<"key">>]}}, 127 | {set,{var,6}, 128 | {call,prop_unir,induce_sync_partition,[node_2,node_3]}}, 129 | {set,{var,7}, 130 | {call,prop_unir,write_object, 131 | [node_2,<<"key">>, 132 | {{1532,204262,956872}, 133 | <<44,16,99,145,76,142,245,24,135,109,236,250,99, 134 | 249,44,138,3,235,216,127,88,151>>}]}}, 135 | {set,{var,8}, 136 | {call,prop_unir,induce_byzantine_disk_loss_fault, 137 | [node_1,<<"key">>]}}, 138 | {set,{var,9}, 139 | {call,prop_unir,induce_byzantine_bit_flip_fault, 140 | [node_1,<<"key">>, 141 | <<98,141,208,167,84,245,197,43,67,35,176,81,5,34,7, 142 | 14,122,120,24,241,135,126,118,19,136,117>>]}}, 143 | {set,{var,10}, 144 | {call,prop_unir,induce_byzantine_disk_loss_fault, 145 | [node_2,<<"key">>]}}, 146 | {set,{var,11},{call,prop_unir,read_object,[node_2,<<"key">>]}}, 147 | {set,{var,12}, 148 | {call,prop_unir,resolve_sync_partition,[node_1,node_2]}}, 149 | {set,{var,13},{call,prop_unir,read_object,[node_1,<<"key">>]}}, 150 | {set,{var,14}, 151 | {call,prop_unir,induce_byzantine_disk_loss_fault, 152 | [node_2,<<"key">>]}}, 153 | {set,{var,15}, 154 | {call,prop_unir,write_object, 155 | [node_3,<<"key">>,{{1532,204262,995316},<<"[¶Ü/">>}]}}, 156 | {set,{var,16}, 157 | {call,prop_unir,induce_byzantine_disk_loss_fault, 158 | [node_3,<<"key">>]}}, 159 | {set,{var,17}, 160 | {call,prop_unir,resolve_sync_partition,[node_2,node_3]}}, 161 | {set,{var,18},{call,prop_unir,read_object,[node_2,<<"key">>]}}, 162 | {set,{var,19}, 163 | {call,prop_unir,induce_byzantine_disk_loss_fault, 164 | [node_2,<<"key">>]}}, 165 | {set,{var,20},{call,prop_unir,read_object,[node_1,<<"key">>]}}, 166 | {set,{var,21}, 167 | {call,prop_unir,induce_byzantine_disk_loss_fault, 168 | [node_1,<<"key">>]}}, 169 | {set,{var,22},{call,prop_unir,read_object,[node_1,<<"key">>]}}]]}. 
170 | 171 | {prop_unir,prop_sequential, 172 | [[{set,{var,1},{call,prop_unir,read_object,[node_3,<<"key">>]}}, 173 | {set,{var,2}, 174 | {call,prop_unir,induce_byzantine_disk_loss_fault, 175 | [node_1,<<"key">>]}}, 176 | {set,{var,3}, 177 | {call,prop_unir,write_object, 178 | [node_3,<<"key">>, 179 | {{1532,225382,870009}, 180 | <<4,118,165,143,217,102,89,45,155,156,6,66,50, 181 | 236,160,89,54,51,161,154,110,140,143,205,55, 182 | 124,142>>}]}}, 183 | {set,{var,4}, 184 | {call,prop_unir,induce_byzantine_bit_flip_fault, 185 | [node_3,<<"key">>, 186 | <<21,9,228,216,1,184,1,158,54,51,2,28,152,10,33>>]}}, 187 | {set,{var,5},{call,prop_unir,read_object,[node_1,<<"key">>]}}, 188 | {set,{var,6},{call,prop_unir,read_object,[node_3,<<"key">>]}}, 189 | {set,{var,7}, 190 | {call,prop_unir,write_object, 191 | [node_2,<<"key">>, 192 | {{1532,225382,875343}, 193 | <<3,3,37,25,87,111,145,249,92,195,115,244,31,98, 194 | 191,10,62,155,105,72,216,38,80,141,11,48,39,215>>}]}}, 195 | {set,{var,8}, 196 | {call,prop_unir,induce_byzantine_disk_loss_fault, 197 | [node_1,<<"key">>]}}, 198 | {set,{var,9}, 199 | {call,prop_unir,induce_byzantine_disk_loss_fault, 200 | [node_3,<<"key">>]}}]]}. 201 | -------------------------------------------------------------------------------- /test/throughput_SUITE.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2017 Christopher S. Meiklejohn. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | %% 21 | 22 | -module(throughput_SUITE). 23 | -author("Christopher S. Meiklejohn "). 24 | 25 | %% common_test callbacks 26 | -export([suite/0, 27 | init_per_suite/1, 28 | end_per_suite/1, 29 | init_per_testcase/2, 30 | end_per_testcase/2, 31 | all/0, 32 | groups/0, 33 | init_per_group/2]). 34 | 35 | %% tests 36 | -compile([export_all]). 37 | 38 | -include_lib("common_test/include/ct.hrl"). 39 | -include_lib("eunit/include/eunit.hrl"). 40 | -include_lib("kernel/include/inet.hrl"). 41 | 42 | -define(SUPPORT, support). 43 | 44 | %% =================================================================== 45 | %% common_test callbacks 46 | %% =================================================================== 47 | 48 | suite() -> 49 | [{timetrap, {hours, 10}}]. 50 | 51 | init_per_suite(_Config) -> 52 | _Config. 53 | 54 | end_per_suite(_Config) -> 55 | _Config. 56 | 57 | init_per_testcase(Case, Config) -> 58 | ct:pal("Beginning test case ~p", [Case]), 59 | [{hash, erlang:phash2({Case, Config})}|Config]. 60 | 61 | end_per_testcase(Case, Config) -> 62 | ct:pal("Ending test case ~p", [Case]), 63 | Config. 
64 | 65 | init_per_group(disterl, Config) -> 66 | Config; 67 | 68 | init_per_group(partisan, Config) -> 69 | [{pid_encoding, false}, {partisan_dispatch, true}] ++ Config; 70 | 71 | init_per_group(partisan_with_parallelism, Config) -> 72 | parallelism() ++ init_per_group(partisan, Config); 73 | init_per_group(partisan_with_channels, Config) -> 74 | channels() ++ init_per_group(partisan, Config); 75 | init_per_group(partisan_with_monotonic_channels, Config) -> 76 | monotonic_channels() ++ init_per_group(partisan, Config); 77 | init_per_group(partisan_with_partitioned_parallelism, Config) -> 78 | parallelism() ++ [{vnode_partitioning, true}] ++ init_per_group(partisan, Config); 79 | init_per_group(partisan_with_partitioned_parallelism_and_channels, Config) -> 80 | channels() ++ parallelism() ++ [{vnode_partitioning, true}] ++ init_per_group(partisan, Config); 81 | init_per_group(partisan_with_partitioned_parallelism_and_channels_and_monotonic_channels, Config) -> 82 | monotonic_channels() ++ parallelism() ++ [{vnode_partitioning, true}] ++ init_per_group(partisan, Config); 83 | init_per_group(partisan_with_binary_padding, Config) -> 84 | [{binary_padding, true}] ++ init_per_group(partisan, Config); 85 | init_per_group(partisan_with_vnode_partitioning, Config) -> 86 | [{vnode_partitioning, true}] ++ init_per_group(partisan, Config); 87 | 88 | init_per_group(bench, Config) -> 89 | ?SUPPORT:bench_config() ++ Config; 90 | 91 | init_per_group(_, Config) -> 92 | Config. 93 | 94 | end_per_group(_, _Config) -> 95 | ok. 96 | 97 | all() -> 98 | [ 99 | {group, default, []} 100 | ]. 101 | 102 | groups() -> 103 | [ 104 | {bench, [], 105 | [%% bench_test, 106 | fsm_performance_test, 107 | partisan_performance_test, 108 | echo_performance_test]}, 109 | 110 | {default, [], 111 | [{group, bench}] }, 112 | 113 | {disterl, [], 114 | [{group, bench}] }, 115 | 116 | {partisan, [], 117 | [{group, bench}]}, 118 | 119 | {partisan_with_parallelism, [], 120 | [{group, bench}]}, 121 | 122 | {partisan_with_channels, [], 123 | [{group, bench}]}, 124 | 125 | {partisan_with_monotonic_channels, [], 126 | [{group, bench}]}, 127 | 128 | {partisan_with_partitioned_parallelism, [], 129 | [{group, bench}]}, 130 | 131 | {partisan_with_partitioned_parallelism_and_channels, [], 132 | [{group, bench}]}, 133 | 134 | {partisan_with_partitioned_parallelism_and_channels_and_monotonic_channels, [], 135 | [{group, bench}]}, 136 | 137 | {partisan_with_binary_padding, [], 138 | [{group, bench}]}, 139 | 140 | {partisan_with_vnode_partitioning, [], 141 | [{group, bench}]} 142 | ]. 143 | 144 | %% =================================================================== 145 | %% Tests. 146 | %% =================================================================== 147 | 148 | partisan_performance_test(Config) -> 149 | Manager = partisan_default_peer_service_manager, 150 | 151 | Nodes = case os:getenv("PARTISAN_INIT", false) of 152 | "true" -> 153 | %% Specify servers. 154 | Servers = partisan_support:node_list(1, "server", Config), 155 | 156 | %% Specify clients. 157 | Clients = partisan_support:node_list(1, "client", Config), 158 | 159 | %% Start nodes. 
160 | partisan_support:start(partisan_performance_test, Config, 161 | [{partisan_peer_service_manager, Manager}, 162 | {servers, Servers}, 163 | {clients, Clients}]); 164 | _ -> 165 | ?SUPPORT:start(partisan_performance_test, 166 | Config, 167 | [{num_nodes, 3}, 168 | {partisan_peer_service_manager, Manager}]) 169 | end, 170 | 171 | App = case os:getenv("PARTISAN_INIT", false) of 172 | "true" -> 173 | partisan; 174 | _ -> 175 | riak_core 176 | end, 177 | 178 | ct:pal("Configuration: ~p", [Config]), 179 | 180 | %% Pause for clustering. 181 | timer:sleep(1000), 182 | 183 | [{_, Node1}, {_, Node2}|_] = Nodes, 184 | 185 | Profile = case os:getenv("PROFILE", false) of 186 | "true" -> 187 | ct:pal("Enabling profiling!"), 188 | true; 189 | _ -> 190 | ct:pal("Disabling profiling!"), 191 | false 192 | end, 193 | 194 | case Profile of 195 | true -> 196 | rpc:call(Node1, eprof, start, []); 197 | _ -> 198 | ok 199 | end, 200 | 201 | %% One process per connection. 202 | Concurrency = case os:getenv("CONCURRENCY", "1") of 203 | undefined -> 204 | 1; 205 | C -> 206 | list_to_integer(C) 207 | end, 208 | 209 | %% Latency. 210 | Latency = case os:getenv("LATENCY", "0") of 211 | undefined -> 212 | 0; 213 | L -> 214 | list_to_integer(L) 215 | end, 216 | 217 | %% Size. 218 | Size = case os:getenv("SIZE", "1024") of 219 | undefined -> 220 | 0; 221 | S -> 222 | list_to_integer(S) 223 | end, 224 | 225 | %% Parallelism. 226 | Parallelism = case rpc:call(Node1, partisan_config, get, [parallelism]) of 227 | undefined -> 228 | 1; 229 | P -> 230 | P 231 | end, 232 | 233 | NumMessages = 1000, 234 | BenchPid = self(), 235 | BytesSize = Size * 1024, 236 | 237 | %% Prime a binary at each node. 238 | ct:pal("Generating binaries!"), 239 | EchoBinary = rand_bits(BytesSize * 8), 240 | 241 | %% Spawn processes to send receive messages on node 1. 242 | ct:pal("Spawning processes."), 243 | SenderPids = lists:map(fun(SenderNum) -> 244 | ReceiverFun = fun() -> 245 | receiver(Manager, BenchPid, NumMessages) 246 | end, 247 | ReceiverPid = rpc:call(Node2, erlang, spawn, [ReceiverFun]), 248 | 249 | SenderFun = fun() -> 250 | init_sender(EchoBinary, Manager, Node2, ReceiverPid, SenderNum, NumMessages) 251 | end, 252 | SenderPid = rpc:call(Node1, erlang, spawn, [SenderFun]), 253 | SenderPid 254 | end, lists:seq(1, Concurrency)), 255 | 256 | %% Start bench. 257 | ProfileFun = fun() -> 258 | %% Start sending. 259 | lists:foreach(fun(SenderPid) -> 260 | SenderPid ! start 261 | end, SenderPids), 262 | 263 | %% Wait for them all. 264 | bench_receiver(Concurrency) 265 | end, 266 | {Time, _Value} = timer:tc(ProfileFun), 267 | 268 | %% Write results. 
269 | RootDir = root_dir(Config), 270 | ResultsFile = RootDir ++ "results.csv", 271 | ct:pal("Writing results to: ~p", [ResultsFile]), 272 | {ok, FileHandle} = file:open(ResultsFile, [append]), 273 | Backend = case ?config(partisan_dispatch, Config) of 274 | true -> 275 | partisan; 276 | _ -> 277 | disterl 278 | end, 279 | NumChannels = case ?config(channels, Config) of 280 | undefined -> 281 | 1; 282 | [] -> 283 | 1; 284 | List -> 285 | length(List) 286 | end, 287 | Partitioned = case ?config(vnode_partitioning, Config) of 288 | undefined -> 289 | false; 290 | VP -> 291 | VP 292 | end, 293 | io:format(FileHandle, "~p,~p,~p,~p,~p,~p,~p,~p,~p,~p,~p~n", [App, Backend, Concurrency, NumChannels, false, Parallelism, Partitioned, BytesSize, NumMessages, Latency, Time]), 294 | file:close(FileHandle), 295 | 296 | case Profile of 297 | true -> 298 | ProfileFile = RootDir ++ "eprof/" ++ atom_to_list(App) ++ "-" ++ atom_to_list(Backend) ++ "-" ++ integer_to_list(Parallelism), 299 | ct:pal("Outputting profile results to file: ~p", [ProfileFile]), 300 | rpc:call(Node1, eprof, stop_profiling, []), 301 | rpc:call(Node1, eprof, log, [ProfileFile]), 302 | rpc:call(Node1, eprof, analyze, []); 303 | _ -> 304 | ok 305 | end, 306 | 307 | ct:pal("Time: ~p", [Time]), 308 | 309 | %% Stop nodes. 310 | ?SUPPORT:stop(Nodes), 311 | 312 | ok. 313 | 314 | echo_performance_test(Config) -> 315 | Manager = partisan_default_peer_service_manager, 316 | 317 | Nodes = ?SUPPORT:start(echo_performance_test, 318 | Config, 319 | [{num_nodes, 3}, 320 | {partisan_peer_service_manager, Manager}]), 321 | 322 | ct:pal("Configuration: ~p", [Config]), 323 | 324 | %% Pause for clustering. 325 | timer:sleep(1000), 326 | 327 | [{_, Node1}|_] = Nodes, 328 | 329 | %% One process per connection. 330 | Concurrency = case os:getenv("CONCURRENCY", "1") of 331 | undefined -> 332 | 1; 333 | C -> 334 | list_to_integer(C) 335 | end, 336 | 337 | %% Latency. 338 | Latency = case os:getenv("LATENCY", "0") of 339 | undefined -> 340 | 0; 341 | L -> 342 | list_to_integer(L) 343 | end, 344 | 345 | %% Size. 346 | Size = case os:getenv("SIZE", "1024") of 347 | undefined -> 348 | 0; 349 | S -> 350 | list_to_integer(S) 351 | end, 352 | 353 | %% Parallelism. 354 | Parallelism = case rpc:call(Node1, partisan_config, get, [parallelism]) of 355 | undefined -> 356 | 1; 357 | P -> 358 | P 359 | end, 360 | 361 | NumMessages = 1000, 362 | BenchPid = self(), 363 | BytesSize = Size * 1024, 364 | 365 | %% Prime a binary at each node. 366 | ct:pal("Generating binaries!"), 367 | EchoBinary = rand_bits(BytesSize * 8), 368 | 369 | %% Spawn processes to send receive messages on node 1. 370 | ct:pal("Spawning processes."), 371 | SenderPids = lists:flatmap(fun({_, Node}) -> 372 | lists:map(fun(SenderNum) -> 373 | SenderFun = fun() -> 374 | init_echo_sender(BenchPid, SenderNum, EchoBinary, NumMessages) 375 | end, 376 | rpc:call(Node, erlang, spawn, [SenderFun]) 377 | end, lists:seq(1, Concurrency)) 378 | end, Nodes), 379 | 380 | %% Start bench. 381 | ProfileFun = fun() -> 382 | %% Start sending. 383 | lists:foreach(fun(SenderPid) -> 384 | SenderPid ! start 385 | end, SenderPids), 386 | 387 | %% Wait for them all. 388 | bench_receiver(length(SenderPids)) 389 | end, 390 | {Time, _Value} = timer:tc(ProfileFun), 391 | 392 | %% Write results. 
393 | RootDir = root_dir(Config), 394 | ResultsFile = RootDir ++ "results.csv", 395 | ct:pal("Writing results to: ~p", [ResultsFile]), 396 | {ok, FileHandle} = file:open(ResultsFile, [append]), 397 | Backend = case ?config(partisan_dispatch, Config) of 398 | true -> 399 | partisan; 400 | _ -> 401 | disterl 402 | end, 403 | NumChannels = case ?config(channels, Config) of 404 | undefined -> 405 | 1; 406 | [] -> 407 | 1; 408 | List -> 409 | length(List) 410 | end, 411 | Partitioned = case ?config(vnode_partitioning, Config) of 412 | undefined -> 413 | false; 414 | VP -> 415 | VP 416 | end, 417 | MonotonicChannels = case ?config(monotonic_channels, Config) of 418 | undefined -> 419 | false; 420 | MC -> 421 | MC 422 | end, 423 | io:format(FileHandle, "~p,~p,~p,~p,~p,~p,~p,~p,~p,~p,~p~n", [echo, Backend, Concurrency, NumChannels, MonotonicChannels, Parallelism, Partitioned, BytesSize, NumMessages, Latency, Time]), 424 | file:close(FileHandle), 425 | 426 | ct:pal("Time: ~p", [Time]), 427 | 428 | %% Stop nodes. 429 | ?SUPPORT:stop(Nodes), 430 | 431 | ok. 432 | 433 | fsm_performance_test(Config) -> 434 | Manager = partisan_default_peer_service_manager, 435 | 436 | Nodes = ?SUPPORT:start(fsm_performance_test, 437 | Config, 438 | [{num_nodes, 3}, 439 | {partisan_peer_service_manager, Manager}]), 440 | 441 | ct:pal("Configuration: ~p", [Config]), 442 | 443 | %% Pause for clustering. 444 | timer:sleep(1000), 445 | 446 | [{_, Node1}|_] = Nodes, 447 | 448 | %% One process per connection. 449 | Concurrency = case os:getenv("CONCURRENCY", "1") of 450 | undefined -> 451 | 1; 452 | C -> 453 | list_to_integer(C) 454 | end, 455 | 456 | %% Latency. 457 | Latency = case os:getenv("LATENCY", "0") of 458 | undefined -> 459 | 0; 460 | L -> 461 | list_to_integer(L) 462 | end, 463 | 464 | %% Size. 465 | Size = case os:getenv("SIZE", "1024") of 466 | undefined -> 467 | 0; 468 | S -> 469 | list_to_integer(S) 470 | end, 471 | 472 | %% Parallelism. 473 | Parallelism = case rpc:call(Node1, partisan_config, get, [parallelism]) of 474 | undefined -> 475 | 1; 476 | P -> 477 | P 478 | end, 479 | 480 | NumMessages = 1000, 481 | BenchPid = self(), 482 | BytesSize = Size * 1024, 483 | 484 | %% Prime a binary at each node. 485 | ct:pal("Generating binaries!"), 486 | EchoBinary = rand_bits(BytesSize * 8), 487 | 488 | %% Spawn processes to send receive messages on node 1. 489 | ct:pal("Spawning processes."), 490 | SenderPids = lists:flatmap(fun({_, Node}) -> 491 | lists:map(fun(SenderNum) -> 492 | SenderFun = fun() -> 493 | init_fsm_sender(BenchPid, SenderNum, EchoBinary, NumMessages) 494 | end, 495 | rpc:call(Node, erlang, spawn, [SenderFun]) 496 | end, lists:seq(1, Concurrency)) 497 | end, Nodes), 498 | 499 | %% Start bench. 500 | ProfileFun = fun() -> 501 | %% Start sending. 502 | lists:foreach(fun(SenderPid) -> 503 | SenderPid ! start 504 | end, SenderPids), 505 | 506 | %% Wait for them all. 507 | bench_receiver(length(SenderPids)) 508 | end, 509 | {Time, Value} = timer:tc(ProfileFun), 510 | 511 | %% Write results. 
512 | RootDir = root_dir(Config), 513 | ResultsFile = RootDir ++ "results.csv", 514 | ct:pal("Writing results to: ~p", [ResultsFile]), 515 | {ok, FileHandle} = file:open(ResultsFile, [append]), 516 | Backend = case ?config(partisan_dispatch, Config) of 517 | true -> 518 | partisan; 519 | _ -> 520 | disterl 521 | end, 522 | NumChannels = case ?config(channels, Config) of 523 | undefined -> 524 | 1; 525 | [] -> 526 | 1; 527 | List -> 528 | length(List) 529 | end, 530 | MonotonicChannels = case ?config(monotonic_channels, Config) of 531 | undefined -> 532 | false; 533 | MC -> 534 | MC 535 | end, 536 | Partitioned = case ?config(vnode_partitioning, Config) of 537 | undefined -> 538 | false; 539 | VP -> 540 | VP 541 | end, 542 | io:format(FileHandle, "~p,~p,~p,~p,~p,~p,~p,~p,~p,~p,~p~n", [kvs, Backend, Concurrency, NumChannels, MonotonicChannels, Parallelism, Partitioned, BytesSize, NumMessages, Latency, Time]), 543 | file:close(FileHandle), 544 | 545 | ct:pal("Value: ~p, Time: ~p", [Value, Time]), 546 | 547 | %% Stop nodes. 548 | ?SUPPORT:stop(Nodes), 549 | 550 | ok. 551 | 552 | bench_test(Config0) -> 553 | RootDir = ?SUPPORT:root_dir(Config0), 554 | 555 | ct:pal("Configuration was: ~p", [Config0]), 556 | 557 | Config = case file:consult(RootDir ++ "config/test.config") of 558 | {ok, Terms} -> 559 | ct:pal("Read terms configuration: ~p", [Terms]), 560 | Terms ++ Config0; 561 | {error, Reason} -> 562 | ct:fail("Could not open the terms configuration: ~p", [Reason]), 563 | ok 564 | end, 565 | 566 | ct:pal("Configuration is now: ~p", [Config]), 567 | 568 | Nodes = ?SUPPORT:start(bench_test, 569 | Config, 570 | [{num_nodes, 3}, 571 | {partisan_peer_service_manager, 572 | partisan_default_peer_service_manager}]), 573 | 574 | ct:pal("Configuration: ~p", [Config]), 575 | 576 | RootDir = ?SUPPORT:root_dir(Config), 577 | 578 | %% Configure parameters. 579 | ResultsParameters = case proplists:get_value(partisan_dispatch, Config, false) of 580 | true -> 581 | BinaryPadding = case proplists:get_value(binary_padding, Config, false) of 582 | true -> 583 | "binary-padding"; 584 | false -> 585 | "no-binary-padding" 586 | end, 587 | 588 | VnodePartitioning = case proplists:get_value(vnode_partitioning, Config, true) of 589 | true -> 590 | "vnode-partitioning"; 591 | false -> 592 | "no-vnode-partitioning" 593 | end, 594 | 595 | Parallelism = case proplists:get_value(parallelism, Config, 1) of 596 | 1 -> 597 | "parallelism-" ++ integer_to_list(1); 598 | P -> 599 | "parallelism-" ++ integer_to_list(P) 600 | end, 601 | 602 | "partisan-" ++ BinaryPadding ++ "-" ++ VnodePartitioning ++ "-" ++ Parallelism; 603 | false -> 604 | "disterl" 605 | end, 606 | 607 | %% Get benchmark configuration. 608 | BenchConfig = ?config(bench_config, Config), 609 | 610 | %% Consult the benchmark file for benchmark terms. 611 | BenchConfigTerms = case file:consult(RootDir ++ "examples/" ++ BenchConfig) of 612 | {ok, BenchTerms} -> 613 | ct:pal("Read bench terms configuration: ~p", [BenchTerms]), 614 | BenchTerms; 615 | {error, BenchErrorReason} -> 616 | ct:fail("Could not open the bench terms configuration: ~p", [BenchErrorReason]), 617 | ok 618 | end, 619 | {fixed_bin, Size} = proplists:get_value(value_generator, BenchConfigTerms, undefined), 620 | TestType = proplists:get_value(type, BenchConfigTerms, undefined), 621 | 622 | %% Configure the echo terms. 623 | ConfigureFun = fun({_, N}) -> 624 | %% Store the echo binary. 
625 | ct:pal("Storing ~p byte object in the echo binary storage.", [Size]), 626 | EchoBinary = rand_bits(Size * 8), 627 | ok = rpc:call(N, partisan_config, set, [echo_binary, EchoBinary]) 628 | end, 629 | lists:foreach(ConfigureFun, Nodes), 630 | 631 | %% Select the node configuration. 632 | SortedNodes = lists:usort([Node || {_Name, Node} <- Nodes]), 633 | 634 | %% Verify partisan connection is configured with the correct 635 | %% membership information. 636 | ct:pal("Waiting for partisan membership..."), 637 | ?assertEqual(ok, ?SUPPORT:wait_until_partisan_membership(SortedNodes)), 638 | 639 | %% Ensure we have the right number of connections. 640 | %% Verify appropriate number of connections. 641 | ct:pal("Waiting for partisan connections..."), 642 | ?assertEqual(ok, ?SUPPORT:wait_until_all_connections(SortedNodes)), 643 | 644 | %% Configure bench paths. 645 | BenchDir = RootDir ++ "_build/default/lib/lasp_bench/", 646 | 647 | %% Build bench. 648 | ct:pal("Building benchmarking suite..."), 649 | BuildCommand = "cd " ++ BenchDir ++ "; make all", 650 | _BuildOutput = os:cmd(BuildCommand), 651 | % ct:pal("~p => ~p", [BuildCommand, BuildOutput]), 652 | 653 | %% Register our process. 654 | yes = global:register_name(runner, self()), 655 | 656 | %% Run bench. 657 | SortedNodesString = lists:flatten(lists:join(",", lists:map(fun(N) -> atom_to_list(N) end, SortedNodes))), 658 | RunnerString = atom_to_list(node()), 659 | BenchCommand = "cd " ++ BenchDir ++ "; RUNNER=\"" ++ RunnerString ++ "\" NODES=\"" ++ SortedNodesString ++ "\" _build/default/bin/lasp_bench " ++ RootDir ++ "examples/" ++ BenchConfig, 660 | ct:pal("Executing benchmark: ~p", [BenchCommand]), 661 | BenchOutput = os:cmd(BenchCommand), 662 | ct:pal("Benchmark output: ~p => ~p", [BenchCommand, BenchOutput]), 663 | 664 | %% Generate results. 665 | ct:pal("Generating results..."), 666 | ResultsCommand = "cd " ++ BenchDir ++ "; make results", 667 | _ResultsOutput = os:cmd(ResultsCommand), 668 | % ct:pal("~p => ~p", [ResultsCommand, ResultsOutput]), 669 | 670 | %% Get priv dir. 671 | PrivDir = ?config(priv_dir, Config), 672 | 673 | case os:getenv("TRAVIS") of 674 | false -> 675 | %% Make results dir. 676 | ct:pal("Making results output directory..."), 677 | DirCommand = "mkdir " ++ RootDir ++ "results/", 678 | _DirOutput = os:cmd(DirCommand), 679 | % ct:pal("~p => ~p", [DirCommand, DirOutput]), 680 | 681 | %% Get full path to the results. 682 | ReadLinkCommand = "readlink " ++ BenchDir ++ "tests/current", 683 | ReadLinkOutput = os:cmd(ReadLinkCommand), 684 | FullResultsPath = string:substr(ReadLinkOutput, 1, length(ReadLinkOutput) - 1), 685 | ct:pal("~p => ~p", [ReadLinkCommand, ReadLinkOutput]), 686 | 687 | %% Get directory name. 688 | Directory = string:substr(FullResultsPath, string:rstr(FullResultsPath, "/") + 1, length(FullResultsPath)), 689 | ResultsDirectory = Directory ++ "-" ++ BenchConfig ++ "-" ++ ResultsParameters, 690 | 691 | %% Copy results. 692 | ct:pal("Copying results into output directory: ~p", [ResultsDirectory]), 693 | CopyCommand = "cp -rpv " ++ FullResultsPath ++ " " ++ RootDir ++ "results/" ++ ResultsDirectory, 694 | _CopyOutput = os:cmd(CopyCommand), 695 | % ct:pal("~p => ~p", [CopyCommand, CopyOutput]), 696 | 697 | %% Copy logs. 
698 | ct:pal("Copying logs into output directory: ~p", [ResultsDirectory]), 699 | LogsCommand = "cp -rpv " ++ PrivDir ++ " " ++ RootDir ++ "results/" ++ ResultsDirectory, 700 | _LogOutput = os:cmd(LogsCommand), 701 | % ct:pal("~p => ~p", [CopyCommand, CopyOutput]), 702 | 703 | %% Receive results. 704 | %% TotalOpsMessages = receive_bench_operations(0), 705 | %% ct:pal("Total operations issued based on messages: ~p", [TotalOpsMessages]), 706 | 707 | TotalOpsSummary = get_total_ops(FullResultsPath), 708 | ct:pal("Total operations issued based on summary: ~p", [TotalOpsSummary]), 709 | 710 | %% Get busy errors. 711 | BusyErrorsRaw = os:cmd("grep -r busy_ " ++ PrivDir ++ " | wc -l"), 712 | BusyErrorsString = string:substr(BusyErrorsRaw, 1, length(BusyErrorsRaw) - 1), 713 | BusyErrors = list_to_integer(BusyErrorsString), 714 | ct:pal("Busy errors: ~p", [BusyErrors]), 715 | 716 | %% Write aggregate results. 717 | AggregateResultsFile = RootDir ++ "results/aggregate.csv", 718 | ct:pal("Writing aggregate results to: ~p", [AggregateResultsFile]), 719 | {ok, FileHandle} = file:open(AggregateResultsFile, [append]), 720 | Mode = case ?config(partisan_dispatch, Config) of 721 | true -> 722 | case ?config(parallelism, Config) of 723 | undefined -> 724 | partisan; 725 | Conns -> 726 | list_to_atom("partisan_" ++ integer_to_list(Conns)) 727 | end; 728 | _ -> 729 | disterl 730 | end, 731 | io:format(FileHandle, "~p,~p,~p,~p,~p~n", [TestType, Mode, Size, TotalOpsSummary, BusyErrors]), 732 | file:close(FileHandle); 733 | _ -> 734 | ok 735 | end, 736 | 737 | ?SUPPORT:stop(Nodes), 738 | 739 | ok. 740 | 741 | receive_bench_operations(TotalOps) -> 742 | receive 743 | {bench_operations, Ops} -> 744 | receive_bench_operations(TotalOps + Ops) 745 | after 10000 -> 746 | TotalOps 747 | end. 748 | 749 | get_total_ops(ResultsDir) -> 750 | {ok, Device} = file:open(ResultsDir ++ "/summary.csv", [read]), 751 | 752 | %% Dump header line. 753 | _ = io:get_line(Device, ""), 754 | 755 | try get_totals(Device, 0) 756 | after file:close(Device) 757 | end. 758 | 759 | get_totals(Device, Total) -> 760 | case io:get_line(Device, "") of 761 | eof -> 762 | Total; 763 | Line -> 764 | Tokens = string:tokens(Line, ","), 765 | RawOps = lists:nth(3, Tokens), 766 | TruncRawOps = string:sub_string(RawOps, 2), 767 | Ops = list_to_integer(TruncRawOps), 768 | get_totals(Device, Total + Ops) 769 | end. 770 | 771 | %% @private 772 | rand_bits(Bits) -> 773 | Bytes = (Bits + 7) div 8, 774 | <> = crypto:strong_rand_bytes(Bytes), 775 | Result. 776 | 777 | %% @private 778 | echo_sender(BenchPid, _SenderNum, _EchoBinary, 0) -> 779 | BenchPid ! done, 780 | ok; 781 | echo_sender(BenchPid, SenderNum, EchoBinary, Count) -> 782 | unir:echo(EchoBinary), 783 | echo_sender(BenchPid, SenderNum, EchoBinary, Count - 1). 784 | 785 | %% @private 786 | init_echo_sender(BenchPid, SenderNum, EchoBinary, Count) -> 787 | receive 788 | start -> 789 | ok 790 | end, 791 | echo_sender(BenchPid, SenderNum, EchoBinary, Count). 792 | 793 | %% @private 794 | fsm_sender(BenchPid, _SenderNum, _EchoBinary, Success, Failure, 0) -> 795 | BenchPid ! {done, Success, Failure}, 796 | ok; 797 | fsm_sender(BenchPid, SenderNum, EchoBinary, Success, Failure, Count) -> 798 | %% Normal distribution over 10000 keys. 799 | RandomNumber = trunc((rand:normal() + 1) * 5000), 800 | 801 | %% Craft object name. 802 | ObjectName = list_to_binary("object" ++ integer_to_list(RandomNumber)), 803 | 804 | %% 50/50 read/write workload. 
805 | case Count rem 2 == 0 of 806 | true -> 807 | case unir:fsm_put(ObjectName, EchoBinary) of 808 | {ok, _Val} -> 809 | fsm_sender(BenchPid, SenderNum, EchoBinary, Success + 1, Failure, Count - 1); 810 | {error, timeout} -> 811 | fsm_sender(BenchPid, SenderNum, EchoBinary, Success, Failure + 1, Count - 1) 812 | end; 813 | false -> 814 | case unir:fsm_get(ObjectName) of 815 | {ok, _Val} -> 816 | fsm_sender(BenchPid, SenderNum, EchoBinary, Success + 1, Failure, Count - 1); 817 | {error, timeout} -> 818 | fsm_sender(BenchPid, SenderNum, EchoBinary, Success, Failure + 1, Count - 1) 819 | end 820 | end. 821 | 822 | %% @private 823 | init_fsm_sender(BenchPid, SenderNum, EchoBinary, Count) -> 824 | receive 825 | start -> 826 | ok 827 | end, 828 | fsm_sender(BenchPid, SenderNum, EchoBinary, 0, 0, Count). 829 | 830 | %% @private 831 | root_path(Config) -> 832 | DataDir = proplists:get_value(data_dir, Config, ""), 833 | DataDir ++ "../../../../../../". 834 | 835 | %% @private 836 | root_dir(Config) -> 837 | RootCommand = "cd " ++ root_path(Config) ++ "; pwd", 838 | RootOutput = os:cmd(RootCommand), 839 | RootDir = string:substr(RootOutput, 1, length(RootOutput) - 1) ++ "/", 840 | ct:pal("RootDir: ~p", [RootDir]), 841 | RootDir. 842 | 843 | %% @private 844 | parallelism() -> 845 | case os:getenv("PARALLELISM", "1") of 846 | false -> 847 | [{parallelism, list_to_integer("1")}]; 848 | "1" -> 849 | [{parallelism, list_to_integer("1")}]; 850 | Config -> 851 | [{parallelism, list_to_integer(Config)}] 852 | end. 853 | 854 | %% @private 855 | channels() -> 856 | [{channels, [undefined, gossip, broadcast, vnode]}]. 857 | 858 | %% @private 859 | monotonic_channels() -> 860 | [{monotonic_channels, true}, {channels, [undefined, {monotonic, gossip}, broadcast, vnode]}]. 861 | 862 | %% @private 863 | bench_receiver(Count) -> 864 | bench_receiver(0, 0, Count). 865 | 866 | %% @private 867 | bench_receiver(Success, Failure, 0) -> 868 | ct:pal("Success: ~p, Failure: ~p", [Success, Failure]), 869 | ok; 870 | bench_receiver(Success, Failure, Count) -> 871 | ct:pal("Waiting for ~p processes to finish...", [Count]), 872 | 873 | receive 874 | done -> 875 | ct:pal("Received, but still waiting for ~p", [Count -1]), 876 | bench_receiver(Success, Failure, Count - 1); 877 | {done, S, F} -> 878 | ct:pal("Received; success: ~p, failure: ~p; but still waiting for ~p", [S, F, Count -1]), 879 | bench_receiver(Success + S, Failure + F, Count - 1) 880 | end. 881 | 882 | %% @private 883 | receiver(_Manager, BenchPid, 0) -> 884 | BenchPid ! done, 885 | ok; 886 | receiver(Manager, BenchPid, Count) -> 887 | receive 888 | {_Message, _SourceNode, _SourcePid} -> 889 | receiver(Manager, BenchPid, Count - 1) 890 | end. 891 | 892 | %% @private 893 | sender(_EchoBinary, _Manager, _DestinationNode, _DestinationPid, _PartitionKey, 0) -> 894 | ok; 895 | sender(EchoBinary, Manager, DestinationNode, DestinationPid, PartitionKey, Count) -> 896 | Manager:forward_message(DestinationNode, undefined, DestinationPid, {EchoBinary, node(), self()}, [{partition_key, PartitionKey}]), 897 | sender(EchoBinary, Manager, DestinationNode, DestinationPid, PartitionKey, Count - 1). 898 | 899 | %% @private 900 | init_sender(EchoBinary, Manager, DestinationNode, DestinationPid, PartitionKey, Count) -> 901 | receive 902 | start -> 903 | ok 904 | end, 905 | sender(EchoBinary, Manager, DestinationNode, DestinationPid, PartitionKey, Count). 906 | --------------------------------------------------------------------------------