├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── bin ├── all.sh ├── build-releases.sh ├── env ├── env_test ├── launch-nodes.sh └── stop-nodes.sh ├── config ├── benchmark_short.config ├── benchmark_standard.config ├── fmke.config ├── vars.config ├── vars_test.config ├── vm.args └── vm_test.args ├── doc └── FMK_DataModel.pdf ├── elvis.config ├── fmke-http-api.md ├── include ├── fmke.hrl ├── fmke_http.hrl └── fmke_kv.hrl ├── priv ├── build_schema.aql ├── build_schema.cql └── build_schema_fk.aql ├── rebar.config ├── rebar.config.script ├── rebar.lock ├── scripts ├── aws │ ├── 1-setup-vms.sh │ ├── 2-start-antidote-nodes.sh │ ├── 3-create-antidote-cluster.sh │ ├── 3a-create-multi-dc-cluster.sh │ ├── 4-start-fmke-nodes.sh │ ├── 5-start-benchmarks.sh │ ├── 6-prepare-results.sh │ ├── 7-fetch-and-merge-results.sh │ ├── build_instances.sh │ ├── get_public_address.sh │ └── src │ │ └── bin │ │ ├── compile-and-compress-results.sh │ │ ├── fmk_setup_script.erl │ │ ├── join_antidote_cluster.erl │ │ ├── join_dcs_script.erl │ │ ├── worker-configure-benchmark.sh │ │ ├── worker-setup-machine.sh │ │ ├── worker-start-antidote.sh │ │ ├── worker-start-basho-bench.sh │ │ └── worker-start-fmk.sh ├── compile_basho_bench.sh ├── config │ ├── change_conn_pool_size.sh │ ├── change_db.sh │ ├── change_db_addresses.sh │ ├── change_db_ports.sh │ ├── change_http_port.sh │ └── set_param.sh ├── copy_bench_driver.sh ├── run_benchmark.sh ├── run_ct_suite.sh ├── run_fmke_operations.sh ├── start_data_store.sh ├── start_fmke.sh ├── stop_data_store.sh └── stop_fmke.sh ├── src ├── fmke.app.src ├── fmke.erl ├── fmke_app.erl ├── fmke_client_bench_driver.erl ├── fmke_db_conn_manager.erl ├── fmke_db_conn_sup.erl ├── fmke_db_connection.erl ├── fmke_db_driver_redis.erl ├── fmke_driver_config.erl ├── fmke_driver_ets.erl ├── fmke_driver_opt_antidote.erl ├── fmke_driver_opt_aql.erl ├── fmke_driver_opt_cassandra.erl ├── fmke_driver_opt_redis_cluster.erl ├── fmke_driver_opt_redis_crdb.erl ├── fmke_driver_opt_riak_kv.erl ├── fmke_gen_driver.erl ├── fmke_gen_http_handler.erl ├── fmke_gen_simplified_kv_driver.erl ├── fmke_http_handler_app.erl ├── fmke_http_handler_facilities.erl ├── fmke_http_handler_patients.erl ├── fmke_http_handler_pharmacies.erl ├── fmke_http_handler_prescriptions.erl ├── fmke_http_handler_staff.erl ├── fmke_http_utils.erl ├── fmke_json.erl ├── fmke_kv_adapter.erl ├── fmke_setup_sup.erl ├── fmke_sup.erl ├── gen_fmke_adapter.erl └── gen_fmke_kv_driver.erl └── test ├── fmke_antidote_transactions_SUITE.erl ├── fmke_configs ├── antidote_non_nested_data_model.config ├── aql_non_nested_data_model.config ├── cassandra_non_nested_data_model.config ├── ets_nested_data_model.config ├── ets_non_nested_data_model.config ├── redis_cluster_non_nested_data_model.config ├── redis_crdb_non_nested_data_model.config ├── riak_non_nested_data_model.config ├── riak_simple_nested.config └── riak_simple_non_nested.config ├── fmke_core_unit_test_SUITE.erl ├── fmke_core_unit_test_SUITE_data └── default.config ├── fmke_db_conn_manager_SUITE.erl ├── fmke_http_api_SUITE.erl ├── fmke_http_api_SUITE_data └── default.config ├── fmke_test_setup.erl ├── fmke_test_utils.erl └── fmke_unstable_db_conn_SUITE.erl /.gitignore: -------------------------------------------------------------------------------- 1 | .rebar3 2 | _* 3 | .eunit 4 | *.o 5 | *.beam 6 | *.plt 7 | *.swp 8 | *.swo 9 | .erlang.cookie 10 | ebin 11 | log 12 | erl_crash.dump 13 | .rebar 14 | logs 15 | _build 16 | deps/ 17 | TEST-* 18 | rebar3.crashdump 19 | tests/ 20 | 
.DS_Store 21 | .idea/ 22 | config/fmke.config 23 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | language: erlang 3 | otp_release: 4 | - 22.0 5 | services: 6 | - docker 7 | addons: 8 | apt: 9 | packages: 10 | - g++ 11 | - make 12 | - cmake 13 | - libuv1-dev 14 | - libssl-dev 15 | before_script: 16 | - epmd -daemon 17 | - sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6' 18 | - rebar3 --version 19 | script: 20 | - rebar3 as test compile 21 | - rebar3 xref 22 | - rebar3 dialyzer 23 | - rebar3 as lint lint 24 | - make coverage 25 | - rebar3 as test coveralls send 26 | after_failure: 27 | - cat /home/travis/build/goncalotomas/FMKe/_build/test/logs/ct.latest.log 28 | - cat /home/travis/build/goncalotomas/FMKe/_build/test/logs/*/log/console.log 29 | - cat /home/travis/build/goncalotomas/FMKe/_build/test/logs/*/log/crash.log 30 | - cat /home/travis/build/goncalotomas/FMKe/_build/test/logs/*/log/error.log 31 | - cat /home/travis/build/goncalotomas/FMKe/_build/test/logs/*/log/error.log 32 | - cat /home/travis/build/goncalotomas/FMKe/_build/test/logs/*/lib.fmke.logs/suite.log 33 | cache: 34 | directories: 35 | - "$HOME/.cache/rebar3/hex/default" 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------- 2 | 3 | Copyright (c) 2014 SyncFree Consortium. All Rights Reserved. 4 | 5 | This file is provided to you under the Apache License, 6 | Version 2.0 (the "License"); you may not use this file 7 | except in compliance with the License. You may obtain 8 | a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, 13 | software distributed under the License is distributed on an 14 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | KIND, either express or implied. See the License for the 16 | specific language governing permissions and limitations 17 | under the License. 
18 | 19 | ------------------------------------------------------------------- -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | REBAR=rebar3 2 | BENCH=_build/test/lib/lasp_bench 3 | 4 | all: compile rel 5 | 6 | attach: 7 | ./_build/default/rel/fmke/bin/env attach 8 | 9 | bench-results: 10 | Rscript --vanilla _build/test/lib/lasp_bench/priv/summary.r -i tests/current 11 | 12 | compile: 13 | ${REBAR} as test compile 14 | 15 | console: rel 16 | ./_build/default/rel/fmke/bin/env console 17 | 18 | coverage: eunit ct 19 | ${REBAR} cover --verbose 20 | 21 | ct: 22 | ${REBAR} ct --suite fmke_antidote_transactions_SUITE.erl --cover --cover_export_name=antidote_txn_check 23 | ${REBAR} ct --suite fmke_db_conn_manager_SUITE.erl --cover --cover_export_name=db_conn_manager 24 | ${REBAR} ct --suite fmke_unstable_db_conn_SUITE.erl --cover --cover_export_name=unstable_db_conn 25 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/antidote_non_nested_data_model.config --cover --cover_export_name=core_antidote_non_nested_opt 26 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/cassandra_non_nested_data_model.config --cover --cover_export_name=core_cassandra_non_nested_opt 27 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/ets_nested_data_model.config --cover --cover_export_name=core_ets_nested 28 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/ets_non_nested_data_model.config --cover --cover_export_name=core_ets_non_nested 29 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/redis_cluster_non_nested_data_model.config --cover --cover_export_name=core_redis_cluster_non_nested_opt 30 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/redis_crdb_non_nested_data_model.config --cover --cover_export_name=core_redis_crdb_non_nested_opt 31 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/riak_non_nested_data_model.config --cover --cover_export_name=core_riak_non_nested_opt 32 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/aql_non_nested_data_model.config --cover --cover_export_name=core_aql_non_nested_opt 33 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/antidote_non_nested_data_model.config --cover --cover_export_name=http_antidote_non_nested_opt 34 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/cassandra_non_nested_data_model.config --cover --cover_export_name=http_cassandra_non_nested_opt 35 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/ets_nested_data_model.config --cover --cover_export_name=http_ets_nested 36 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/ets_non_nested_data_model.config --cover --cover_export_name=http_ets_non_nested 37 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/redis_cluster_non_nested_data_model.config --cover --cover_export_name=http_redis_cluster_non_nested_opt 38 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/redis_crdb_non_nested_data_model.config --cover --cover_export_name=http_redis_crdb_non_nested_opt 39 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/riak_non_nested_data_model.config --cover --cover_export_name=http_riak_non_nested_opt 40 | 
${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/aql_non_nested_data_model.config --cover --cover_export_name=http_aql_non_nested_opt 41 | 42 | ct-antidote: 43 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/antidote_non_nested_data_model.config 44 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/antidote_non_nested_data_model.config 45 | 46 | ct-cassandra: 47 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/cassandra_non_nested_data_model.config 48 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/cassandra_non_nested_data_model.config 49 | 50 | ct-ets: 51 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/ets_nested_data_model.config 52 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/ets_non_nested_data_model.config 53 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/ets_nested_data_model.config 54 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/ets_non_nested_data_model.config 55 | 56 | ct-redis-cluster: 57 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/redis_cluster_non_nested_data_model.config 58 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/redis_cluster_non_nested_data_model.config 59 | 60 | ct-redis: 61 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/redis_crdb_non_nested_data_model.config 62 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/redis_crdb_non_nested_data_model.config 63 | 64 | ct-riak: 65 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/riak_non_nested_data_model.config 66 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/riak_non_nested_data_model.config 67 | 68 | ct-aql: 69 | ${REBAR} ct --suite fmke_core_unit_test_SUITE.erl --config test/fmke_configs/aql_non_nested_data_model.config 70 | ${REBAR} ct --suite fmke_http_api_SUITE.erl --config test/fmke_configs/aql_non_nested_data_model.config 71 | 72 | dialyzer: 73 | ${REBAR} dialyzer 74 | 75 | eunit: 76 | ${REBAR} eunit 77 | 78 | lint: 79 | rebar3 as lint lint 80 | 81 | rel: relclean 82 | rm -rf _build/default/rel/ 83 | ${REBAR} release -n fmke 84 | 85 | relclean: 86 | rm -rf _build/default/rel 87 | 88 | select-antidote: 89 | ./scripts/config/change_db.sh antidote 90 | 91 | select-antidote-norm: 92 | ./scripts/config/change_db.sh antidote_norm 93 | 94 | select-redis: 95 | ./scripts/config/change_db.sh redis 96 | 97 | select-riak: 98 | ./scripts/config/change_db.sh riak 99 | 100 | select-riak-norm: 101 | ./scripts/config/change_db.sh riak_norm 102 | 103 | shell: 104 | ${REBAR} shell --apps fmke --name fmke@127.0.0.1 --setcookie fmke 105 | 106 | shell-antidote: rel 107 | ./scripts/start_data_store.sh antidote 108 | ./_build/default/rel/fmke/bin/env console 109 | 110 | shell-redis: rel 111 | ./scripts/start_data_store.sh redis 112 | ./_build/default/rel/fmke/bin/env console 113 | 114 | shell-riak: rel 115 | ./scripts/start_data_store.sh riak 116 | ./_build/default/rel/fmke/bin/env console 117 | 118 | start: rel 119 | ./scripts/start_fmke.sh 120 | 121 | start-antidote: select-antidote 122 | ./scripts/start_data_store.sh antidote 123 | 124 | start-antidote-norm: select-antidote-norm 125 | ./scripts/start_data_store.sh antidote 126 | 127 | start-redis: select-redis 128 | ./scripts/start_data_store.sh redis 129 | 130 | 
start-riak: select-riak 131 | ./scripts/start_data_store.sh riak 132 | 133 | start-riak-norm: select-riak-norm 134 | ./scripts/start_data_store.sh riak 135 | 136 | stop: 137 | ./scripts/stop_fmke.sh 138 | 139 | stop-antidote: 140 | ./scripts/stop_data_store.sh antidote 141 | 142 | stop-redis: 143 | ./scripts/stop_data_store.sh redis 144 | 145 | stop-riak: 146 | ./scripts/stop_data_store.sh riak 147 | 148 | test: all eunit ct 149 | 150 | xref: 151 | rebar3 xref 152 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FMKe 2 | ![Erlang Version](https://img.shields.io/badge/Erlang%2FOTP-%E2%89%A521-brightgreen.svg) 3 | [![Build Status](https://travis-ci.org/goncalotomas/FMKe.svg?branch=master)](https://travis-ci.org/goncalotomas/FMKe) 4 | [![Coverage Status](https://coveralls.io/repos/github/goncalotomas/FMKe/badge.svg?branch=master)](https://coveralls.io/github/goncalotomas/FMKe?branch=master) 5 | ![Dialyzer Enabled](https://img.shields.io/badge/dialyzer-enabled-brightgreen.svg) 6 | 7 | FMKe is an extensible, real-world benchmark for distributed key-value stores. 8 | This repository contains the code for the application server and a set of scripts for orchestrating deployment and local execution of micro-benchmarks. 9 | 10 | ## Why? 11 | Here is a comparison of the benchmark specifications that we analyzed, with FMKe included for reference: 12 | 13 | | Benchmark | Target Systems | Workload type | 14 | | ------------- |:-------------:| -----:| 15 | | [TPC-C][6] | SQL-Based databases ❌ | **realistic ✔️ | 16 | | [TPC-E][7] | SQL-Based databases ❌ | **realistic ✔️ | 17 | | [YCSB][5] | Key-value stores ✔️ | synthetic ❌ | 18 | | FMKe | Key-value stores ✔️ | **realistic ✔️ | 19 | 20 | ** Emulates real application patterns 21 | 22 | ## Backing the realistic claims 23 | 24 | FMKe was one of the final contributions of the [SyncFree][3] European research project. It was designed to benchmark the project's reference platform, [AntidoteDB][2], by closely emulating a realistic application. One of the industrial partners of the project, Trifork, provided statistical data about _Fælles Medicinkort_ (FMK), a sub-system of the Danish National Joint Medicine Card. The real system is backed by a distributed key-value store to ensure high availability, which validates the decision to use it as a benchmark (originally) for AntidoteDB. 25 | 26 | ## System description 27 | The real-world FMK system and FMKe alike are designed to store patient health data, mostly revolving around medical prescriptions. Here is the ER diagram: 28 | 29 | ![ER Diagram](http://i.imgur.com/q6ByEFs.png) 30 | 31 | There are 4 core entities: **treatment facilities**, **patients**, **pharmacies**, and **medical staff**. Other records appear as relations between these entities, but it will become apparent that the workload focuses heavily on prescription records. More information about the system operations and data model can be found in [this document][8]. 32 | 33 | ## Architecture 34 | ![Architecture](http://i.imgur.com/rLZSFMb.png) 35 | Consider FMKe as a general application server that contains the logic mimicking the real FMK system. We decided not to release FMKe as a single monolithic application, since there are multiple benefits in separating it into these 3 components. 
36 | Firstly, separating the application server from the workload generation component doesn't require us to reinvent the wheel, since many good workload generation tools already exist. On the other hand, making the application logic independent of the database allows for collaboration in supporting a broader set of data stores. 37 | We have a generic interface for key-value stores (implemented as an Erlang behaviour) that is well specified, which makes supporting a new database as simple as writing a driver for it. Furthermore, pull requests with new drivers or optimizations for existing ones are accepted and welcomed. 38 | 39 | ## Supported data stores 40 | - AntidoteDB (the SQL-like interface offered by [AQL][11] is also supported) 41 | - Cassandra 42 | - Redis 43 | - Riak KV 44 | 45 | ### Note about AQL schema: 46 | 47 | When running the benchmark to evaluate the performance of AQL you have two options regarding the database schema. The file [priv/build_schema.aql](priv/build_schema.aql) creates the tables without foreign keys, and thus, the referential integrity mechanism of AQL is not used. To use the referential integrity mechanism, use the file [priv/build_schema_fk.aql](priv/build_schema_fk.aql), this version creates the tables with foreign keys. 48 | 49 | ## How the benchmark is deployed 50 | By default FMKe keeps a connection pool to a single database node, and the workload generation is performed by [Lasp Bench][4]. 51 | To benchmark clustered databases with _n_ nodes, _n_ FMKe instances can be deployed, or alternatively one FMKe node can connect to multiple nodes (the exact number is dependent on the connection pool size). 52 | To avoid network and CPU bottlenecks that could impact the result of the benchmark, it is advised to use different servers for each one of the components. Having said that, a number of scripts are available for development that enable local execution of micro benchmarks. 53 | 54 | ### Use case: AntidoteDB evaluation 55 | FMKe was used in January 2017 to evaluate the performance of AntidoteDB. The evaluation took place in Amazon Web Services using `m3.xlarge` instances which have 4 vCPUs, 15GB RAM and 2x40GB SSD storage. 56 | The biggest test case used 36 AntidoteDB instances spread across 3 data centers (Germany, Ireland and United States), 9 instances of FMKe and 18 instances of (former Basho Bench) Lasp Bench that simulated 1024 concurrent clients performing operations as quickly as possible. 57 | Before the benchmark, AntidoteDB was populated with over 1 million patient keys, 50 hospitals, 10.000 doctors and 300 pharmacies. 58 | 59 | ## Testing out FMKe locally 60 | FMKe requires [Erlang/OTP][9] and [rebar3][10]. You need at least Erlang 20, FMKe will not compile in previous versions. 61 | 62 | Please check [the wiki](https://github.com/goncalotomas/FMKe/wiki) for detailed instructions on how to run FMKe with a particular database. 
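As a rough illustration of the driver interface mentioned in the Architecture section above, a new database driver might start out like the sketch below. Only the gen_server-based structure and the `start/1` callback (which, according to the comments in `config/fmke.config`, receives the configured data model) come from this repository; every other name and call shape is an assumption, so check `gen_fmke_kv_driver.erl`, `gen_fmke_adapter.erl` and the existing `fmke_driver_*` modules for the real behaviour definitions before writing one.

```erlang
-module(fmke_driver_opt_mydb).
%% Sketch of a new FMKe database driver. The gen_server-based structure and the
%% start/1 callback follow the guidance in config/fmke.config; every other name
%% below (including the shape of the calls) is a placeholder assumption.
-behaviour(gen_server).

-export([start/1, stop/0]).
-export([init/1, handle_call/3, handle_cast/2]).

start(DataModel) ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, DataModel, []).

stop() ->
    gen_server:stop(?MODULE).

init(DataModel) ->
    %% keep the configured data model (nested | non_nested) in the state,
    %% as recommended by the comments in config/fmke.config
    {ok, #{data_model => DataModel}}.

handle_call(Operation, _From, State) ->
    %% each FMKe operation (get/create/update of patients, pharmacies,
    %% prescriptions, ...) would be translated into calls to the target
    %% database's client library here
    {reply, {error, {not_implemented, Operation}}, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.
```

A driver like this presumably also needs to be known to `fmke_driver_config.erl`, which maps the configured `target_database` to a driver module.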
63 | 64 | [1]: https://syncfree.lip6.fr/ 65 | [2]: https://antidotedb.eu 66 | [3]: https://github.com/SyncFree 67 | [4]: https://github.com/lasp-lang/lasp-bench 68 | [5]: https://b9f6702a-a-62cb3a1a-s-sites.googlegroups.com/site/brianfrankcooper/home/publications/ycsb.pdf?attachauth=ANoY7cplFQg1yGsPe1xDRwV2JKPCI7OffNZnUyNOVBMecaBZIlPPuWBV0oB4T5RJEIPJLn3OwUP_Tlawws8YIeHYdTLEf3E1lcJGYqzFIxIVEXxHujMqxEyioMP_w4dRMlxUPpjx6nlwOW6R9Di9f30VKXnEX5a6qwJgAaUhSEN_zbTAuzZs_VONffsO7jSa8Hr-24O1kkMwPFWot8ouhbmJSHwSE0F44V_AYEV7sAsvbWp9iWD9Kp0%3D&attredirects=0 69 | [6]: http://www.tpc.org/tpcc/default.asp 70 | [7]: http://www.tpc.org/tpce/ 71 | [8]: https://github.com/goncalotomas/FMKe/blob/master/doc/FMK_DataModel.pdf 72 | [9]: http://www.erlang.org/downloads 73 | [10]: http://www.rebar3.org/ 74 | [11]: https://github.com/mrshankly/secure-aql 75 | -------------------------------------------------------------------------------- /bin/all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export RELX_REPLACE_OS_VARS=true 4 | i=$1 5 | echo "stopping FMKe instances..." 6 | ./bin/stop-nodes.sh ${i} 7 | echo "cleaning old FMKe releases and building new ones..." 8 | ./bin/build-releases.sh ${i} 9 | echo "launching FMKe releases..." 10 | ./bin/launch-nodes.sh ${i} 11 | 12 | -------------------------------------------------------------------------------- /bin/build-releases.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export RELX_REPLACE_OS_VARS=true 4 | make relclean 5 | for i in `seq 1 $1`; 6 | do 7 | rebar3 release -n fmk${i} 8 | done 9 | -------------------------------------------------------------------------------- /bin/env: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # HTTP port. 4 | if [ -z "$HTTP_PORT" ]; then 5 | export HTTP_PORT=9090 6 | fi 7 | 8 | # HTTP IP address. 9 | if [ -z "$IP" ]; then 10 | export IP="127.0.0.1" 11 | fi 12 | 13 | # Receive the host name from the parameters 14 | if [ -z "$INSTANCE_NAME" ]; then 15 | export INSTANCE_NAME=fmke 16 | fi 17 | 18 | # Choose the hostname for the epmd long name if the hostname exists 19 | # and if it resolves through the resolver; using a resolvable name 20 | # that's only resolvable with resolv.conf won't work for long names. 21 | if [ ! -z "$HOSTNAME" ]; then 22 | if /usr/bin/dig ${HOSTNAME} | grep -q 'NXDOMAIN'; then 23 | export NODE_NAME=${INSTANCE_NAME}@${IP} 24 | else 25 | export NODE_NAME=${INSTANCE_NAME}@${HOSTNAME} 26 | fi 27 | fi 28 | # Else, default to IP. 29 | if [ -z "$NODE_NAME" ]; then 30 | export NODE_NAME=${INSTANCE_NAME}@${IP} 31 | fi 32 | 33 | # Assume 127.0.0.1 as bind host. 34 | if [ -z "$IP" ]; then 35 | echo "IP address not set; defaulting to 127.0.0.1." 36 | export IP=127.0.0.1 37 | fi 38 | 39 | if [ -z "$NODE_NAME" ]; then 40 | export NODE_NAME=${INSTANCE_NAME}@${IP} 41 | fi 42 | 43 | if [ -z "$COOKIE" ]; then 44 | export COOKIE=fmke 45 | fi 46 | 47 | export RELX_REPLACE_OS_VARS=true 48 | 49 | echo "NODE_NAME: ${NODE_NAME}" 50 | echo "COOKIE: ${COOKIE}" 51 | echo "IP: ${IP}" 52 | echo "HTTP_PORT: ${HTTP_PORT}" 53 | 54 | RELNAME="`dirname \"$0\"`"/${INSTANCE_NAME} 55 | exec ${RELNAME} "$@" 56 | -------------------------------------------------------------------------------- /bin/env_test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # HTTP port. 
4 | if [ -z "$HTTP_PORT" ]; then 5 | export HTTP_PORT=9190 6 | fi 7 | 8 | # HTTP IP address. 9 | if [ -z "$IP" ]; then 10 | export IP="127.0.0.1" 11 | fi 12 | 13 | # Receive the host name from the parameters 14 | if [ -z "$INSTANCE_NAME" ]; then 15 | export INSTANCE_NAME=fmke_test 16 | fi 17 | 18 | # Choose the hostname for the epmd long name if the hostname exists 19 | # and if it resolves through the resolver; using a resolvable name 20 | # that's only resolvable with resolv.conf won't work for long names. 21 | if [ ! -z "$HOSTNAME" ]; then 22 | if /usr/bin/dig ${HOSTNAME} | grep -q 'NXDOMAIN'; then 23 | export NODE_NAME=${INSTANCE_NAME}@${IP} 24 | else 25 | export NODE_NAME=${INSTANCE_NAME}@${HOSTNAME} 26 | fi 27 | fi 28 | # Else, default to IP. 29 | if [ -z "$NODE_NAME" ]; then 30 | export NODE_NAME=${INSTANCE_NAME}@${IP} 31 | fi 32 | 33 | # Assume 127.0.0.1 as bind host. 34 | if [ -z "$IP" ]; then 35 | echo "IP address not set; defaulting to 127.0.0.1." 36 | export IP=127.0.0.1 37 | fi 38 | 39 | if [ -z "$NODE_NAME" ]; then 40 | export NODE_NAME=${INSTANCE_NAME}@${IP} 41 | fi 42 | 43 | if [ -z "$COOKIE" ]; then 44 | export COOKIE=fmke 45 | fi 46 | 47 | export RELX_REPLACE_OS_VARS=true 48 | 49 | echo "NODE_NAME: ${NODE_NAME}" 50 | echo "COOKIE: ${COOKIE}" 51 | echo "IP: ${IP}" 52 | echo "HTTP_PORT: ${HTTP_PORT}" 53 | 54 | RELNAME="`dirname \"$0\"`"/${INSTANCE_NAME} 55 | exec ${RELNAME} "$@" 56 | -------------------------------------------------------------------------------- /bin/launch-nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export RELX_REPLACE_OS_VARS=true 4 | for i in `seq 1 $1`; 5 | do 6 | HTTP_PORT=9${i}90 INSTANCE_NAME=fmk${i} ANTIDOTE_PB_PORT=8${i}87 COOKIE=fmke _build/default/rel/fmk${i}/bin/env foreground & 7 | done 8 | -------------------------------------------------------------------------------- /bin/stop-nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export RELX_REPLACE_OS_VARS=true 4 | for i in `seq 1 $1`; 5 | do 6 | _build/default/rel/fmk${i}/bin/fmk${i} stop 7 | done 8 | -------------------------------------------------------------------------------- /config/benchmark_short.config: -------------------------------------------------------------------------------- 1 | % Run at max, i.e.: as quickly as possible 2 | {mode, max}. 3 | 4 | %% test duration 5 | {duration, 1}. 6 | 7 | % Run 16 concurrent clients 8 | {concurrent, 16}. 9 | 10 | %% The module name of the driver that Basho Bench will use to generate load. 11 | {driver, lasp_bench_driver_fmke}. 12 | 13 | %% necessary code to run the client 14 | {code_paths, 15 | [ 16 | "_build/test/lib/lager", 17 | "_build/test/lib/unicode_util_compat", 18 | "_build/test/lib/jsx", 19 | "_build/test/lib/lasp_bench", 20 | "_build/test/lib/hackney", 21 | "_build/test/lib/idna", 22 | "_build/test/lib/mimerl", 23 | "_build/test/lib/metrics", 24 | "_build/test/lib/certifi", 25 | "_build/test/lib/ssl_verify_fun" 26 | ] 27 | }. 28 | 29 | %% 30 | {key_generator, {int_to_bin_bigendian, {uniform_int, 5000000}}}. 31 | 32 | %% 33 | {value_generator, {fixed_bin, 10000}}. 34 | 35 | %% Configuration settings for FMK servers. 36 | %% List of server IPs 37 | {fmk_server_ips, ["127.0.0.1"]}. 38 | %% List of HTTP ports to connect (1-to-1 correspondence to above list) 39 | {fmk_server_ports, [9090]}. 40 | 41 | {numpatients,1000}. 42 | {numfacilities,100}. 43 | {numpharmacies,300}. 44 | {numstaff,500}. 
45 | {numprescriptions,100}. 46 | {zipf_size,5000}. 47 | {zipf_skew,1}. 48 | 49 | {operations,[ 50 | {get_pharmacy_prescriptions, 27}, 51 | {get_prescription_medication, 27}, 52 | {get_staff_prescriptions, 14}, 53 | {create_prescription, 8}, 54 | {get_processed_prescriptions, 7}, 55 | {get_patient, 5}, 56 | {update_prescription, 4}, 57 | {update_prescription_medication, 4}, 58 | {get_prescription, 4} 59 | ]}. 60 | -------------------------------------------------------------------------------- /config/benchmark_standard.config: -------------------------------------------------------------------------------- 1 | % Run at max, i.e.: as quickly as possible 2 | {mode, max}. 3 | 4 | %% test duration 5 | {duration, 20}. 6 | 7 | % Run 16 concurrent clients 8 | {concurrent, 16}. 9 | 10 | %% The module name of the driver that Basho Bench will use to generate load. 11 | {driver, lasp_bench_driver_fmke}. 12 | 13 | %% necessary code to run the client 14 | {code_paths, 15 | [ 16 | "_build/test/lib/lager", 17 | "_build/test/lib/unicode_util_compat", 18 | "_build/test/lib/jsx", 19 | "_build/test/lib/lasp_bench", 20 | "_build/test/lib/hackney", 21 | "_build/test/lib/idna", 22 | "_build/test/lib/mimerl", 23 | "_build/test/lib/metrics", 24 | "_build/test/lib/certifi", 25 | "_build/test/lib/ssl_verify_fun" 26 | ] 27 | }. 28 | 29 | %% 30 | {key_generator, {int_to_bin_bigendian, {uniform_int, 5000000}}}. 31 | 32 | %% 33 | {value_generator, {fixed_bin, 10000}}. 34 | 35 | %% Configuration settings for FMK servers. 36 | %% List of server IPs 37 | {fmk_server_ips, ["127.0.0.1"]}. 38 | %% List of HTTP ports to connect (1-to-1 correspondence to above list) 39 | {fmk_server_ports, [9090]}. 40 | 41 | {numpatients,1000000}. 42 | {numfacilities,50}. 43 | {numpharmacies,300}. 44 | {numstaff,10000}. 45 | {numprescriptions,5000}. 46 | {zipf_size,5000}. 47 | {zipf_skew,1}. 48 | 49 | {operations,[ 50 | {get_pharmacy_prescriptions, 27}, 51 | {get_prescription_medication, 27}, 52 | {get_staff_prescriptions, 14}, 53 | {create_prescription, 8}, 54 | {get_processed_prescriptions, 7}, 55 | {get_patient, 5}, 56 | {update_prescription, 4}, 57 | {update_prescription_medication, 4}, 58 | {get_prescription, 4} 59 | ]}. 60 | -------------------------------------------------------------------------------- /config/fmke.config: -------------------------------------------------------------------------------- 1 | %% List of IP addresses where FMKe should try to connect at boot time. 2 | %% NOTE: Must use one of the following structures: 3 | %% (1): ["192.168.1.1", "192.168.1.2"] 4 | %% (2): [{192,168,1,1} {192,168,1,2}] 5 | %% (3): "192.168.1.1 192.168.1.2" 6 | %% If you're connecting to multiple nodes on the same port, you can list all of the different addresses here and leave 7 | %% a single entry in the database_ports option (e.g. [8087]). 8 | {database_addresses, ["127.0.0.1"]}. 9 | 10 | %% List of ports where FMKe should try to connect at boot time. 11 | %% NOTE: Must use one of the following structures: 12 | %% (1): ["8080", "8081"] 13 | %% (2): [8080, 8081] 14 | %% (3): "8080 8081" 15 | %% If you're connecting to multiple nodes on the same address but on different ports, you can list all of the different 16 | %% ports here and leave a single entry in the database_addresses option (e.g. ["127.0.0.1"]). 17 | {database_ports, [8087]}. 18 | 19 | %% Target back end data store. This is required in order for FMKe to load the 20 | %% correct drivers to connect to your desired data store. 
21 | %% Currently FMKe supports the following data stores: 22 | %% antidote, riak, redis 23 | %% Please select one of the previous values in the form of an erlang atom 24 | %% (e.g. riak) or string (e.g. "riak") 25 | {target_database, antidote}. 26 | 27 | %% Uses an optimized driver that implements the entire FMKe API (only if implemented). 28 | %% Currently these are available for antidote, riak and redis. You're welcome to implement a new one and submit a 29 | %% pull request! 30 | %% Predictably, this defaults to false, so you only need to specify this option when you want to use an optimized driver 31 | %% implementation. 32 | {optimized_driver, true}. 33 | 34 | %% Changes the data model, if the available drivers support it. 35 | %% For key value stores, we consider the possible values to be nested or non_nested, depending on whether the database 36 | %% keeps references to other objects, or copies of other objects that must be updated on each update to the original. 37 | %% The gen_fmke_kv_driver behaviour specifies the start/1 callback for all drivers implemented with this behaviour, and 38 | %% the argument to the start function is the value of this option. We recommend you also implement the gen_server 39 | %% behaviour and keep it in the state, or put it inside an ETS table (but you must check it's value for each relevant 40 | %% get or put operation). 41 | %% When implementing drivers it might make sense to look at how other previous ones were made and attempt to replicate 42 | %% the logic inside them. 43 | %% NOTE: Usually optimized drivers only support one data model (the one that yields the best performance), so please 44 | %% keep this line commented when measuring performance with optimized drivers. 45 | % {data_model, nested}. 46 | 47 | %% When FMKe connects to the database you choose it opens a pool of connections. 48 | %% This parameter configures the connection pool size. 49 | %% Please note that in deployments with connections to multiple back end nodes, 50 | %% the number of connections will be equally shared among all nodes 51 | %% Example: connecting FMKe to 2 nodes with a connection pool size of 30 will 52 | %% open 15 connections to each database node. 53 | {connection_pool_size, 30}. 54 | 55 | %% The port on which the FMKe HTTP server binds to. Running on a system 56 | %% reserved port (0-1023) will require superuser privileges. 57 | {http_port, 9090}. 58 | -------------------------------------------------------------------------------- /config/vars.config: -------------------------------------------------------------------------------- 1 | {node, "fmke@127.0.0.1"}. 2 | {cookie, "fmke"}. 3 | -------------------------------------------------------------------------------- /config/vars_test.config: -------------------------------------------------------------------------------- 1 | {node, "fmke_test@127.0.0.1"}. 2 | {cookie, "fmke"}. 
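Putting the options documented in `config/fmke.config` above together, a hypothetical configuration for a two-node deployment might look like the sketch below. The addresses are placeholders, and with a `connection_pool_size` of 30 split across two nodes FMKe would open 15 connections to each, as the pool-size comment above explains.

```erlang
%% Hypothetical example only -- the addresses are placeholders.
{database_addresses, ["10.0.0.1", "10.0.0.2"]}.
{database_ports, [8087]}.
{target_database, riak}.
{optimized_driver, true}.
{connection_pool_size, 30}.
{http_port, 9090}.
```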
3 | -------------------------------------------------------------------------------- /config/vm.args: -------------------------------------------------------------------------------- 1 | ## Name of the node 2 | -name ${NODE_NAME} 3 | 4 | ## Cookie for distributed erlang 5 | -setcookie ${COOKIE} 6 | 7 | ## Directories with compiled modules 8 | -pa ./_build/default/lib/*/ebin releases/${REL_VSN}/consolidated 9 | -------------------------------------------------------------------------------- /config/vm_test.args: -------------------------------------------------------------------------------- 1 | ## Name of the node 2 | -name fmke_test@127.0.0.1 3 | 4 | ## Cookie for distributed erlang 5 | -setcookie fmke 6 | 7 | ## Directories with compiled modules 8 | -pa ./_build/default/lib/*/ebin 9 | -------------------------------------------------------------------------------- /doc/FMK_DataModel.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/goncalotomas/FMKe/654d3211ef57d841540e58033a397ce0f3dee0f7/doc/FMK_DataModel.pdf -------------------------------------------------------------------------------- /elvis.config: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | elvis, 4 | [ 5 | {config, 6 | [#{dirs => ["src"], 7 | filter => "*.erl", 8 | % ignore => ["src/oc_trace_pb.erl"], 9 | rules => [{elvis_style, line_length, #{limit => 120, skip_comments => false}}, 10 | {elvis_style, no_tabs}, 11 | {elvis_style, no_trailing_whitespace}, 12 | {elvis_style, macro_names, #{ignore => []}}, 13 | {elvis_style, macro_module_names}, 14 | {elvis_style, operator_spaces, #{rules => [ 15 | {right, "++"}, {left, "++"}, {right, "--"}, {left, "--"} 16 | ]}}, 17 | {elvis_style, nesting_level, #{level => 4}}, 18 | {elvis_style, god_modules, #{limit => 30, ignore => []}}, 19 | {elvis_style, no_if_expression}, 20 | {elvis_style, invalid_dynamic_call, #{ignore => [ 21 | fmke, fmke_db_connection, fmke_kv_adapter 22 | ]}}, 23 | {elvis_style, used_ignored_variable}, 24 | {elvis_style, no_behavior_info}, 25 | {elvis_style, module_naming_convention, #{regex => "^[a-z]([a-z0-9]*_?)*(_SUITE)?$", ignore => []}}, 26 | {elvis_style, function_naming_convention, #{regex => "^([a-z][a-z0-9]*_?)*$"}}, 27 | % {elvis_style, state_record_and_type, disable}, 28 | {elvis_style, state_record_and_type}, 29 | {elvis_style, no_spec_with_records}, 30 | {elvis_style, dont_repeat_yourself, #{min_complexity => 40}}, 31 | {elvis_style, no_debug_call, #{ignore => [fmke_test_setup]}}], 32 | %% sequential reporter calls other reporters dynamically 33 | % {elvis_style, invalid_dynamic_call, #{ignore => [oc_sequential_reporter]}}], 34 | 35 | ruleset => erl_files 36 | }, 37 | #{dirs => ["test"], 38 | filter => "*.erl", 39 | rules => [{elvis_style, dont_repeat_yourself, #{min_complexity => 30}}, 40 | {elvis_style, line_length, #{limit => 120}}, 41 | %% be a bit more lax on naming for tests 42 | {elvis_style, variable_naming_convention, #{regex => "^_{0,2}([A-Z][0-9a-zA-Z]*)$"}}, 43 | {elvis_style, state_record_and_type, disable}, 44 | {elvis_style, no_debug_call, #{ignore => [fmke_test_setup]}}], 45 | ruleset => erl_files 46 | }, 47 | #{dirs => ["."], 48 | filter => "Makefile", 49 | rules => [{elvis_project, 50 | protocol_for_deps_erlang_mk, 51 | #{regex => "(https://.*|[0-9]+([.][0-9]+)*)"}}], 52 | ruleset => makefiles 53 | }, 54 | #{dirs => ["."], 55 | filter => "rebar.config", 56 | ruleset => rebar_config 57 | }, 58 | #{dirs => ["."], 59 | 
filter => "elvis.config", 60 | ruleset => elvis_config 61 | } 62 | ] 63 | } 64 | ] 65 | } 66 | ]. 67 | -------------------------------------------------------------------------------- /fmke-http-api.md: -------------------------------------------------------------------------------- 1 | # FMKe HTTP API 2 | 3 | FMKe has a total of 7 entities: `patient`, `facility`, `pharmacy`, `staff`, `event`, `prescription` and `treatment`. 4 | Once an FMK node is running, an HTTP server will be started on the port defined for the environment variable `HTTP_PORT`. 5 | **Important notes:** 6 | 7 | 1. The order of the parameters listed in each request matters since pattern matching imposes order on the list of body parameters that is passed in. 8 | 2. The casing (upper/lower case) for each of the parameters here listed matters. 9 | 10 | The HTTP endpoints that will be available are: 11 | 12 | ## Patients ```/patients[/:id]``` 13 | 14 | ### GET 15 | A patient with id `7` can be fetched by sending a GET request to `/patients/7`. 16 | ### POST 17 | It's possible to create a patient by sending a POST request to `/patients` with the following parameters: 18 | 19 | | Parameter | Type | 20 | | ------------- |:-------------:| 21 | | id | integer | 22 | | name | string | 23 | | address | string | 24 | ### PUT 25 | You can update an existing patient by sending a PUT request to `/patients/:patient_id`, where `:patient_id` is an ID of an already existent patient. You need to supply the following parameters: 26 | 27 | | Parameter | Type | 28 | | ------------- |:-------------:| 29 | | name | string | 30 | | address | string | 31 | 32 | ## Facilities ```/facilities[/:id]``` 33 | ### GET 34 | A facility with id `7` can be fetched by sending a GET request to `/facilities/7`. 35 | ### POST 36 | It's possible to create a facility by sending a POST request to `/facilities` with the following parameters: 37 | 38 | | Parameter | Type | 39 | | ------------- |:-------------:| 40 | | id | integer | 41 | | name | string | 42 | | address | string | 43 | | type | string | 44 | ### PUT 45 | You can update an existing facility by sending a PUT request to `/facilities/:facility_id`, where `:facility_id` is an ID of an already existent facility. You need to supply the following parameters: 46 | 47 | | Parameter | Type | 48 | | ------------- |:-------------:| 49 | | name | string | 50 | | address | string | 51 | | type | string | 52 | 53 | ## Pharmacies ```/pharmacies[/:id]``` 54 | ### GET 55 | A pharmacy with id `7` can be fetched by sending a GET request to `/pharmacies/7`. 56 | ### POST 57 | It's possible to create a pharmacy by sending a POST request to `/pharmacies` with the following parameters: 58 | 59 | | Parameter | Type | 60 | | ------------- |:-------------:| 61 | | id | integer | 62 | | name | string | 63 | | address | string | 64 | ### PUT 65 | You can update an existing pharmacy by sending a PUT request to `/pharmacies/:pharmacy_id`, where `:pharmacy_id` is an ID of an already existent pharmacy. You need to supply the following parameters: 66 | 67 | | Parameter | Type | 68 | | ------------- |:-------------:| 69 | | name | string | 70 | | address | string | 71 | 72 | ## Staff ```/staff[/:id]``` 73 | ### GET 74 | A staff member with id `7` can be fetched by sending a GET request to `/staff/7`. 
75 | ### POST 76 | It's possible to create a staff member by sending a POST request to `/staff` with the following parameters: 77 | 78 | | Parameter | Type | 79 | | ------------- |:-------------:| 80 | | id | integer | 81 | | name | string | 82 | | address | string | 83 | | speciality | string | 84 | 85 | ### PUT 86 | You can update an existing staff member by sending a PUT request to `/staff/:staff_id`, where `:staff_id` is the ID of an existing staff member. You need to supply the following parameters: 87 | 88 | | Parameter | Type | 89 | | ------------- |:-------------:| 90 | | name | string | 91 | | address | string | 92 | | speciality | string | 93 | 94 | ## Events ```/events[/:id]``` 95 | ### GET 96 | An event with id `7` can be fetched by sending a GET request to `/events/7`. 97 | ### POST 98 | It's possible to create an event by sending a POST request to `/events` with the following parameters: 99 | 100 | | Parameter | Type | 101 | | ------------- |:-------------:| 102 | | id | integer | 103 | | treatment_id | integer | 104 | | staff_id | integer | 105 | | timestamp | string | 106 | | description | string | 107 | 108 | ### PUT 109 | Events cannot currently be updated. 110 | 111 | ## Prescriptions ```/prescriptions[/:id]``` 112 | ### GET 113 | A prescription with id `7` can be fetched by sending a GET request to `/prescriptions/7`. 114 | ### POST 115 | It's possible to create a prescription by sending a POST request to `/prescriptions` with the following parameters: 116 | 117 | | Parameter | Type | 118 | | ------------- |:-------------:| 119 | | id | integer | 120 | | patient_id | integer | 121 | | prescriber_id | integer | 122 | | pharmacy_id | integer | 123 | | facility_id | integer | 124 | | date_prescribed | string | 125 | | drugs | string | 126 | 127 | **Important note:** `drugs` is a string made up of comma-separated values (CSV), one for each drug in the prescription. 128 | 129 | ### PUT 130 | You can update an existing prescription by sending a PUT request to `/prescriptions/:prescription_id`, where `:prescription_id` is the ID of an existing prescription. You need to supply the following parameters: 131 | 132 | | Parameter | Type | 133 | | ------------- |:-------------:| 134 | | date_processed | string | 135 | | drugs | string | 136 | 137 | **Important note:** `drugs` is a string made up of comma-separated values (CSV), one for each drug in the prescription. 138 | Despite both parameters having to be present for the request to be considered a valid PUT request, the system behaves in the following way: 139 | 140 | - If `date_processed` is set to a value, it will assume that you're trying to process the prescription. 141 | - If `date_processed` is empty and `drugs` is not, it will add all of the CSV values as new drugs of the prescription. 142 | 143 | ## Treatments ```/treatments[/:id]``` 144 | ### GET 145 | A treatment with id `7` can be fetched by sending a GET request to `/treatments/7`. 146 | ### POST 147 | It's possible to create a treatment by sending a POST request to `/treatments` with the following parameters: 148 | 149 | | Parameter | Type | 150 | | ------------- |:-------------:| 151 | | id | integer | 152 | | patient_id | integer | 153 | | prescriber_id | integer | 154 | | facility_id | integer | 155 | | date_prescribed | string | 156 | 157 | ### PUT 158 | Treatments cannot currently be updated. 
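To tie the prescription update semantics described above together, the two PUT modes could look like this with `curl` against a local FMKe node. The form-encoded body is an assumption (check `fmke_gen_http_handler.erl` for the body format the server actually expects), and the dates and drug names are made up.

```bash
# Mark prescription 7 as processed (date_processed set, drugs left empty):
curl -X PUT http://127.0.0.1:9090/prescriptions/7 \
     -d "date_processed=2017-01-01" -d "drugs="

# Add drugs to a still-open prescription (date_processed left empty):
curl -X PUT http://127.0.0.1:9090/prescriptions/7 \
     -d "date_processed=" -d "drugs=ibuprofen,paracetamol"
```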
159 | -------------------------------------------------------------------------------- /include/fmke.hrl: -------------------------------------------------------------------------------- 1 | -define(APP, fmke). 2 | -define(OPTIONS, [ 3 | target_database, connection_pool_size, database_addresses, database_ports, http_port, driver 4 | ]). 5 | -define(DEFAULTS, #{ 6 | connection_pool_size => 64, 7 | database_addresses => ["127.0.0.1"], 8 | http_port => 9090, 9 | data_model => non_nested 10 | }). 11 | -define(DEFAULT(Opt), maps:get(Opt, ?DEFAULTS, undefined)). 12 | 13 | -define(TIMEOUT, 60000). 14 | 15 | -define(CONFIG_FILE_PATH, "/config/fmke.config"). 16 | -define(ETS_TABLE_NAME, fmke_ets). 17 | 18 | %% TODO move this to an ETS table 19 | -define(SUPPORTED_DBS, [antidote, antidote_norm, riak_kv, riak_kv_norm, redis]). 20 | -define(SUPPORTED_KVS, [antidote, antidote_norm, riak_kv, riak_kv_norm, redis]). 21 | 22 | -record(prescription, { 23 | id :: id() | binary() 24 | ,patient_id :: id() | binary() 25 | ,pharmacy_id :: id() | binary() 26 | ,prescriber_id :: id() | binary() 27 | ,date_prescribed :: field() 28 | ,date_processed = <<"undefined">> :: field() 29 | ,drugs :: list(string() | binary()) 30 | ,is_processed = <<"prescription_not_processed">> :: field() 31 | }). 32 | 33 | -record(patient, { 34 | id :: id() 35 | ,name :: string() 36 | ,address :: string() 37 | ,prescriptions = [] :: list(#prescription{} | key()) 38 | % ,treatments=[] :: list(#treatment{}) 39 | % ,events=[] :: list(#event{}) 40 | }). 41 | 42 | -record(pharmacy, { 43 | id :: id() 44 | ,name :: string() 45 | ,address :: string() 46 | ,prescriptions = [] :: list(#prescription{} | key()) 47 | }). 48 | 49 | -record(facility, { 50 | id :: id() 51 | ,name :: string() 52 | ,address :: string() 53 | ,type :: string() 54 | % ,treatments=[] :: list(#treatment{}) 55 | % ,events=[] :: list(#event{}) 56 | }). 57 | 58 | -record(staff, { 59 | id :: id() 60 | ,name :: string() 61 | ,address :: string() 62 | ,speciality :: string() 63 | ,prescriptions = [] :: list(#prescription{} | key()) 64 | }). 65 | 66 | -type id() :: non_neg_integer(). 67 | -type field() :: binary(). 68 | -type reason() :: term(). 69 | -type crdt() :: term(). 70 | -type key() :: binary(). 71 | -type app_record() :: #facility{} | 72 | #patient{} | 73 | #pharmacy{} | 74 | #prescription{} | 75 | #staff{} | 76 | list(key()). 77 | 78 | -type entity() :: facility | patient | pharmacy | prescription | staff. 79 | 80 | -type facility() :: #facility{}. 81 | -type patient() :: #patient{}. 82 | -type pharmacy() :: #pharmacy{}. 83 | -type prescription() :: #prescription{}. 84 | -type staff() :: #staff{}. 85 | -------------------------------------------------------------------------------- /include/fmke_http.hrl: -------------------------------------------------------------------------------- 1 | -define (ERR_MISSING_BODY, <<"Missing request body or incomplete list of fields.">>). 2 | -define (ERR_BODY_IN_A_GET_REQUEST, <<"Get requests with body don't make sense.">>). 3 | -define (ERR_INVALID_PATIENT_ID, <<"Invalid user id.">>). 4 | -define (ERR_INVALID_FACILITY_ID, <<"Invalid facility id.">>). 5 | -define (ERR_INVALID_PHARMACY_ID, <<"Invalid pharmacy id.">>). 6 | -define (ERR_INVALID_PRESCRIPTION_ID, <<"Invalid prescription id.">>). 7 | -define (ERR_INVALID_TREATMENT_ID, <<"Invalid treatment id.">>). 8 | -define (ERR_INVALID_STAFF_ID, <<"Invalid staff member id.">>). 9 | -define (ERR_INVALID_EVENT_ID, <<"Invalid event id.">>). 10 | -define (MIN_ID, 0). 
11 | -define (BINDING_PATIENT_ID, id). 12 | -define (BINDING_FACILITY_ID, id). 13 | -define (BINDING_EVENT_ID, id). 14 | -define (BINDING_PRESCRIPTION_ID, id). 15 | -define (BINDING_PHARMACY_ID, id). 16 | -define (BINDING_TREATMENT_ID, id). 17 | -define (BINDING_STAFF_ID, id). 18 | -define (CONT_TYPE_JSON, #{<<"content-type">> => <<"application/json">>}). 19 | -define (ENCODE_FAIL(Reason), jsx:encode([{success,false},{result,Reason}])). 20 | -define (ENCODE_RESPONSE(Success,Result), jsx:encode([{success,Success},{result,Result}])). 21 | -define (ENCODE_SRV_ERR, ?ENCODE_FAIL(<<"Woops! This request failed. That's all we know!">>)). 22 | -define (ENCODE_SUCCESS(Result), jsx:encode([{success,true},{result,Result}])). 23 | -define (REPLY_WITH_SRV_ERR(Req), cowboy_req:reply(500, ?CONT_TYPE_JSON, ?ENCODE_SRV_ERR, Req)). 24 | -define (REPLY_WITH_MISS_BODY(Req), cowboy_req:reply(400, ?CONT_TYPE_JSON, ?ENCODE_FAIL("Missing HTTP body!"), Req)). 25 | -define (REPLY_WITH_FAIL(Req,Reason), cowboy_req:reply(400, ?CONT_TYPE_JSON, ?ENCODE_FAIL(Reason), Req)). 26 | -define (REPLY_WITH_OK(Req,Success,Result), cowboy_req:reply(200, ?CONT_TYPE_JSON, ?ENCODE_RESPONSE(Success,Result), Req)). 27 | -------------------------------------------------------------------------------- /include/fmke_kv.hrl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% @author goncalotomas 3 | %%% @copyright (C) 2017, 4 | %%% @doc 5 | %%% 6 | %%% @end 7 | %%% Created : 15. Mar 2017 11:18 8 | %%%------------------------------------------------------------------- 9 | -author("goncalotomas"). 10 | 11 | %% FMKe entity keys 12 | -define (PATIENT_ID_KEY, <<"patient_id">>). 13 | -define (PATIENT_NAME_KEY, <<"patient_name">>). 14 | -define (PATIENT_ADDRESS_KEY, <<"patient_address">>). 15 | -define (PATIENT_TREATMENTS_KEY, <<"patient_treatments">>). 16 | -define (PATIENT_PRESCRIPTIONS_KEY, <<"patient_prescriptions">>). 17 | -define (PATIENT_EVENTS_KEY, <<"patient_events">>). 18 | 19 | %% Pharmacy macros 20 | -define (PHARMACY_ID_KEY, <<"pharmacy_id">>). 21 | -define (PHARMACY_NAME_KEY, <<"pharmacy_name">>). 22 | -define (PHARMACY_ADDRESS_KEY, <<"pharmacy_address">>). 23 | -define (PHARMACY_PRESCRIPTIONS_KEY, <<"pharmacy_prescriptions">>). 24 | 25 | %% Prescription macros 26 | -define (PRESCRIPTION_ID_KEY, <<"prescription_id">>). 27 | -define (PRESCRIPTION_PATIENT_ID_KEY, <<"prescription_patient_id">>). 28 | -define (PRESCRIPTION_PRESCRIBER_ID_KEY, <<"prescription_prescriber_id">>). 29 | -define (PRESCRIPTION_PHARMACY_ID_KEY, <<"prescription_pharmacy_id">>). 30 | -define (PRESCRIPTION_FACILITY_ID_KEY, <<"prescription_facility_id">>). 31 | -define (PRESCRIPTION_DATE_PRESCRIBED_KEY, <<"prescription_date_prescribed">>). 32 | -define (PRESCRIPTION_IS_PROCESSED_KEY, <<"prescription_is_processed">>). 33 | -define (PRESCRIPTION_DATE_PROCESSED_KEY, <<"prescription_date_processed">>). 34 | -define (PRESCRIPTION_DRUGS_KEY, <<"prescription_drugs">>). 35 | -define (PRESCRIPTION_NOT_PROCESSED_VALUE, <<"prescription_not_processed">>). 36 | -define (PRESCRIPTION_PROCESSED_VALUE, <<"prescription_processed">>). 37 | 38 | %% Treatment macros 39 | -define (TREATMENT_ID_KEY, <<"treatment_id">>). 40 | -define (TREATMENT_PATIENT_ID_KEY, <<"treatment_patient_id">>). 41 | -define (TREATMENT_PRESCRIBER_ID_KEY, <<"treatment_prescriber_id">>). 42 | -define (TREATMENT_FACILITY_ID_KEY, <<"treatment_facility_id">>). 
43 | -define (TREATMENT_DATE_PRESCRIBED_KEY, <<"treatment_date_prescribed">>). 44 | -define (TREATMENT_HAS_ENDED_KEY, <<"treatment_has_ended">>). 45 | -define (TREATMENT_DATE_ENDED_KEY, <<"treatment_date_ended">>). 46 | -define (TREATMENT_PRESCRIPTIONS_KEY, <<"treatment_prescriptions">>). 47 | -define (TREATMENT_EVENTS_KEY, <<"treatment_events">>). 48 | -define (TREATMENT_ONGOING_KEY, "ongoing_treatment"). 49 | -define (TREATMENT_ENDED_KEY, "finished_treatment"). 50 | 51 | %% Medical Staff macros 52 | -define (STAFF_ID_KEY, <<"staff_id">>). 53 | -define (STAFF_NAME_KEY, <<"staff_name">>). 54 | -define (STAFF_ADDRESS_KEY, <<"staff_address">>). 55 | -define (STAFF_SPECIALITY_KEY, <<"staff_speciality">>). 56 | -define (STAFF_PRESCRIPTIONS_KEY, <<"staff_prescriptions">>). 57 | -define (STAFF_TREATMENTS_KEY, <<"staff_treatments">>). 58 | 59 | %% Facility macros 60 | -define (FACILITY_ID_KEY, <<"facility_id">>). 61 | -define (FACILITY_NAME_KEY, <<"facility_name">>). 62 | -define (FACILITY_ADDRESS_KEY, <<"facility_address">>). 63 | -define (FACILITY_TYPE_KEY, <<"facility_type">>). 64 | -define (FACILITY_PRESCRIPTIONS_KEY, <<"facility_prescriptions">>). 65 | -define (FACILITY_TREATMENTS_KEY, <<"facility_treatments">>). 66 | 67 | %% Event macros 68 | -define (EVENT_ID_KEY, <<"event_id">>). 69 | -define (EVENT_PATIENT_ID_KEY, <<"event_patient_id">>). 70 | -define (EVENT_DESCRIPTION_KEY, <<"event_description">>). 71 | -define (EVENT_TIMESTAMP_KEY, <<"event_timestamp">>). 72 | -define (EVENT_STAFF_ID_KEY, <<"event_staff_id">>). -------------------------------------------------------------------------------- /priv/build_schema.aql: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------- 2 | -- Database schema used when benchmarking AQL. Based on the 3 | -- schema used for cassandra, see build_schema.cql. 4 | -- 5 | -- AQL is an SQL-like interface for the AntidoteDB data store. 
6 | -- For more information and documentation see AQL's repository: 7 | -- https://github.com/mrshankly/secure-aql 8 | --------------------------------------------------------------- 9 | 10 | CREATE UPDATE-WINS TABLE FmkePatients ( 11 | ID int PRIMARY KEY, 12 | Name varchar, 13 | Address varchar 14 | ); 15 | 16 | CREATE UPDATE-WINS TABLE FmkePharmacies ( 17 | ID int PRIMARY KEY, 18 | Name varchar, 19 | Address varchar 20 | ); 21 | 22 | CREATE UPDATE-WINS TABLE FmkeMedicalStaff ( 23 | ID int PRIMARY KEY, 24 | Name varchar, 25 | Address varchar, 26 | Speciality varchar 27 | ); 28 | 29 | CREATE UPDATE-WINS TABLE FmkeTreatmentFacilities ( 30 | ID int PRIMARY KEY, 31 | Name varchar, 32 | Address varchar, 33 | Type varchar 34 | ); 35 | 36 | CREATE UPDATE-WINS TABLE FmkePrescriptions ( 37 | ID int PRIMARY KEY, 38 | PatID int, 39 | DocID int, 40 | PharmID int, 41 | DatePrescribed varchar, 42 | DateProcessed varchar 43 | ); 44 | 45 | CREATE UPDATE-WINS TABLE FmkePatientPrescriptions ( 46 | ID int PRIMARY KEY, 47 | PatientID int, 48 | PrescriptionID int 49 | ); 50 | CREATE INDEX FmkePatientPrescriptionsPatientIdx ON FmkePatientPrescriptions (PatientID); 51 | 52 | CREATE UPDATE-WINS TABLE FmkePharmacyPrescriptions ( 53 | ID int PRIMARY KEY, 54 | PharmacyID int, 55 | PrescriptionID int 56 | ); 57 | CREATE INDEX FmkePharmacyPrescriptionsPharmacyIdx ON FmkePharmacyPrescriptions (PharmacyID); 58 | 59 | CREATE UPDATE-WINS TABLE FmkeStaffPrescriptions ( 60 | ID int PRIMARY KEY, 61 | StaffID int, 62 | PrescriptionID int 63 | ); 64 | CREATE INDEX FmkeStaffPrescriptionsStaffIdx ON FmkeStaffPrescriptions (StaffID); 65 | 66 | CREATE UPDATE-WINS TABLE FmkePrescriptionDrugs ( 67 | ID int PRIMARY KEY, 68 | PrescriptionID int, 69 | Drug varchar 70 | ); 71 | CREATE INDEX FmkePrescriptionDrugsPrescriptionIdx ON FmkePrescriptionDrugs (PrescriptionID); 72 | -------------------------------------------------------------------------------- /priv/build_schema.cql: -------------------------------------------------------------------------------- 1 | CREATE KEYSPACE IF NOT EXISTS fmke 2 | WITH REPLICATION = { 3 | 'class': 'SimpleStrategy', 4 | 'replication_factor': 1 5 | }; 6 | 7 | CREATE TABLE IF NOT EXISTS fmke.patients ( 8 | ID int PRIMARY KEY, 9 | Name text, 10 | Address text, 11 | ); 12 | 13 | CREATE TABLE IF NOT EXISTS fmke.pharmacies ( 14 | ID int PRIMARY KEY, 15 | Name text, 16 | Address text, 17 | ); 18 | 19 | CREATE TABLE IF NOT EXISTS fmke.medical_staff ( 20 | ID int PRIMARY KEY, 21 | Name text, 22 | Address text, 23 | Speciality text, 24 | ); 25 | 26 | CREATE TABLE IF NOT EXISTS fmke.treatment_facilities ( 27 | ID int PRIMARY KEY, 28 | Name text, 29 | Address text, 30 | Type text, 31 | ); 32 | 33 | CREATE TABLE IF NOT EXISTS fmke.prescriptions ( 34 | ID int, 35 | PatID int, 36 | DocID int, 37 | PharmID int, 38 | DatePrescribed timestamp, 39 | DateProcessed timestamp, 40 | PRIMARY KEY (ID) 41 | ); 42 | 43 | CREATE TABLE IF NOT EXISTS fmke.patient_prescriptions ( 44 | PatientID int, 45 | PrescriptionID int, 46 | PRIMARY KEY (PatientID, PrescriptionID) 47 | ); 48 | 49 | CREATE TABLE IF NOT EXISTS fmke.pharmacy_prescriptions ( 50 | PharmacyID int, 51 | PrescriptionID int, 52 | PRIMARY KEY (PharmacyID, PrescriptionID) 53 | ); 54 | 55 | CREATE TABLE IF NOT EXISTS fmke.staff_prescriptions ( 56 | StaffID int, 57 | PrescriptionID int, 58 | PRIMARY KEY (StaffID, PrescriptionID) 59 | ); 60 | 61 | CREATE TABLE IF NOT EXISTS fmke.prescription_drugs ( 62 | PrescriptionID int, 63 | Drug text, 64 | PRIMARY KEY (PrescriptionID, 
Drug) 65 | ); 66 | 67 | exit 68 | -------------------------------------------------------------------------------- /priv/build_schema_fk.aql: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------- 2 | -- Database schema used when benchmarking AQL. Based on the 3 | -- schema used for cassandra, see build_schema.cql. 4 | -- 5 | -- This schema version uses foreign keys and the referential 6 | -- integrity mechanism of AQL. 7 | -- 8 | -- AQL is an SQL-like interface for the AntidoteDB data store. 9 | -- For more information and documentation see AQL's repository: 10 | -- https://github.com/mrshankly/secure-aql 11 | --------------------------------------------------------------- 12 | 13 | CREATE UPDATE-WINS TABLE FmkePatients ( 14 | ID int PRIMARY KEY, 15 | Name varchar, 16 | Address varchar 17 | ); 18 | 19 | CREATE UPDATE-WINS TABLE FmkePharmacies ( 20 | ID int PRIMARY KEY, 21 | Name varchar, 22 | Address varchar 23 | ); 24 | 25 | CREATE UPDATE-WINS TABLE FmkeMedicalStaff ( 26 | ID int PRIMARY KEY, 27 | Name varchar, 28 | Address varchar, 29 | Speciality varchar 30 | ); 31 | 32 | CREATE UPDATE-WINS TABLE FmkeTreatmentFacilities ( 33 | ID int PRIMARY KEY, 34 | Name varchar, 35 | Address varchar, 36 | Type varchar 37 | ); 38 | 39 | CREATE UPDATE-WINS TABLE FmkePrescriptions ( 40 | ID int PRIMARY KEY, 41 | PatID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePatients(ID), 42 | DocID int FOREIGN KEY UPDATE-WINS REFERENCES FmkeMedicalStaff(ID), 43 | PharmID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePharmacies(ID), 44 | DatePrescribed varchar, 45 | DateProcessed varchar 46 | ); 47 | 48 | CREATE UPDATE-WINS TABLE FmkePatientPrescriptions ( 49 | ID int PRIMARY KEY, 50 | PatientID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePatients(ID), 51 | PrescriptionID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePrescriptions(ID) 52 | ); 53 | CREATE INDEX FmkePatientPrescriptionsPatientIdx ON FmkePatientPrescriptions (PatientID); 54 | 55 | CREATE UPDATE-WINS TABLE FmkePharmacyPrescriptions ( 56 | ID int PRIMARY KEY, 57 | PharmacyID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePharmacies(ID), 58 | PrescriptionID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePrescriptions(ID) 59 | ); 60 | CREATE INDEX FmkePharmacyPrescriptionsPharmacyIdx ON FmkePharmacyPrescriptions (PharmacyID); 61 | 62 | CREATE UPDATE-WINS TABLE FmkeStaffPrescriptions ( 63 | ID int PRIMARY KEY, 64 | StaffID int FOREIGN KEY UPDATE-WINS REFERENCES FmkeMedicalStaff(ID), 65 | PrescriptionID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePrescriptions(ID) 66 | ); 67 | CREATE INDEX FmkeStaffPrescriptionsStaffIdx ON FmkeStaffPrescriptions (StaffID); 68 | 69 | CREATE UPDATE-WINS TABLE FmkePrescriptionDrugs ( 70 | ID int PRIMARY KEY, 71 | PrescriptionID int FOREIGN KEY UPDATE-WINS REFERENCES FmkePrescriptions(ID), 72 | Drug varchar 73 | ); 74 | CREATE INDEX FmkePrescriptionDrugsPrescriptionIdx ON FmkePrescriptionDrugs (PrescriptionID); 75 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {deps, [ 2 | {jsx, "~>2.9"}, 3 | {lager, "~>3.6"}, 4 | {poolboy, "~>1.5"}, 5 | {cowboy, "~>2.4"}, 6 | %% AntidoteDB 7 | {antidotec_pb, "~>0.2"}, 8 | %% Cassandra 9 | {erlcass, "~>3.2"}, 10 | %% Redis Cluster 11 | {eredis_cluster, {git, "https://github.com/adrienmo/eredis_cluster", {tag, "0.5.12"}}}, 12 | %% Riak KV 13 | {riak_client, "~>2.5"}, 14 | %% AQL client 
15 | {aqlc, "~>1.0"} 16 | ]}. 17 | 18 | 19 | {eunit_opts, [ 20 | verbose, 21 | {report, {eunit_surefire, [{dir,"."}]}} 22 | ]}. 23 | 24 | {erl_opts, [ 25 | debug_info, 26 | warn_untyped_record, 27 | warnings_as_errors, 28 | {platform_define, "^[0-9]+", namespaced_types}, 29 | {parse_transform, lager_transform} 30 | ]}. 31 | 32 | {plugins, [coveralls]}. 33 | {cover_enabled, true}. 34 | {cover_export_enabled, true}. 35 | {coveralls_coverdata, "_build/test/cover/*.coverdata"}. 36 | {coveralls_service_name, "travis-ci"}. 37 | 38 | {profiles, [ 39 | {prod, [ 40 | {erl_opts, [no_debug_info, warnings_as_errors]}, 41 | {relx, [{dev_mode, false}]} 42 | ]}, 43 | {lint, [ 44 | {plugins, [ 45 | {rebar3_lint, {git, "https://github.com/project-fifo/rebar3_lint.git", {tag, "v0.1.9"}}} 46 | ]} 47 | ]}, 48 | {test, [ 49 | {deps, [ 50 | {rand_str, "~>1.0"}, 51 | {cmd, "~>1.0"} 52 | ]} 53 | ]} 54 | ]}. 55 | 56 | {relx, [{release, {fmke, "0.1.0"}, [fmke], [ 57 | {vm_args, "config/vm.args"}, 58 | {dev_mode, false}, 59 | {include_erts, true}, 60 | {overlay, [ 61 | {copy, "bin/env", "bin"}, 62 | {copy, "config/fmke.config", "config/fmke.config"} 63 | ]}, 64 | {overlay_vars, "config/vars.config"}, 65 | {extended_start_script, true}]} 66 | ]}. 67 | 68 | {xref_checks, [ 69 | deprecated_function_calls, deprecated_functions, undefined_function_calls, undefined_functions 70 | ]}. 71 | 72 | {dist_node, [ 73 | {name, 'fmke@127.0.0.1'}, 74 | {setcookie, fmke} 75 | ]}. 76 | -------------------------------------------------------------------------------- /rebar.config.script: -------------------------------------------------------------------------------- 1 | case os:getenv("TRAVIS") of 2 | "true" -> 3 | lists:keystore(coveralls_service_job_id, 1, CONFIG, {coveralls_service_job_id, os:getenv("TRAVIS_JOB_ID")}); 4 | _ -> 5 | CONFIG 6 | end. 7 | -------------------------------------------------------------------------------- /rebar.lock: -------------------------------------------------------------------------------- 1 | {"1.2.0", 2 | [{<<"antidote_pb_codec">>,{pkg,<<"antidote_pb_codec">>,<<"0.1.1">>},1}, 3 | {<<"antidotec_pb">>,{pkg,<<"antidotec_pb">>,<<"0.2.8">>},0}, 4 | {<<"aqlc">>,{pkg,<<"aqlc">>,<<"1.0.2">>},0}, 5 | {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.6.3">>},0}, 6 | {<<"cowlib">>,{pkg,<<"cowlib">>,<<"2.7.3">>},1}, 7 | {<<"eredis">>,{pkg,<<"eredis">>,<<"1.2.0">>},1}, 8 | {<<"eredis_cluster">>, 9 | {git,"https://github.com/adrienmo/eredis_cluster", 10 | {ref,"69dd345867bd14353890c559ed07820e6c001ef2"}}, 11 | 0}, 12 | {<<"erlcass">>,{pkg,<<"erlcass">>,<<"3.2.4">>},0}, 13 | {<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.9">>},1}, 14 | {<<"hamcrest">>,{pkg,<<"basho_hamcrest">>,<<"0.4.1">>},2}, 15 | {<<"jsx">>,{pkg,<<"jsx">>,<<"2.10.0">>},0}, 16 | {<<"lager">>,{pkg,<<"lager">>,<<"3.7.0">>},0}, 17 | {<<"paillier">>,{pkg,<<"paillier">>,<<"1.0.0">>},1}, 18 | {<<"poolboy">>,{pkg,<<"poolboy">>,<<"1.5.2">>},0}, 19 | {<<"ranch">>,{pkg,<<"ranch">>,<<"1.7.1">>},1}, 20 | {<<"riak_client">>,{pkg,<<"riak_client">>,<<"2.5.3">>},0}, 21 | {<<"riak_pb">>,{pkg,<<"riak_pb">>,<<"2.3.2">>},1}]}. 
22 | [ 23 | {pkg_hash,[ 24 | {<<"antidote_pb_codec">>, <<"29891F77FB5DC8240495C425160EA152E9CA5753B7137E1866D9BA279F0F997B">>}, 25 | {<<"antidotec_pb">>, <<"70350652AE269BC561DA6DCC54DB6A1CFF203E01686E735C1341EFD925DEE598">>}, 26 | {<<"aqlc">>, <<"65299AB2878126EBE582DC52D6FE6044C03B24141ED0A89079DEADF71F114B81">>}, 27 | {<<"cowboy">>, <<"99AA50E94E685557CAD82E704457336A453D4ABCB77839AD22DBE71F311FCC06">>}, 28 | {<<"cowlib">>, <<"A7FFCD0917E6D50B4D5FB28E9E2085A0CEB3C97DEA310505F7460FF5ED764CE9">>}, 29 | {<<"eredis">>, <<"0B8E9CFC2C00FA1374CD107EA63B49BE08D933DF2CF175E6A89B73DD9C380DE4">>}, 30 | {<<"erlcass">>, <<"BE383601A8F29CE645B428BC25D0F5E9F490630811D4BD5C1DE8A7AD6C6AB04B">>}, 31 | {<<"goldrush">>, <<"F06E5D5F1277DA5C413E84D5A2924174182FB108DABB39D5EC548B27424CD106">>}, 32 | {<<"hamcrest">>, <<"FB7B2C92D252A1E9DB936750B86089ADDAEBEB8F87967FB4BBDDA61E8863338E">>}, 33 | {<<"jsx">>, <<"77760560D6AC2B8C51FD4C980E9E19B784016AA70BE354CE746472C33BEB0B1C">>}, 34 | {<<"lager">>, <<"563AB17CD32134A3DD17EC3B3622E6D8F827506AA4F8C489158879BED87D980B">>}, 35 | {<<"paillier">>, <<"65295A561BD3379961563B4F1E1EC41096139EF6E701AB61BF0D117B009C7DE4">>}, 36 | {<<"poolboy">>, <<"392B007A1693A64540CEAD79830443ABF5762F5D30CF50BC95CB2C1AAAFA006B">>}, 37 | {<<"ranch">>, <<"6B1FAB51B49196860B733A49C07604465A47BDB78AA10C1C16A3D199F7F8C881">>}, 38 | {<<"riak_client">>, <<"6B78373E576D80C7020BCBDA3B2C102221D32863580404F59D8791E5CDC19E79">>}, 39 | {<<"riak_pb">>, <<"48FFBF66DBB3F136AB9A7134BAC4E496754BAA5EF58C4F50A61326736D996390">>}]}, 40 | {pkg_hash_ext,[ 41 | {<<"antidote_pb_codec">>, <<"F5FE77DBBDFDAD0B01613C72EF1AE96B49BE8AA15F0A4F3B5B6B01727FFD677C">>}, 42 | {<<"antidotec_pb">>, <<"1EE55D1187EC37A7840845F11C2AA349AA82C998E301A9EC8744774D0D57909B">>}, 43 | {<<"aqlc">>, <<"8FBF65EB08A6A584D547437723F61D8A3BB62E00DC70681828680467EB95A929">>}, 44 | {<<"cowboy">>, <<"E5580029080F3F1AD17436FB97B0D5ED2ED4E4815A96BAC36B5A992E20F58DB6">>}, 45 | {<<"cowlib">>, <<"1E1A3D176D52DAEBBECBBCDFD27C27726076567905C2A9D7398C54DA9D225761">>}, 46 | {<<"eredis">>, <<"D9B5ABEF2C2C8ABA8F32AA018203E0B3DC8B1157773B254AB1D4C2002317F1E1">>}, 47 | {<<"erlcass">>, <<"E9F16196872FB6E46AD829FFB365E0EADE67B3D4E6D05B78D9A002BFC53654D9">>}, 48 | {<<"goldrush">>, <<"99CB4128CFFCB3227581E5D4D803D5413FA643F4EB96523F77D9E6937D994CEB">>}, 49 | {<<"hamcrest">>, <<"26974025BC61BC09EF5B13BE5DCE5035CA11BF37BF4A865E9D86C455C942298F">>}, 50 | {<<"jsx">>, <<"9A83E3704807298016968DB506F9FAD0F027DE37546EB838B3AE1064C3A0AD62">>}, 51 | {<<"lager">>, <<"97DC7E1C9E7289C3167F417E71FFE1DF28218537967E076800ECEDB1C28C9E48">>}, 52 | {<<"paillier">>, <<"C4C91422E4EE60152717D0EC0D30E77EE3502F4ED73CDB01A24ACE6650F2333D">>}, 53 | {<<"poolboy">>, <<"DAD79704CE5440F3D5A3681C8590B9DC25D1A561E8F5A9C995281012860901E3">>}, 54 | {<<"ranch">>, <<"451D8527787DF716D99DC36162FCA05934915DB0B6141BBDAC2EA8D3C7AFC7D7">>}, 55 | {<<"riak_client">>, <<"B792DB3CF207BE19778EE1E3F1DC37EA14A55DF8F9B148D19E9A095661A5DACC">>}, 56 | {<<"riak_pb">>, <<"7C0644BCBA8A423DD9C1AB85B6E7FF4DF0712D8A350F9698647CFF1996F0348C">>}]} 57 | ]. 58 | -------------------------------------------------------------------------------- /scripts/aws/1-setup-vms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # author goncalotomas 3 | # This script prepares several amazon virtual machines for later use. 
4 | # You should pass in a list of (public) IP addresses as arguments to the script, 5 | # as well as the following environment variables: 6 | # PRIVATEKEY: used to ssh into the amazon virtual machines. Every machine is 7 | # assumed to be accessible using one key. 8 | 9 | set -e # Any subsequent(*) commands which fail will cause the shell script 10 | # to exit immediately 11 | 12 | # args checking 13 | if [[ $# -lt 2 ]]; then 14 | echo "Error: usage $0 ..." 15 | exit 2 16 | fi; 17 | 18 | if [[ ! -e $1 ]]; then 19 | echo "Error: $1: no such file" 20 | exit 2 21 | fi; 22 | 23 | # env 24 | PRIVATEKEY=$1 25 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 26 | 27 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 28 | 29 | INSTALL_SOFTWARE_SCRIPT="./src/bin/worker-setup-machine.sh" 30 | REMOTE_INSTALL_SOFTWARE_SCRIPT="~/worker-setup-machine.sh" 31 | 32 | SSH_USERNAME=ubuntu 33 | SSH_OPTIONS="-i $PRIVATEKEY -o StrictHostKeyChecking=no" 34 | 35 | echo "[SCRIPT] RUNNING SCRIPT TO SETUP MULTIPLE REMOTE MACHINES..." 36 | 37 | # copy scripts to remote machines and add execute permission 38 | echo "[SCRIPT] 1/2: COPYING REQUIRED SCRIPTS TO REMOTE MACHINES..." 39 | for IP_ADDR in $IP_ADDR_LIST; do 40 | scp $SSH_OPTIONS $INSTALL_SOFTWARE_SCRIPT $SSH_USERNAME@$IP_ADDR:$REMOTE_INSTALL_SOFTWARE_SCRIPT 41 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR chmod u+x $REMOTE_INSTALL_SOFTWARE_SCRIPT 42 | done 43 | 44 | # install required software on remote machines 45 | echo "[SCRIPT] 2/2: INSTALLING NECESSARY SOFTWARE AND REPOSITORIES ON REMOTE MACHINES..." 46 | for IP_ADDR in $IP_ADDR_LIST; do 47 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR nohup $REMOTE_INSTALL_SOFTWARE_SCRIPT & 48 | pids="$pids $!" 49 | done 50 | 51 | echo "[SCRIPT] Waiting for SSH processes to finish their work..." 52 | for pid in $pids; do 53 | wait $pid || let "RESULT=1" 54 | done 55 | 56 | if [ "$RESULT" == "1" ]; then 57 | echo "[SCRIPT] Something went wrong in installing all the software!" 58 | exit 1 59 | else 60 | echo "[SCRIPT] Done. All remote machines have the required software stack and repositories." 61 | fi 62 | -------------------------------------------------------------------------------- /scripts/aws/2-start-antidote-nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # author goncalotomas 3 | # This script starts N replicas of antidote on previously configured remote vms. 4 | # You should pass in a list of (public) IP addresses as arguments to the script, 5 | # as well as the following environment variables: 6 | # PRIVATEKEY: used to ssh into the amazon virtual machines. Every machine is 7 | # assumed to be accessible using one key. 8 | # GITBRANCH: antidote branch to start up 9 | # CLEANMAKE = TRUE/FALSE: make relclean && make rel or not 10 | 11 | set -e # Any subsequent(*) commands which fail will cause the shell script 12 | # to exit immediately 13 | 14 | # args checking 15 | if [[ $# -lt 2 ]]; then 16 | echo "Error: usage $0 ..." 17 | exit 2 18 | fi; 19 | 20 | if [[ ! -e $1 ]]; then 21 | echo "Error: $1: no such file" 22 | exit 2 23 | fi; 24 | if [ -z "$CLEANMAKE" ]; then 25 | CLEANMAKE=TRUE 26 | fi 27 | if [ -z "$GITBRANCH" ]; then 28 | GITBRANCH="build-local-cluster-aws" 29 | fi 30 | 31 | # env 32 | PRIVATEKEY=$1 33 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 34 | 35 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 36 | 37 | SSH_OPTIONS="-i $PRIVATEKEY -o StrictHostKeyChecking=no" 38 | SSH_USERNAME=ubuntu 39 | 40 | echo "[SCRIPT] RUNNING SCRIPT TO START MULTIPLE ANTIDOTE REPLICAS..." 
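# worker-start-antidote.sh (referenced below) is copied to each node and then executed remotely to (re)build and boot antidote on that machine.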
41 | 42 | ANTIDOTE_SCRIPT="./src/bin/worker-start-antidote.sh" 43 | REMOTE_ANTIDOTE_SCRIPT="/home/ubuntu/worker-start-antidote.sh" 44 | 45 | # copy scripts to remote machines and add execute permission 46 | echo "[SCRIPT] COPYING REQUIRED SCRIPTS TO REMOTE MACHINES..." 47 | for IP_ADDR in $IP_ADDR_LIST; do 48 | scp $SSH_OPTIONS $ANTIDOTE_SCRIPT $SSH_USERNAME@$IP_ADDR:$REMOTE_ANTIDOTE_SCRIPT 49 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR chmod u+x $REMOTE_ANTIDOTE_SCRIPT 50 | done 51 | 52 | echo "[SCRIPT] COPIED ALL WORKER SCRIPTS." 53 | 54 | echo "[SCRIPT] STARTING REMOTE ANTIDOTE NODES..." 55 | for IP_ADDR in $IP_ADDR_LIST; do 56 | Command="ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR GITBRANCH=${GITBRANCH} CLEANMAKE=${CLEANMAKE} IP=${IP_ADDR} $REMOTE_ANTIDOTE_SCRIPT" 57 | echo "[SCRIPT] Starting antidote on node ${IP_ADDR}, using the following command:" 58 | echo "[SCRIPT] ${Command}" 59 | eval $Command & 60 | done 61 | 62 | # making sure the message is visible, antidote takes a few seconds to turn on 63 | sleep 10 64 | echo "[SCRIPT] Done. Antidote has been launched on the specified replicas." 65 | -------------------------------------------------------------------------------- /scripts/aws/3-create-antidote-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # author goncalotomas 3 | # This script joins N already running replicas of antidote into a cluster (DC). 4 | # You should pass in a list of (public) IP addresses as arguments to the script, 5 | # as well as the following environment variables: 6 | # PRIVATEKEY: used to ssh into the amazon virtual machines. Every machine is 7 | # assumed to be accessible using one key. 8 | 9 | set -e # Any subsequent(*) commands which fail will cause the shell script 10 | # to exit immediately 11 | 12 | # args checking 13 | if [[ $# -lt 2 ]]; then 14 | echo "Error: usage $0 ..." 15 | exit 2 16 | fi; 17 | 18 | if [[ ! -e $1 ]]; then 19 | echo "Error: $1: no such file" 20 | exit 2 21 | fi; 22 | if [ -z "$CLEANMAKE" ]; then 23 | CLEANMAKE=TRUE 24 | fi 25 | if [ -z "$GITBRANCH" ]; then 26 | GITBRANCH="build-local-cluster" 27 | fi 28 | 29 | # env 30 | PRIVATEKEY=$1 31 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 32 | 33 | SSH_USERNAME=ubuntu 34 | SSH_OPTIONS="-i $PRIVATEKEY -o StrictHostKeyChecking=no" 35 | 36 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 37 | 38 | for IP_ADDR in $IP_ADDR_LIST; do 39 | ACCUM="$ACCUM antidote@$IP_ADDR" 40 | REQUESTER=$IP_ADDR 41 | done 42 | 43 | echo "[SCRIPT] RUNNING SCRIPT TO JOIN MULTIPLE ANTIDOTE REPLICAS IN A CLUSTER..." 44 | 45 | echo "[SCRIPT] RUNNING THE JOIN CLUSTER SCRIPT FROM $REQUESTER..." 46 | 47 | JOIN_CLUSTER_SCRIPT="./src/bin/join_antidote_cluster.erl" 48 | REMOTE_JOIN_CLUSTER_SCRIPT="/home/ubuntu/join_antidote_cluster.erl" 49 | 50 | scp $SSH_OPTIONS $JOIN_CLUSTER_SCRIPT $SSH_USERNAME@$REQUESTER:$REMOTE_JOIN_CLUSTER_SCRIPT 51 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR chmod u+x $REMOTE_JOIN_CLUSTER_SCRIPT 52 | Command="ssh $SSH_OPTIONS $SSH_USERNAME@$REQUESTER $REMOTE_JOIN_CLUSTER_SCRIPT $ACCUM" 53 | echo "Requesting antidote cluster join on node $REQUESTER, using the following command:" 54 | echo "${Command}" 55 | eval $Command 56 | 57 | # cluster creation may take a while 58 | echo "[SCRIPT] Done. The specified antidote replicas are now joined in a cluster." 
59 | -------------------------------------------------------------------------------- /scripts/aws/3a-create-multi-dc-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # author goncalotomas 3 | # This script joins N already running replicas of antidote into a cluster (DC). 4 | # You should pass in a list of (public) IP addresses as arguments to the script, 5 | # as well as the following environment variables: 6 | # PRIVATEKEY: used to ssh into the amazon virtual machines. Every machine is 7 | # assumed to be accessible using one key. 8 | 9 | set -e # Any subsequent(*) commands which fail will cause the shell script 10 | # to exit immediately 11 | 12 | # args checking 13 | if [[ $# -lt 2 ]]; then 14 | echo "Error: usage $0 ..." 15 | exit 2 16 | fi; 17 | 18 | if [[ ! -e $1 ]]; then 19 | echo "Error: $1: no such file" 20 | exit 2 21 | fi; 22 | if [ -z "$CLEANMAKE" ]; then 23 | CLEANMAKE=TRUE 24 | fi 25 | if [ -z "$GITBRANCH" ]; then 26 | GITBRANCH="build-local-cluster" 27 | fi 28 | 29 | # env 30 | PRIVATEKEY=$1 31 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 32 | 33 | SSH_USERNAME=ubuntu 34 | SSH_OPTIONS="-i $PRIVATEKEY -o StrictHostKeyChecking=no" 35 | 36 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 37 | 38 | for IP_ADDR in $IP_ADDR_LIST; do 39 | ACCUM="$ACCUM antidote@$IP_ADDR" 40 | REQUESTER=$IP_ADDR 41 | done 42 | 43 | echo "[SCRIPT] RUNNING SCRIPT TO JOIN MULTIPLE ANTIDOTE CLUSTERS..." 44 | 45 | echo "[SCRIPT] RUNNING THE JOIN CLUSTER SCRIPT FROM $REQUESTER..." 46 | 47 | JOIN_CLUSTER_SCRIPT="./src/bin/join_dcs_script.erl" 48 | REMOTE_JOIN_CLUSTER_SCRIPT="/home/ubuntu/join_dcs_script.erl" 49 | 50 | scp $SSH_OPTIONS $JOIN_CLUSTER_SCRIPT $SSH_USERNAME@$REQUESTER:$REMOTE_JOIN_CLUSTER_SCRIPT 51 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR chmod u+x $REMOTE_JOIN_CLUSTER_SCRIPT 52 | Command="ssh $SSH_OPTIONS $SSH_USERNAME@$REQUESTER $REMOTE_JOIN_CLUSTER_SCRIPT $ACCUM" 53 | echo "Requesting antidote cluster join on node $REQUESTER, using the following command:" 54 | echo "${Command}" 55 | eval $Command 56 | 57 | # cluster creation may take a while 58 | echo "[SCRIPT] Done. The specified antidote replicas are now joined in a cluster." 59 | -------------------------------------------------------------------------------- /scripts/aws/4-start-fmke-nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # author goncalotomas 3 | # This script starts N replicas of FMKe on previously configured remote vms. 4 | # You should pass in a list of (public) IP addresses as arguments to the script, 5 | # as well as the following environment variables: 6 | # PRIVATEKEY: used to ssh into the amazon virtual machines. Every machine is 7 | # assumed to be accessible using one key. 8 | # GITBRANCH: FMKe branch to start up 9 | # ANTIDOTE_ADDRESS: list of CSV antidote IP addresses 10 | # ANTIDOTE_PB_PORT: list of CSV antidote PB ports (must be 1-to-1 with previous list) 11 | 12 | set -e # Any subsequent(*) commands which fail will cause the shell script 13 | # to exit immediately 14 | 15 | USAGE="Usage: ANTIDOTE_ADDRESS= ANTIDOTE_PB_PORT= $0 " 16 | 17 | # args checking 18 | if [[ $# -lt 2 ]]; then 19 | echo $USAGE 20 | exit 2 21 | fi; 22 | 23 | if [[ ! 
-e $1 ]]; then 24 | echo "Error: $1: no such file" 25 | exit 2 26 | fi; 27 | if [ -z "$ANTIDOTE_ADDRESS" ]; then 28 | echo $USAGE 29 | exit 2 30 | fi 31 | if [ -z "$ANTIDOTE_PB_PORT" ]; then 32 | echo $USAGE 33 | exit 2 34 | fi 35 | 36 | if [ -z "$CLEANMAKE" ]; then 37 | CLEANMAKE=TRUE 38 | fi 39 | if [ -z "$GITBRANCH" ]; then 40 | GITBRANCH="perf-and-errors" 41 | fi 42 | 43 | # env 44 | PRIVATEKEY=$1 45 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 46 | 47 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 48 | 49 | 50 | echo "[SCRIPT] RUNNING SCRIPT TO START MULTIPLE FMKe REPLICAS..." 51 | 52 | SSH_USERNAME=ubuntu 53 | SSH_OPTIONS="-i $PRIVATEKEY -o StrictHostKeyChecking=no" 54 | 55 | FMK_SCRIPT="./src/bin/worker-start-fmk.sh" 56 | REMOTE_FMK_SCRIPT="/home/ubuntu/worker-start-fmk.sh" 57 | 58 | # copy scripts to remote machines and add execute permission 59 | echo "[SCRIPT] COPYING REQUIRED SCRIPTS TO REMOTE MACHINES..." 60 | for IP_ADDR in $IP_ADDR_LIST; do 61 | scp $SSH_OPTIONS $FMK_SCRIPT $SSH_USERNAME@$IP_ADDR:$REMOTE_FMK_SCRIPT 62 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR chmod u+x $REMOTE_FMK_SCRIPT 63 | done 64 | echo "[SCRIPT] COPIED ALL WORKER SCRIPTS." 65 | 66 | ANTIDOTE_ADDRESS_ARR=(`echo ${ANTIDOTE_ADDRESS}`); 67 | ANTIDOTE_ADDRESS_ARR_SIZE=${#ANTIDOTE_ADDRESS_ARR[@]} 68 | ANTIDOTE_PORT_ARR=(`echo ${ANTIDOTE_PB_PORT}`); 69 | IP_ARR=(`echo ${IP_ADDR_LIST}`); 70 | 71 | for index in "${!IP_ARR[@]}"; do 72 | ## ASSIGN EACH FMK TO EACH ANTIDOTE IN A ROUND ROBIN FASHION 73 | ## SO THAT WE ALLOW THE CASE WHERE #FMKNODES > #ANTIDOTENODES 74 | Command="ssh $SSH_OPTIONS $SSH_USERNAME@${IP_ARR[$index]} GITBRANCH=${GITBRANCH} CLEANMAKE=${CLEANMAKE} IP=${IP_ARR[$index]} ANTIDOTE_ADDRESS=${ANTIDOTE_ADDRESS_ARR[$(($index % $ANTIDOTE_ADDRESS_ARR_SIZE))]} ANTIDOTE_PB_PORT=${ANTIDOTE_PORT_ARR[$(($index % $ANTIDOTE_ADDRESS_ARR_SIZE))]} ${REMOTE_FMK_SCRIPT}" 75 | echo "[SCRIPT] Starting antidote on node ${IP_ARR[$index]}, using the following command:" 76 | echo "[SCRIPT] ${Command}" 77 | eval $Command & 78 | done 79 | 80 | sleep 10 # fmk takes a while to boot up. 81 | echo "[SCRIPT] Done. FMKe has been launched on the specified replicas." 82 | -------------------------------------------------------------------------------- /scripts/aws/5-start-benchmarks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # author goncalotomas 3 | 4 | PRIVATEKEY=$1 5 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 6 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 7 | SSH_OPTIONS="-o StrictHostKeyChecking=no -i $PRIVATEKEY" 8 | USER=ubuntu 9 | 10 | if [ -z "$BENCHDURATION" ]; then 11 | BENCHDURATION=1 12 | fi 13 | if [ -z "$POPULATE_ANTIDOTE" ]; then 14 | POPULATE_ANTIDOTE=FALSE 15 | fi 16 | 17 | USAGE="FMK_HTTP_ADDRESSES= FMK_HTTP_PORTS= NUM_CLIENTS= [POPULATE_ANTIDOTE=] [BENCHDURATION=] $0 " 18 | 19 | # check that the script was called with the right parameters 20 | if [[ ! 
-e $1 ]]; then 21 | echo "Private key error: $1: no such file" 22 | exit 2 23 | fi; 24 | if [ -z "$FMK_HTTP_ADDRESSES" ]; then 25 | echo "Missing list of FMKe server addresses" 26 | echo ${USAGE} 27 | exit 1 28 | fi 29 | if [ -z "$FMK_HTTP_PORTS" ]; then 30 | echo "Missing list of FMKe server ports" 31 | echo ${USAGE} 32 | exit 1 33 | fi 34 | if [ -z "$NUM_CLIENTS" ]; then 35 | echo "Missing number of basho bench clients" 36 | echo ${USAGE} 37 | exit 1 38 | fi 39 | 40 | ########################################################## 41 | # Verify that all nodes are reachable 42 | ########################################################## 43 | echo "[SCRIPT]: STEP 1/4: TESTING REQUIREMENTS FOR EVERY BASHO BENCH NODE..." 44 | 45 | for IP_ADDR in $IP_ADDR_LIST; do 46 | ssh ${SSH_OPTIONS} ${USER}@${IP_ADDR} exit 47 | if [ "$?" = 0 ]; then 48 | echo "[SCRIPT] SSH connection OK for ${IP_ADDR}" 49 | else 50 | echo "[SCRIPT] SSH connection NOT OK for ${IP_ADDR}, aborting..." 51 | exit 1 52 | fi 53 | done 54 | 55 | echo "[SCRIPT] All nodes are reachable." 56 | 57 | REMOTE_FILE_PATH="/home/ubuntu/basho_bench/_build/default/bin/basho_bench" 58 | 59 | ######################################################### 60 | # CONNECTION TEST STAGE # 61 | ######################################################### 62 | for IP_ADDR in $IP_ADDR_LIST; do 63 | ssh ${SSH_OPTIONS} ${USER}@${IP_ADDR} cat $REMOTE_FILE_PATH > /dev/null 2>&1 64 | if [ "$?" = 0 ]; then 65 | echo "[SCRIPT] Basho bench escript is present for ${IP_ADDR}" 66 | else 67 | echo "[SCRIPT] Basho bench is not compiled in node ${IP_ADDR}, aborting..." 68 | exit 1 69 | fi 70 | done 71 | 72 | echo "[SCRIPT]: STEP 1/4: Done. All nodes contain a compiled version of basho_bench." 73 | 74 | ######################################################### 75 | # BENCHMARK CONFIGURATION STAGE # 76 | ######################################################### 77 | FMK_ADDRESS_ARR=(`echo ${FMK_HTTP_ADDRESSES}`); 78 | FMK_ADDRESS_ARR_SIZE=${#FMK_ADDRESS_ARR[@]} 79 | FMK_PORT_ARR=(`echo ${FMK_HTTP_PORTS}`); 80 | IP_ARR=(`echo ${IP_ADDR_LIST}`); 81 | 82 | echo "[SCRIPT]: STEP 2/4: Editing configuration files for each bench node..." 83 | REMOTE_CONFIG_FILE="/home/ubuntu/basho_bench/examples/fmkclient.config" 84 | WORKER_SCRIPT="./src/bin/worker-configure-benchmark.sh" 85 | REMOTE_WORKER_SCRIPT="/home/ubuntu/worker-configure-benchmark.sh" 86 | WORKER_BENCH_SCRIPT="./src/bin/worker-start-basho-bench.sh" 87 | REMOTE_WORKER_BENCH_SCRIPT="/home/ubuntu/worker-start-basho-bench.sh" 88 | 89 | for index in "${!IP_ARR[@]}"; do 90 | ## ASSIGN EACH BASHO BENCH TO EACH FMK IN A ROUND ROBIN FASHION 91 | ## SO THAT WE ALLOW THE CASE WHERE #BENCHENODES > #FMKNODES 92 | echo "[SCRIPT]: Copying configuration script to remote machine..." 93 | scp ${SSH_OPTIONS} ${WORKER_SCRIPT} ${USER}@${IP_ARR[$index]}:${REMOTE_WORKER_SCRIPT} 94 | ssh ${SSH_OPTIONS} ${USER}@${IP_ARR[$index]} chmod u+x ${REMOTE_WORKER_SCRIPT} 95 | echo "[SCRIPT]: Configuration script copied successfully." 96 | echo "[SCRIPT]: Copying runnable worker script to remote machine..." 97 | scp ${SSH_OPTIONS} ${WORKER_BENCH_SCRIPT} ${USER}@${IP_ARR[$index]}:${REMOTE_WORKER_BENCH_SCRIPT} 98 | ssh ${SSH_OPTIONS} ${USER}@${IP_ARR[$index]} chmod u+x ${REMOTE_WORKER_BENCH_SCRIPT} 99 | echo "[SCRIPT]: Runnable worker script copied successfully." 100 | echo "[SCRIPT]: Running configuration script..." 
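# All benchmark parameters are passed through the environment; each bench node is paired with an FMKe address/port picked round-robin by index.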
101 | ssh ${SSH_OPTIONS} ${USER}@${IP_ARR[$index]} NUM_CLIENTS=${NUM_CLIENTS} BENCHDURATION=${BENCHDURATION} IP_ADDR=${IP_ARR[$index]} FMK_HTTP_ADDRESSES=${FMK_ADDRESS_ARR[$(($index % $FMK_ADDRESS_ARR_SIZE))]} FMK_HTTP_PORTS=${FMK_PORT_ARR[$(($index % $FMK_ADDRESS_ARR_SIZE))]} REMOTE_CONFIG_FILE=${REMOTE_CONFIG_FILE} ${REMOTE_WORKER_SCRIPT} 102 | done 103 | 104 | ######################################################### 105 | # ANTIDOTE POPULATION STAGE # 106 | ######################################################### 107 | echo "[SCRIPT]: STEP 3/4: Checking if antidote population has been requested..." 108 | if [ "$POPULATE_ANTIDOTE" = TRUE ] 109 | then 110 | echo "[SCRIPT] Antidote population has been requested." 111 | for IP_ADDR in $IP_ADDR_LIST; do 112 | REQUESTER=${IP_ADDR} ## This is dumb but it works, so I'll leave it 113 | done; 114 | for IP_ADDR in $FMK_HTTP_ADDRESSES; do 115 | POPULATION_ADDRESS=${IP_ADDR} ## This is dumb but it works, so I'll leave it 116 | done; 117 | 118 | echo "[SCRIPT] Running antidote population worker script..." 119 | LOCAL_POPULATION_SCRIPT="./src/bin/fmk_setup_script.erl" 120 | REMOTE_POPULATION_SCRIPT="/home/ubuntu/fmk_setup_script.erl" 121 | POPULATOR_NODE_REF="populate@${REQUESTER}" 122 | FMK_NODE_REF="fmk@${POPULATION_ADDRESS}" 123 | scp $SSH_OPTIONS $LOCAL_POPULATION_SCRIPT $USER@$IP_ADDR:$REMOTE_POPULATION_SCRIPT 124 | ssh $SSH_OPTIONS $USER@$IP_ADDR chmod u+x $REMOTE_POPULATION_SCRIPT 125 | ssh $SSH_OPTIONS $USER@$IP_ADDR $REMOTE_POPULATION_SCRIPT $POPULATOR_NODE_REF $FMK_NODE_REF 126 | else 127 | echo "[SCRIPT] No request for antidote population found. Continuing..." 128 | fi 129 | echo "STEP 3/4: Done." 130 | 131 | ######################################################### 132 | # BENCHMARKING STAGE # 133 | ######################################################### 134 | REMOTE_BB_SCRIPT=${REMOTE_WORKER_BENCH_SCRIPT} 135 | echo "[SCRIPT]: STEP 4/4: Starting benchmarks..." 136 | for IP_ADDR in $IP_ADDR_LIST; do 137 | echo "[SCRIPT]: Starting benchmark in node ${IP_ADDR}..." 138 | ssh $SSH_OPTIONS $USER@${IP_ADDR} GITBRANCH=${GITBRANCH} CLEANMAKE=${CLEANMAKE} ${REMOTE_BB_SCRIPT} & 139 | done 140 | echo "[SCRIPT]: BENCHMARKS STARTED IN ALL NODES." 141 | 142 | echo "[SCRIPT]: I'm just gonna sleep for ${BENCHDURATION} minutes, ok? BRB." 143 | 144 | sleep $((${BENCHDURATION}*60)) 145 | 146 | echo "ZzzzzZZZzzzzzZZZzzzzz...." 147 | 148 | sleep 45 149 | 150 | echo "[SCRIPT]: Done!" 151 | -------------------------------------------------------------------------------- /scripts/aws/6-prepare-results.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # author goncalotomas 3 | set -e 4 | 5 | if [ -z "$PRIVATEKEY" ]; then 6 | PRIVATEKEY=~/.ssh/fmke_experiments.pem 7 | fi 8 | if [[ ! -e $PRIVATEKEY ]]; then 9 | echo "Error: $PRIVATEKEY: no such file" 10 | exit 2 11 | fi; 12 | 13 | BENCHMARKS_DIR=benchmarks 14 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 15 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 16 | SSH_OPTIONS="-o StrictHostKeyChecking=no -i $PRIVATEKEY" 17 | USER=ubuntu 18 | 19 | if [ ! 
-d "$BENCHMARKS_DIR" ]; then 20 | mkdir $BENCHMARKS_DIR 21 | fi 22 | 23 | IP_ARR=(`echo ${IP_ADDR_LIST}`); 24 | 25 | WORKER_SCRIPT="${PWD}/src/bin/compile-and-compress-results.sh" 26 | REMOTE_WORKER_SCRIPT="/home/ubuntu/compile-and-compress-results.sh" 27 | 28 | cd $BENCHMARKS_DIR 29 | 30 | for index in "${!IP_ARR[@]}"; do 31 | ## COMPILE RESULTS, TAR, FETCH TO LOCAL dir 32 | echo "[SCRIPT]: Copying worker script to remote machine..." 33 | scp ${SSH_OPTIONS} ${WORKER_SCRIPT} ${USER}@${IP_ARR[$index]}:${REMOTE_WORKER_SCRIPT} 34 | ssh ${SSH_OPTIONS} ${USER}@${IP_ARR[$index]} chmod u+x ${REMOTE_WORKER_SCRIPT} & 35 | echo "[SCRIPT]: Worker script copied successfully." 36 | echo "[SCRIPT]: Running worker script..." 37 | ssh ${SSH_OPTIONS} ${USER}@${IP_ARR[$index]} WORKERID=$(($index + 1)) ${REMOTE_WORKER_SCRIPT} & 38 | pids="$pids $!" 39 | done 40 | 41 | echo "[SCRIPT] Waiting for SSH processes to finish their work..." 42 | for pid in $pids; do 43 | wait $pid || let "RESULT=1" 44 | done 45 | 46 | if [ "$RESULT" == "1" ]; then 47 | echo "[SCRIPT] Something went wrong in installing all the software!" 48 | exit 1 49 | else 50 | echo "[SCRIPT] Done. All remote machines have the required software stack and repositories." 51 | fi 52 | -------------------------------------------------------------------------------- /scripts/aws/7-fetch-and-merge-results.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # author goncalotomas 3 | set -e 4 | 5 | if [ -z "$PRIVATEKEY" ]; then 6 | PRIVATEKEY=~/.ssh/fmke_experiments.pem 7 | fi 8 | if [[ ! -e $PRIVATEKEY ]]; then 9 | echo "Error: $PRIVATEKEY: no such file" 10 | exit 2 11 | fi; 12 | 13 | BENCHMARKS_DIR=${PWD}/benchmarks 14 | KEY_FILE_NAME=$(basename $PRIVATEKEY) 15 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 16 | SSH_OPTIONS="-o StrictHostKeyChecking=no -i $PRIVATEKEY" 17 | USER=ubuntu 18 | 19 | if [ ! -d "$BENCHMARKS_DIR" ]; then 20 | mkdir $BENCHMARKS_DIR 21 | fi 22 | 23 | IP_ARR=(`echo ${IP_ADDR_LIST}`); 24 | 25 | WORKER_SCRIPT="${PWD}/src/bin/compile-and-compress-results.sh" 26 | REMOTE_RESULTS="/home/ubuntu/compile-and-compress-results.sh" 27 | 28 | cd $BENCHMARKS_DIR 29 | 30 | for index in "${!IP_ARR[@]}"; do 31 | ## FETCH TO LOCAL dir 32 | echo "[SCRIPT]: Copying results from remote machine..." 33 | WORKERID=$(($index + 1)) 34 | FILENAME="results-${WORKERID}.tar.gz" 35 | scp ${SSH_OPTIONS} ${USER}@${IP_ARR[$index]}:${FILENAME} . 36 | echo "[SCRIPT]: Copied results from worker ${WORKERID}." 37 | done 38 | 39 | BASHO_BENCH_DIR=".." 40 | BenchResultsDirectory=$BENCHMARKS_DIR 41 | cd .. 42 | #################################################### 43 | # Merge results in the test directory into a single one and create the results file image TODO 44 | #################################################### 45 | #Call the merge results script 46 | CommandToRunMergeScript="BenchResultsDirectory=$BenchResultsDirectory ../master-mergeResults.sh" 47 | echo "[SCRIPT]: Calling merge script..." 48 | eval $CommandToRunMergeScript 49 | 50 | # Create an image with the summary 51 | CommandToBuildPng="Rscript --vanilla priv/summary.r -i $BenchResultsDirectory/summary" 52 | echo "--##--Master ${MY_IP}: Processing results into a summary.png file..." 53 | echo "--##--Master ${MY_IP}: $CommandToBuildPng" 54 | cd $BASHO_BENCH_DIR/../../ 55 | eval $CommandToBuildPng 56 | echo "--##--Master ${MY_IP}: DONE, see your results!!!" 
57 | -------------------------------------------------------------------------------- /scripts/aws/build_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e # Any subsequent(*) commands which fail will cause the shell script 3 | # to exit immediately 4 | 5 | # args checking 6 | if [[ $# -lt 2 ]]; then 7 | echo "Error: usage $0 ..." 8 | exit 2 9 | fi; 10 | 11 | if [[ ! -e $1 ]]; then 12 | echo "Error: $1: no such file" 13 | exit 2 14 | fi; 15 | 16 | # env 17 | KEY_FILE_PATH=$1 18 | KEY_FILE_NAME=$(basename $KEY_FILE_PATH) 19 | BUILD_SCRIPT_PATH="./src/bin/worker_instance_build.sh" 20 | BUILD_SCRIPT_REMOTE_PATH="~/worker_instance_build.sh" 21 | 22 | IP_ADDR_LIST=$(echo $* | cut -d' ' -f2-) 23 | IP_ADDR_FILE="./ip_addr_list.txt" 24 | echo $IP_ADDR_LIST | tr " " "\n" > $IP_ADDR_FILE 25 | 26 | SSH_USERNAME=ubuntu 27 | SSH_OPTIONS="-i $KEY_FILE_PATH -o StrictHostKeyChecking=no" 28 | 29 | # prepare the images for the build 30 | for IP_ADDR in $IP_ADDR_LIST; do 31 | scp $SSH_OPTIONS $KEY_FILE_PATH $SSH_USERNAME@$IP_ADDR:~/.ssh 32 | scp $SSH_OPTIONS $BUILD_SCRIPT_PATH $SSH_USERNAME@$IP_ADDR:$BUILD_SCRIPT_REMOTE_PATH 33 | scp $SSH_OPTIONS $IP_ADDR_FILE $SSH_USERNAME@$IP_ADDR:~/ 34 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR chmod +x $BUILD_SCRIPT_REMOTE_PATH 35 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR chmod 600 "~/.ssh/$KEY_FILE_NAME" 36 | done 37 | 38 | # build images 39 | for IP_ADDR in $IP_ADDR_LIST; do 40 | ssh $SSH_OPTIONS $SSH_USERNAME@$IP_ADDR nohup $BUILD_SCRIPT_REMOTE_PATH & 41 | done; 42 | 43 | -------------------------------------------------------------------------------- /scripts/aws/get_public_address.sh: -------------------------------------------------------------------------------- 1 | curl -s checkip.dyndns.org | sed -e 's/.*Current IP Address: //' -e 's/<.*$//' 2 | -------------------------------------------------------------------------------- /scripts/aws/src/bin/compile-and-compress-results.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | cd basho_bench 3 | Rscript --vanilla priv/summary.r -i ../tests/current 4 | cd - 5 | tar hczf "results-${WORKERID}.tar.gz" tests/current 6 | echo "[SCRIPT]: Compiled and tar'd results for worker ${WORKERID}." 7 | -------------------------------------------------------------------------------- /scripts/aws/src/bin/fmk_setup_script.erl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | %% -*- erlang -*- 3 | %%! -smp enable -name setup@127.0.0.1 -cookie antidote -mnesia debug verbose 4 | -mode(compile). 5 | -define(ZIPF_SKEW, 1). 6 | -define(NUMTHREADS, 10). 7 | 8 | 9 | -record(fmkconfig, { 10 | numpatients, 11 | numpharmacies, 12 | numfacilities, 13 | numstaff, 14 | numprescriptions 15 | }). 
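%% The fmkconfig record mirrors the entity counts declared in basho_bench's fmkclient.config;
%% main/1 consults that file and creates the corresponding number of each entity via RPC to the FMKe node.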
16 | 17 | main([MyNodeRef, FmkNodeRef]) -> 18 | % io:format("Loading external libs...~n"), 19 | % true = code:add_pathz(filename:dirname(escript:script_name()) 20 | % ++ "/../_build/default/lib/hackney/ebin"), 21 | % true = code:add_pathz(filename:dirname(escript:script_name()) 22 | % ++ "/../_build/default/lib/idna/ebin"), 23 | % true = code:add_pathz(filename:dirname(escript:script_name()) 24 | % ++ "/../_build/default/lib/mimerl/ebin"), 25 | % true = code:add_pathz(filename:dirname(escript:script_name()) 26 | % ++ "/../_build/default/lib/certifi/ebin"), 27 | % true = code:add_pathz(filename:dirname(escript:script_name()) 28 | % ++ "/../_build/default/lib/metrics/ebin"), 29 | % true = code:add_pathz(filename:dirname(escript:script_name()) 30 | % ++ "/../_build/default/lib/ssl_verify_fun/ebin"), 31 | % io:format("Loaded external libs.~n"), 32 | DirName = filename:dirname(escript:script_name()), 33 | FileName = DirName ++ "/basho_bench/examples/fmkclient.config", 34 | io:format("Checking configuration file ~p~n",[FileName]), 35 | {ok, FmkConfigProps} = file:consult(FileName), 36 | FmkConfig = #fmkconfig{ 37 | numpatients = proplists:get_value(numpatients, FmkConfigProps), 38 | numpharmacies = proplists:get_value(numpharmacies, FmkConfigProps), 39 | numfacilities = proplists:get_value(numfacilities, FmkConfigProps), 40 | numstaff = proplists:get_value(numstaff, FmkConfigProps), 41 | numprescriptions = proplists:get_value(numprescriptions, FmkConfigProps) 42 | }, 43 | % io:format("initializing hackney HTTP client...~n",[]), 44 | % hackney:start(), 45 | % io:format("initialized hackney HTTP client."), 46 | MyNodeName = list_to_atom(MyNodeRef), 47 | FmkNode = list_to_atom(FmkNodeRef), 48 | io:format("client node is ~p.\n", [MyNodeName]), 49 | io:format("fmk node target set as ~p.\n", [FmkNode]), 50 | net_kernel:start([MyNodeName, longnames]), 51 | erlang:set_cookie(node(), antidote), 52 | %% check if fmk is running 53 | case net_adm:ping(FmkNode) of 54 | pang -> 55 | io:format("cannot connect to fmk.\n", []); 56 | pong -> 57 | ok 58 | end, 59 | io:format("populating antidote...\n", []), 60 | add_patients(FmkNode, FmkConfig#fmkconfig.numpatients), 61 | add_pharmacies(FmkNode, FmkConfig#fmkconfig.numpharmacies), 62 | add_facilities(FmkNode, FmkConfig#fmkconfig.numfacilities), 63 | add_staff(FmkNode, FmkConfig#fmkconfig.numstaff), 64 | add_prescription(FmkNode, FmkConfig#fmkconfig.numprescriptions, FmkConfig), 65 | io:format("finished populating antidote.\n", []); 66 | main(_) -> 67 | usage(). 68 | 69 | usage() -> 70 | io:format("usage: node_name fmk_node_name\n"), 71 | halt(1). 72 | 73 | 74 | parallel_create(Name, First, Last, NumThreads, Fun) -> 75 | Count = 1 + Last - First, 76 | PerDivision = Count div NumThreads, 77 | NumDivisions = Count div PerDivision, 78 | Divisions = [{First + I * PerDivision, case I + 1 of NumDivisions -> Last; _ -> 79 | First + (I + 1) * PerDivision - 1 end} || I <- lists:seq(0, NumDivisions - 1)], 80 | [{F1, L1} | OtherDivisions] = Divisions, 81 | parallel_create_h(Name, F1, L1, self(), Fun), 82 | [parallel_create_h(F, L, self(), Fun) || {F, L} <- OtherDivisions], 83 | [receive {done, F, L} -> ok end || {F, L} <- Divisions], 84 | ok. 
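%% parallel_create/5 splits the [First, Last] id range into one contiguous chunk per thread and waits
%% for a {done, First, Last} message from each worker; the parallel_create_h helpers below spawn those
%% workers (the 5-arity version also prints progress roughly once per percent of its chunk).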
85 | 86 | parallel_create_h(Name, First, Last, Pid, Fun) -> 87 | Count = (1 + Last - First), 88 | case Count > 0 of 89 | false -> ok; 90 | true -> 91 | spawn( 92 | fun() -> 93 | Fun2 = 94 | fun(I) -> 95 | case (I - First) rem max(1, Count div 100) of 96 | 0 -> 97 | io:format("Creating ~p ~p%~n", [Name, 100 * (I - First) / Count]); 98 | _ -> 99 | ok 100 | end, 101 | Fun(I) 102 | end, 103 | lists:map(Fun2, lists:seq(First, Last)), 104 | Pid ! {done, First, Last} 105 | end) 106 | end. 107 | 108 | parallel_create_h(First, Last, Pid, Fun) -> 109 | spawn( 110 | fun() -> 111 | lists:map(Fun, lists:seq(First, Last)), 112 | Pid ! {done, First, Last} 113 | end). 114 | 115 | 116 | 117 | add_pharmacies(FmkNode, Amount) -> 118 | parallel_create(pharmacies, 1, Amount, ?NUMTHREADS, 119 | fun(I) -> 120 | case I rem 3 of 121 | 0 -> run_op(FmkNode, create_pharmacy, [I, "Chai Pharmacy", "Costa da Caparica, Portugal"]); 122 | 1 -> run_op(FmkNode, create_pharmacy, [I, "Carlos Pharmacy", "Costa da Caparica, Portugal"]); 123 | 2 -> run_op(FmkNode, create_pharmacy, [I, "Shanghai Central Pharmacy", "Shanghai, China"]) 124 | end 125 | end). 126 | 127 | add_facilities(FmkNode, Amount) -> 128 | parallel_create(facilities, 1, Amount, ?NUMTHREADS, 129 | fun(I) -> 130 | case I rem 10 of 131 | 0 -> run_op(FmkNode, create_facility, [I, "Amager Hospital", "Amager Island, DK", "Hospital"]); 132 | 1 -> run_op(FmkNode, create_facility, [I, "Bispebjerg Hospital", "Copenhagen, DK", "Hospital"]); 133 | 2 -> run_op(FmkNode, create_facility, [I, "Bornholms Hospital", "Bornholms Island, DK", "Hospital"]); 134 | 3 -> run_op(FmkNode, create_facility, [I, "Gentofte Hospital", "Gentofte, DK", "Hospital"]); 135 | 4 -> run_op(FmkNode, create_facility, [I, "Glostrup Hospital", "Glostrup, DK", "Hospital"]); 136 | 5 -> run_op(FmkNode, create_facility, [I, "Herlev Hospital", "Herlev, DK", "Hospital"]); 137 | 6 -> run_op(FmkNode, create_facility, [I, "Nordsjællands Hospital", "Esbønderup, DK", "Hospital"]); 138 | 7 -> run_op(FmkNode, create_facility, [I, "Privathospitalet Danmark", "Charlottenlund, DK", "Hospital"]); 139 | 8 -> run_op(FmkNode, create_facility, [I, "Rigshospitalet", "Copenhagen, DK", "Hospital"]); 140 | 9 -> run_op(FmkNode, create_facility, [I, "Sct. Hans Hospital", "Zealand Island, DK", "Hospital"]) 141 | end 142 | end). 143 | 144 | add_patients(FmkNode, Amount) -> 145 | parallel_create(patient, 1, Amount, 10, 146 | fun(I) -> 147 | run_op(FmkNode, create_patient, [I, "Phineas Gage", "New Hampshire, United States"]) 148 | end). 149 | 150 | add_staff(FmkNode, Amount) -> 151 | parallel_create(staff, 1, Amount, ?NUMTHREADS, 152 | fun(I) -> 153 | run_op(FmkNode, create_staff, [I, "Alexander Fleming", "London, UK", "Pharmacologist"]) 154 | end). 155 | 156 | add_prescription(_FmkNode, 0, _FmkConfig) -> ok; 157 | add_prescription(FmkNode, Amount, FmkConfig) when Amount > 0 -> 158 | ListPatientIds = gen_sequence(FmkConfig#fmkconfig.numpatients, ?ZIPF_SKEW, FmkConfig#fmkconfig.numprescriptions), 159 | add_prescription_rec(FmkNode, Amount, ListPatientIds, FmkConfig). 
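%% add_prescription_rec/4 walks the Zipf-distributed list of patient ids produced by gen_sequence/3,
%% pairing each new prescription with a randomly chosen pharmacy, prescriber and facility.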
160 | 161 | add_prescription_rec(_FmkNode, 0, _ListPatients, _FmkConfig) -> ok; 162 | add_prescription_rec(FmkNode, PrescriptionId, ListPatientIds, FmkConfig) -> 163 | [CurrentId | Tail] = ListPatientIds, 164 | PharmacyId = rand:uniform(FmkConfig#fmkconfig.numpharmacies), 165 | PrescriberId = rand:uniform(FmkConfig#fmkconfig.numstaff), 166 | FacilityId = rand:uniform(FmkConfig#fmkconfig.numfacilities), 167 | run_op(FmkNode, create_prescription, [PrescriptionId, CurrentId, PrescriberId, PharmacyId, FacilityId, "1/1/2017", ["Acetaminophen"]]), 168 | add_prescription_rec(FmkNode, PrescriptionId - 1, Tail, FmkConfig). 169 | 170 | run_op(FmkNode, create_pharmacy, Params) -> 171 | [_Id, _Name, _Address] = Params, 172 | run_rpc_op(FmkNode, create_pharmacy, Params); 173 | run_op(FmkNode, create_facility, Params) -> 174 | [_Id, _Name, _Address, _Type] = Params, 175 | run_rpc_op(FmkNode, create_facility, Params); 176 | run_op(FmkNode, create_patient, Params) -> 177 | [_Id, _Name, _Address] = Params, 178 | run_rpc_op(FmkNode, create_patient, Params); 179 | run_op(FmkNode, create_staff, Params) -> 180 | [_Id, _Name, _Address, _Speciality] = Params, 181 | run_rpc_op(FmkNode, create_staff, Params); 182 | run_op(FmkNode, create_prescription, Params) -> 183 | [_PrescriptionId, _PatientId, _PrescriberId, _PharmacyId, _FacilityId, _DatePrescribed, _Drugs] = Params, 184 | run_rpc_op(FmkNode, create_prescription, Params). 185 | 186 | run_rpc_op(FmkNode, Op, Params) -> 187 | ok = case rpc:call(FmkNode, fmk_core, Op, Params) of 188 | {error, Reason} -> 189 | io:format("Error in ~p with params ~p\n", [Op, Params]), 190 | {error, Reason}; 191 | ok -> ok 192 | end. 193 | 194 | gen_sequence(Size, Skew, SequenceSize) -> 195 | Bottom = 1 / (lists:foldl(fun(X, Sum) -> Sum + (1 / math:pow(X, Skew)) end, 0, lists:seq(1, Size))), 196 | lists:map(fun(_X) -> 197 | zipf_next(Size, Skew, Bottom) 198 | end, lists:seq(1, SequenceSize)). 199 | 200 | zipf_next(Size, Skew, Bottom) -> 201 | Dice = rand:uniform(), 202 | next(Dice, Size, Skew, Bottom, 0, 1). 203 | 204 | next(Dice, _Size, _Skew, _Bottom, Sum, CurrRank) when Sum >= Dice -> CurrRank - 1; 205 | next(Dice, Size, Skew, Bottom, Sum, CurrRank) -> 206 | NextRank = CurrRank + 1, 207 | Sumi = Sum + (Bottom / math:pow(CurrRank, Skew)), 208 | next(Dice, Size, Skew, Bottom, Sumi, NextRank). 209 | -------------------------------------------------------------------------------- /scripts/aws/src/bin/worker-configure-benchmark.sh: -------------------------------------------------------------------------------- 1 | FMK_HTTP_ADDRESSES="\"${FMK_HTTP_ADDRESSES}\"" 2 | 3 | sed -ie 's#{fmk_server_ips, \[\(\"\([0-9]\{1,3\}\.\)\{3\}\([0-9]\{1,3\}\)\{1\}\"\)\(,\(\"\([0-9]\{1,3\}\.\)\{3\}\([0-9]\{1,3\}\)\{1\}\"\)\)*\]}.#{fmk_server_ips, ['"${FMK_HTTP_ADDRESSES}"']}.#g' ${REMOTE_CONFIG_FILE} 4 | if [ "$?" = 0 ]; then 5 | echo "[SCRIPT]: Configured FMK server addresses." 6 | else 7 | echo "[SCRIPT]: Could not write FMK server addresses to node ${IP_ADDR}, aborting..." 8 | exit 1 9 | fi 10 | 11 | sed -ie 's#{fmk_server_ports, \[[0-9]\+[,[0-9]\+]*\]}.#{fmk_server_ports, ['"${FMK_HTTP_PORTS}"']}.#g' ${REMOTE_CONFIG_FILE} 12 | if [ "$?" = 0 ]; then 13 | echo "[SCRIPT]: Configured FMK server ports." 14 | else 15 | echo "[SCRIPT]: Could not write FMK server ports to node ${IP_ADDR}, aborting..." 16 | exit 1 17 | fi 18 | 19 | sed -ie 's#{concurrent, [0-9]\+}.#{concurrent, '"${NUM_CLIENTS}"'}.#g' ${REMOTE_CONFIG_FILE} 20 | if [ "$?" 
= 0 ]; then 21 | echo "[SCRIPT]: Configured number of basho bench clients." 22 | else 23 | echo "[SCRIPT]: Could not write number of basho bench clients in node ${IP_ADDR}, aborting..." 24 | exit 1 25 | fi 26 | 27 | sed -ie 's#{duration, [0-9]\+}.#{duration, '"${BENCHDURATION}"'}.#g' ${REMOTE_CONFIG_FILE} 28 | if [ "$?" = 0 ]; then 29 | echo "[SCRIPT]: Configured benchmark duration." 30 | else 31 | echo "[SCRIPT]: Could not write benchmark duration in node ${IP_ADDR}, aborting..." 32 | exit 1 33 | fi 34 | echo "[SCRIPT]: Node ${IP_ADDR} has been successfully configured." 35 | -------------------------------------------------------------------------------- /scripts/aws/src/bin/worker-setup-machine.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script is going to be executed inside an amazon virtual machine and will 3 | # clone and build a list of required git repositories. 4 | 5 | set -e # Any subsequent(*) commands which fail will cause the shell script 6 | # to exit immediately 7 | 8 | # add ESL as a repository 9 | sudo wget -c -O- http://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc | sudo apt-key add - 10 | sudo echo "deb http://packages.erlang-solutions.com/ubuntu xenial contrib" | sudo tee -a /etc/apt/sources.list.d/erlang_solutions.list > /dev/null 11 | 12 | sudo apt-get update 13 | sudo apt-get --assume-yes upgrade 14 | sudo apt-get --assume-yes install build-essential autoconf git r-base erlang 15 | 16 | # needed to later build the PNG image 17 | sudo chown ubuntu /usr/local/lib/R/site-library/ 18 | 19 | # env 20 | BIN_DIR=`pwd` 21 | cd $BIN_DIR 22 | 23 | ############################# antidote @############################ 24 | ANTIDOTE_DIR=$HOME/antidote 25 | 26 | if [ -d "$ANTIDOTE_DIR" ]; then 27 | echo "[SCRIPT] Antidote directory has been found in this node. Pulling latest changes..." 28 | # Control will enter here if $DIRECTORY exists. 29 | cd $ANTIDOTE_DIR 30 | 31 | git checkout build-local-cluster-aws 32 | git pull 33 | else 34 | echo "[SCRIPT] Antidote repository not found. Cloning repository..." 35 | git clone https://github.com/goncalotomas/antidote 36 | 37 | cd $ANTIDOTE_DIR 38 | git checkout build-local-cluster-aws 39 | 40 | fi 41 | PUBLIC_NODE_IP=`curl checkip.amazonaws.com` 42 | echo "{public_ip, {$PUBLIC_NODE_IP}}" > ./config/node-address.config 43 | sed -ie 's/\./,/g' ./config/node-address.config 44 | echo "." >> ./config/node-address.config 45 | 46 | make rel 47 | cd $BIN_DIR 48 | 49 | ############################## FMKe ################################# 50 | FMKE_DIR=$HOME/FMKe 51 | 52 | if [ -d "$FMKE_DIR" ]; then 53 | echo "[SCRIPT] FMKe directory has been found in this node. Pulling latest changes..." 54 | 55 | cd $FMKE_DIR 56 | git checkout perf-and-errors 57 | git pull 58 | make rel 59 | else 60 | echo "[SCRIPT] FMKe repository not found. Cloning repository..." 61 | 62 | git clone https://github.com/goncalotomas/FMKe 63 | cd $FMKE_DIR 64 | git checkout perf-and-errors 65 | make rel 66 | fi 67 | 68 | cd $BIN_DIR 69 | 70 | ########################## basho_bensh ############################## 71 | BASHO_BENSH_DIR=$HOME/basho_bench 72 | 73 | if [ -d "$BASHO_BENSH_DIR" ]; then 74 | echo "[SCRIPT] FMKe directory has been found in this node. Pulling latest changes..." 75 | 76 | cd $BASHO_BENSH_DIR 77 | git checkout antidote_pb_fmk_aws 78 | git pull 79 | make all 80 | else 81 | echo "[SCRIPT] FMKe repository not found. Cloning repository..." 
82 | git clone https://github.com/SyncFree/basho_bench 83 | cd $BASHO_BENSH_DIR 84 | git checkout antidote_pb_fmk_aws 85 | make all 86 | fi 87 | -------------------------------------------------------------------------------- /scripts/aws/src/bin/worker-start-antidote.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Call this like IP=1.2.3.4 worker-start-antidote.sh 3 | 4 | ANTIDOTE_DIR="/home/ubuntu/antidote" 5 | 6 | if [ -z "$CLEANMAKE" ]; then 7 | CLEANMAKE=TRUE 8 | fi 9 | if [ -z "$GITBRANCH" ]; then 10 | GITBRANCH="build-local-cluster" 11 | fi 12 | 13 | if [ "$CLEANMAKE" = TRUE ]; then 14 | cd $ANTIDOTE_DIR 15 | echo "[SCRIPT] KILLING ALL ERLANG PROCESSES ON REMOTE MACHINES..." 16 | pkill beam 17 | if [ -f rebar.lock ]; then 18 | rm rebar.lock ## not doing this was causing issues 19 | fi 20 | 21 | echo "----Worker $IP ----: git checkout $GITBRANCH" 22 | git checkout $GITBRANCH 23 | echo "----Worker $IP ----: git pull" 24 | git pull 25 | echo "[SCRIPT] DELETING DATA FROM PREVIOUS BENCHMARKS, IF ANY..." 26 | echo "----Worker $IP ----: make relclean" 27 | make relclean 28 | echo "[SCRIPT] REGENERATING RELX RELEASE..." 29 | echo "----Worker $IP ----: make rel" 30 | make rel 31 | cd - 32 | fi 33 | 34 | echo "----Worker $IP ----: IP=$IP INSTANCE_NAME=antidote ~/antidote/_build/default/rel/antidote/bin/env foreground" 35 | IP=$IP INSTANCE_NAME=antidote nohup ~/antidote/_build/default/rel/antidote/bin/env foreground & 36 | -------------------------------------------------------------------------------- /scripts/aws/src/bin/worker-start-basho-bench.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Call this like IP=1.2.3.4 worker-start-basho-bench.sh 3 | 4 | BB_DIR="/home/ubuntu/basho_bench" 5 | 6 | if [ -z "$CLEANMAKE" ]; then 7 | CLEANMAKE=TRUE 8 | fi 9 | if [ -z "$GITBRANCH" ]; then 10 | GITBRANCH="antidote_pb_fmk_aws" 11 | fi 12 | 13 | cd $BB_DIR 14 | if [ "$CLEANMAKE" = TRUE ]; then 15 | git checkout $GITBRANCH 16 | git pull 17 | make all 18 | fi 19 | 20 | ./_build/default/bin/basho_bench examples/fmkclient.config & 21 | -------------------------------------------------------------------------------- /scripts/aws/src/bin/worker-start-fmk.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Call this like IP=1.2.3.4 worker-start-fmk.sh 3 | 4 | FMK_DIR="/home/ubuntu/FMKe" 5 | 6 | if [ -z "$CLEANMAKE" ]; then 7 | CLEANMAKE=TRUE 8 | fi 9 | if [ -z "$GITBRANCH" ]; then 10 | GITBRANCH="perf-and-errors" 11 | fi 12 | if [ -z "$ANTIDOTE_ADDRESS" ]; then 13 | echo "Error: missing list of comma separated antidote IP addresses" 14 | exit 2 15 | fi 16 | if [ -z "$ANTIDOTE_PB_PORT" ]; then 17 | echo "Error: missing list of comma separated antidote PB ports" 18 | exit 2 19 | fi 20 | 21 | cd $FMK_DIR 22 | if [ "$CLEANMAKE" = TRUE ]; then 23 | echo "[SCRIPT] KILLING ALL ERLANG PROCESSES ON REMOTE MACHINES..." 24 | pkill beam 25 | if [ -f rebar.lock ]; then 26 | rm rebar.lock ## not doing this was causing issues 27 | fi 28 | echo "----Worker $IP ----: git checkout $GITBRANCH" 29 | git checkout $GITBRANCH 30 | echo "----Worker $IP ----: git pull" 31 | git pull 32 | echo "[SCRIPT] DELETING DATA FROM PREVIOUS BENCHMARKS, IF ANY..." 33 | echo "----Worker $IP ----: make relclean" 34 | make relclean 35 | echo "[SCRIPT] REGENERATING RELX RELEASE..."
36 | echo "----Worker $IP ----: make rel" 37 | make rel 38 | 39 | fi 40 | echo "----Worker $IP ----: IP=$IP ANTIDOTE_ADDRESS=$ANTIDOTE_ADDRESS IP=$IP INSTANCE_NAME=fmk ./_build/default/rel/fmk/bin/env foreground" 41 | ANTIDOTE_ADDRESS=$ANTIDOTE_ADDRESS ANTIDOTE_PB_PORT=$ANTIDOTE_PB_PORT IP=$IP INSTANCE_NAME=fmk nohup ./_build/default/rel/fmk/bin/env foreground & 42 | -------------------------------------------------------------------------------- /scripts/compile_basho_bench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd _build/default/lib/basho_bench; make all; cd - 3 | -------------------------------------------------------------------------------- /scripts/config/change_conn_pool_size.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./scripts/config/set_param.sh "connection_pool_size" $1 5 | -------------------------------------------------------------------------------- /scripts/config/change_db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | show_supported_dbs() 5 | { 6 | echo "FMKe supports the following data stores:" 7 | for value in "antidote" "antidote_norm" "riak" "riak_norm" "redis" ; do 8 | echo "-$value" 9 | done 10 | } 11 | 12 | if [ "$#" -lt 1 ]; then 13 | echo "Error: no data store name supplied" 14 | show_supported_dbs 15 | echo "error" 16 | exit 1 17 | fi 18 | 19 | TARGETDB=$1 20 | 21 | ./scripts/config/set_param.sh "target_database" $TARGETDB 22 | if [[ $? -eq 0 ]]; then 23 | echo "success" 24 | fi 25 | -------------------------------------------------------------------------------- /scripts/config/change_db_addresses.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./scripts/config/set_param.sh "database_addresses" "$1" 5 | -------------------------------------------------------------------------------- /scripts/config/change_db_ports.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [[ $1 == "antidote" || $1 == "antidote_norm" ]]; then 5 | ./scripts/config/set_param.sh "database_ports" [8087] 6 | elif [[ $1 == "redis" ]]; then 7 | ./scripts/config/set_param.sh "database_ports" [6379] 8 | elif [[ $1 == "riak" || $1 == "riak_norm" ]]; then 9 | ./scripts/config/set_param.sh "database_ports" [8087] 10 | else 11 | ./scripts/config/set_param.sh "database_ports" $1 12 | fi 13 | -------------------------------------------------------------------------------- /scripts/config/change_http_port.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ./scripts/config/set_param.sh "http_port" $1 5 | -------------------------------------------------------------------------------- /scripts/config/set_param.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | PARAM_NAME=$1 5 | PARAM_NEW_VAL=$2 6 | 7 | PARAM_CUR_VAL=$( 8 | grep '{$1, .*}' config/fmke.config | awk '{print $2;}' | sed 's/}.$//' 9 | ) 10 | 11 | if [[ "${PARAM_NEW_VAL}" == "${PARAM_CUR_VAL}" ]]; then 12 | echo "No changes needed, $PARAM_NAME already set to $PARAM_CUR_VAL." 
13 | exit 0 14 | fi 15 | 16 | if [[ "$OSTYPE" == "linux-gnu" ]]; then 17 | sed -i -e "s/{$PARAM_NAME, .*}\./{$PARAM_NAME, $PARAM_NEW_VAL}\./g" ./config/fmke.config 18 | elif [[ "$OSTYPE" == "darwin"* ]]; then 19 | sed -i '' -e "s/{$PARAM_NAME, .*}\./{$PARAM_NAME, $PARAM_NEW_VAL}\./g" ./config/fmke.config 20 | elif [[ "$OSTYPE" == "freebsd"* ]]; then 21 | sed -i -e "s/{$PARAM_NAME, .*}\./{$PARAM_NAME, $PARAM_NEW_VAL}\./g" ./config/fmke.config 22 | fi 23 | 24 | if [ "$?" -ne 0 ]; then 25 | echo "Error: error changing ${PARAM_NAME} to ${PARAM_NEW_VAL}" 26 | exit 3 27 | fi 28 | 29 | echo "Changed ${PARAM_NAME} to ${PARAM_NEW_VAL}." 30 | -------------------------------------------------------------------------------- /scripts/copy_bench_driver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # copy Basho Bench driver into correct folder before building 3 | cp test/basho_bench_driver_fmkclient.erl _build/default/lib/basho_bench/src 4 | cd _build/default/lib/basho_bench; make all; cd - 5 | -------------------------------------------------------------------------------- /scripts/run_benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | reset="\033[0m" 5 | red="\033[31m" 6 | green="\033[32m" 7 | yellow="\033[33m" 8 | cyan="\033[36m" 9 | white="\033[37m" 10 | 11 | printf "$green> Setting up FMKe to benchmark $2...$reset\n" 12 | ./scripts/config/change_db.sh $2 13 | ./scripts/config/change_db_ports.sh $2 14 | make rel 15 | ./scripts/start_data_store.sh $2 16 | ./scripts/start_fmke.sh 17 | 18 | # TODO make fmke node reference extensible 19 | # TODO make 'normal' mode optional 20 | # Fill database with test data: 21 | if [[ $1 = 'short' ]]; then 22 | printf "$green> Populating $2 via FMKe using short benchmark configuration...$reset\n" 23 | ./scripts/populate_fmke.escript $2 benchmark_short.config 'fmke@127.0.0.1' 24 | elif [[ $1 = 'normal' ]]; then 25 | printf "$green> Populating $2 via FMKe using standard benchmark configuration...$reset\n" 26 | ./scripts/populate_fmke.escript $2 benchmark_standard.config 'fmke@127.0.0.1' 27 | fi 28 | 29 | # Start benchmark 30 | printf "$yellow> Starting benchmark...$reset\n" 31 | if [[ $1 = 'short' ]]; then 32 | _build/test/lib/lasp_bench/_build/default/bin/lasp_bench config/benchmark_short.config 33 | elif [[ $1 = 'normal' ]]; then 34 | _build/test/lib/lasp_bench/_build/default/bin/lasp_bench config/benchmark_standard.config 35 | fi 36 | 37 | if [ -s tests/current/error.log ]; then 38 | ./scripts/stop_fmke.sh 39 | ./scripts/stop_data_store.sh $2 40 | printf "$red> Fatal error: benchmark exited prematurely with errors.$reset\n" 41 | exit 3 42 | fi 43 | 44 | ./scripts/stop_fmke.sh 45 | ./scripts/stop_data_store.sh $2 46 | printf "$green> Benchmark successful! 
You should now run 'make bench-results' to generate the graphs for the benchmark.$reset\n" 47 | -------------------------------------------------------------------------------- /scripts/run_ct_suite.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | reset="\033[0m" 5 | red="\033[31m" 6 | green="\033[32m" 7 | yellow="\033[33m" 8 | cyan="\033[36m" 9 | white="\033[37m" 10 | 11 | echo "" 12 | printf "$cyan===> Running Common Test unit tests with $1 back end...$reset\n" 13 | ./scripts/config/change_db.sh $1 14 | ./scripts/config/change_db_ports.sh $1 15 | ./scripts/start_data_store.sh $1 16 | printf "$cyan===> Generating FMKe release...$reset\n" 17 | make rel 18 | ./scripts/start_fmke.sh 19 | set +e 20 | rebar3 ct 21 | if [ $? -ne 0 ]; then 22 | set -e 23 | ./scripts/stop_fmke.sh 24 | ./scripts/stop_data_store.sh $1 25 | printf "$red===> One or more tests failed :(.$reset\n" 26 | exit 5 27 | fi 28 | ./scripts/stop_fmke.sh 29 | ./scripts/stop_data_store.sh $1 30 | printf "$green===> Successfully ran CT suite against $1!$reset\n" 31 | -------------------------------------------------------------------------------- /scripts/run_fmke_operations.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | echo "running FMKe unit tests with $1 back end..." 4 | ./scripts/config/change_db.sh $1 5 | ./scripts/config/change_db_ports.sh $1 6 | ./scripts/start_data_store.sh $1 7 | make rel 8 | ./scripts/start_fmke.sh 9 | set +e 10 | rebar3 eunit 11 | if [ $? -ne 0 ]; then 12 | set -e 13 | echo "fatal: one or more tests failed." 14 | ./scripts/stop_fmke.sh 15 | ./scripts/stop_data_store.sh $1 16 | exit 5 17 | fi 18 | ./scripts/stop_fmke.sh 19 | ./scripts/stop_data_store.sh $1 20 | echo "done" 21 | -------------------------------------------------------------------------------- /scripts/start_data_store.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | echo "trying to load $1 docker image..." 4 | if [[ $1 = "antidote" || $1 = "antidote_norm" ]]; then 5 | docker pull mweber/antidotedb 6 | if docker inspect antidote &> /dev/null; then 7 | set -e 8 | docker start antidote 9 | else 10 | set -e 11 | # setup new antidote docker container: 12 | docker run -d --name antidote -p "8087:8087" mweber/antidotedb 13 | fi 14 | sleep 15 15 | echo "antidote started." 16 | elif [ $1 = "redis" ]; then 17 | docker pull redis 18 | set +e 19 | docker run -d --name redis -p "6379:6379" redis 20 | sleep 15 21 | echo "redis started." 22 | elif [[ $1 = "riak" || $1 = "riak_norm" ]]; then 23 | docker pull goncalotomas/riak 24 | set +e 25 | if docker inspect riak &> /dev/null; then 26 | # start existing docker container: 27 | set -e 28 | docker start riak 29 | else 30 | set -e 31 | docker run -d --name riak -p "8087:8087" -p "8098:8098" -e NODE_NAME=riak@127.0.0.1 goncalotomas/riak 32 | fi 33 | sleep 15 34 | echo "riak started." 35 | else 36 | echo "fatal: data store not recognised. Cannot proceed." 37 | exit 1 38 | fi 39 | -------------------------------------------------------------------------------- /scripts/start_fmke.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "compiling FMKe..." 5 | rebar3 compile 6 | # Start FMK: 7 | echo "starting FMKe..." 
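# boot the relx-generated release (relx config lives in rebar.config); "start" should run it in the background, hence the wait below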
8 | _build/default/rel/fmke/bin/env start 9 | 10 | # wait some time for FMKe to start 11 | echo "waiting for FMKe to start..." 12 | sleep 3 13 | echo "FMKe started." 14 | -------------------------------------------------------------------------------- /scripts/stop_data_store.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | echo "stopping $1..." 4 | if [[ $1 = "antidote" || $1 = "antidote_norm" ]]; then 5 | docker stop antidote > /dev/null 6 | docker rm antidote 7 | elif [[ $1 = "riak" || $1 = "riak_norm" ]]; then 8 | docker stop riak > /dev/null 9 | docker rm riak 10 | else 11 | docker stop $1 > /dev/null 12 | echo "removing docker container to eliminate persistent storage..." 13 | docker rm $1 14 | fi 15 | echo "$1 stopped, docker container removed" 16 | -------------------------------------------------------------------------------- /scripts/stop_fmke.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Stop FMKe: 5 | echo "stopping FMKe..." 6 | _build/default/rel/fmke/bin/env stop 7 | 8 | echo "successful FMKe shutdown." 9 | -------------------------------------------------------------------------------- /src/fmke.app.src: -------------------------------------------------------------------------------- 1 | {application, fmke, 2 | [{description, "Benchmark for distributed key-value stores"}, 3 | {vsn, "0.1.0"}, 4 | {registered, []}, 5 | {mod, { fmke_app, []}}, 6 | {applications, 7 | [kernel, 8 | stdlib, 9 | runtime_tools, 10 | tools, 11 | poolboy, 12 | lager, 13 | cowboy, 14 | jsx 15 | ]}, 16 | {included_applications, [ 17 | aqlc, 18 | antidotec_pb, 19 | riak_client, 20 | eredis_cluster, 21 | erlcass 22 | ]}, 23 | {modules, []}, 24 | {maintainers, ["Goncalo Tomas"]}, 25 | {licenses, ["Apache 2"]}, 26 | {links, [{"Github","https://github.com/goncalotomas/FMKe"}]} 27 | ]}. 28 | -------------------------------------------------------------------------------- /src/fmke.erl: -------------------------------------------------------------------------------- 1 | %% TODO redefine types (we will in the future support databases without CRDTs) 2 | -module(fmke). 3 | -include("fmke.hrl"). 4 | 5 | -behaviour(gen_server). 6 | 7 | %%----------------------------------------------------------------------------- 8 | %% Public API for FMK Core 9 | %%----------------------------------------------------------------------------- 10 | -export([ 11 | start_link/1, 12 | create_patient/3, 13 | create_pharmacy/3, 14 | create_facility/4, 15 | create_staff/4, 16 | create_prescription/6, 17 | get_facility_by_id/1, 18 | get_patient_by_id/1, 19 | get_pharmacy_by_id/1, 20 | get_processed_pharmacy_prescriptions/1, 21 | get_pharmacy_prescriptions/1, 22 | get_prescription_by_id/1, 23 | get_prescription_medication/1, 24 | get_staff_by_id/1, 25 | get_staff_prescriptions/1, 26 | get_status/0, 27 | process_prescription/2, 28 | update_patient_details/3, 29 | update_pharmacy_details/3, 30 | update_facility_details/4, 31 | update_staff_details/4, 32 | update_prescription_medication/3 33 | ]). 34 | 35 | %% gen_server callbacks 36 | -export([ 37 | init/1, 38 | handle_cast/2, 39 | handle_call/3 40 | ]). 41 | 42 | -define (SERVER, ?MODULE). 43 | 44 | start_link(Args) -> 45 | gen_server:start_link({local, ?SERVER}, ?MODULE, Args, []).
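%% Usage sketch (hypothetical values; assumes the full FMKe application is running so
%% that the `handlers` worker pool referenced in handle_call/3 exists): the exported
%% functions below are thin wrappers around gen_server calls to this module, e.g.
%%   ok = fmke:create_patient(1, "John Doe", "Lisbon"),
%%   Patient = fmke:get_patient_by_id(1).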
46 | 47 | init([none]) -> 48 | lager:info("~p is booting in passthrough mode (no adapter)", [?MODULE]), 49 | {ok, undefined}; 50 | init([Adapter]) -> 51 | lager:info("~p will use the ~p adapter~n", [?MODULE, Adapter]), 52 | {ok, Adapter}. 53 | 54 | handle_cast(_Msg, State) -> 55 | {noreply, State}. 56 | 57 | handle_call(get_status, _From, Adapter) -> 58 | {ok, Pools} = application:get_env(?APP, pools), 59 | PoolStatuses = lists:map( 60 | fun(Pool) -> 61 | PoolUp = is_alive(Pool), 62 | {PoolStatus, CurrPoolSize, CurrOverflow, _Monitors} = gen_server:call(Pool, status), 63 | [ 64 | {pool_is_up, PoolUp}, {pool_status, PoolStatus}, 65 | {worker_pool_size, CurrPoolSize}, {current_overflow, CurrOverflow} 66 | ] 67 | end, Pools), 68 | PoolDetails = lists:zip(Pools, PoolStatuses), 69 | EnvOpts = [driver, target_database, database_addresses, database_ports, connection_pool_size, http_port, pools], 70 | EnvVals = lists:map( 71 | fun(Opt) -> 72 | case application:get_env(?APP, Opt) of 73 | {ok, Val} -> 74 | {Opt, Val}; 75 | undefined -> 76 | {Opt, undefined} 77 | end 78 | end, EnvOpts), 79 | Reply = [ 80 | {fmke_up, is_alive(fmke)}, 81 | {connection_manager_up, is_alive(fmke_db_conn_manager)}, 82 | {web_server_up, is_alive(cowboy_sup)}, 83 | {pool_details, PoolDetails} 84 | ] ++ EnvVals, 85 | {reply, Reply, Adapter}; 86 | 87 | handle_call(Op, From, State) -> 88 | Worker = poolboy:checkout(handlers), 89 | gen_server:cast(Worker, {Op, From}), 90 | {noreply, State}. 91 | 92 | %%----------------------------------------------------------------------------- 93 | %% Create functions - no transactional context 94 | %%----------------------------------------------------------------------------- 95 | 96 | %% Adds a patient to the FMK system, needing only an ID, Name and Address. 97 | %% A check is done to determine if a patient with the given ID already exists, 98 | %% and if so the operation fails. 99 | -spec create_patient(id(), string(), string()) -> ok | {error, reason()}. 100 | create_patient(Id, Name, Address) -> 101 | gen_server:call(?MODULE, {create, patient, [Id, Name, Address]}). 102 | 103 | %% Adds a pharmacy to the FMK-- system if the ID for the pharmacy has not yet been seen. 104 | -spec create_pharmacy(id(), string(), string()) -> ok | {error, reason()}. 105 | create_pharmacy(Id, Name, Address) -> 106 | gen_server:call(?MODULE, {create, pharmacy, [Id, Name, Address]}). 107 | 108 | %% Adds a facility to the FMK-- system if the ID for the facility has not yet been seen. 109 | -spec create_facility(id(), string(), string(), string()) -> ok | {error, reason()}. 110 | create_facility(Id, Name, Address, Type) -> 111 | gen_server:call(?MODULE, {create, facility, [Id, Name, Address, Type]}). 112 | 113 | %% Adds a staff member to the FMK-- system if the ID for the member has not yet been seen. 114 | -spec create_staff(id(), string(), string(), string()) -> ok | {error, reason()}. 115 | create_staff(Id, Name, Address, Speciality) -> 116 | gen_server:call(?MODULE, {create, staff, [Id, Name, Address, Speciality]}). 117 | 118 | %% Creates a prescription that is associated with a pacient, prescriber (medicall staff), 119 | %% pharmacy. The prescription also includes the prescription date and the list of drugs that should be administered. 120 | -spec create_prescription(id(), id(), id(), id(), string(), [crdt()]) -> ok | {error, reason()}. 
121 | create_prescription(PrescriptionId, PatientId, PrescriberId, PharmacyId, DatePrescribed, Drugs) -> 122 | gen_server:call(?MODULE, 123 | {create, prescription, [PrescriptionId, PatientId, PrescriberId, PharmacyId, DatePrescribed, Drugs]} 124 | ). 125 | 126 | %%----------------------------------------------------------------------------- 127 | %% Read functions - no transactional context 128 | %%----------------------------------------------------------------------------- 129 | 130 | %% Fetches a patient by ID. 131 | -spec get_patient_by_id(id()) -> patient() | {error, reason()}. 132 | get_patient_by_id(Id) -> 133 | gen_server:call(?MODULE, {read, patient, Id}). 134 | 135 | %% Fetches a facility by id. 136 | -spec get_facility_by_id(id()) -> facility() | {error, reason()}. 137 | get_facility_by_id(Id) -> 138 | gen_server:call(?MODULE, {read, facility, Id}). 139 | 140 | %% Fetches a pharmacy by ID. 141 | -spec get_pharmacy_by_id(id()) -> pharmacy() | {error, reason()}. 142 | get_pharmacy_by_id(Id) -> 143 | gen_server:call(?MODULE, {read, pharmacy, Id}). 144 | 145 | %% Fetches a prescription by ID. 146 | -spec get_prescription_by_id(id()) -> prescription() | {error, reason()}. 147 | get_prescription_by_id(Id) -> 148 | gen_server:call(?MODULE, {read, prescription, Id}). 149 | 150 | %% Fetches a list of prescriptions given a certain pharmacy ID. 151 | -spec get_pharmacy_prescriptions(id()) -> list(prescription() | binary()) | {error, reason()}. 152 | get_pharmacy_prescriptions(Id) -> 153 | gen_server:call(?MODULE, {read, pharmacy, Id, prescriptions}). 154 | 155 | -spec get_processed_pharmacy_prescriptions(id()) -> list(prescription() | binary()) | {error, reason()}. 156 | get_processed_pharmacy_prescriptions(Id) -> 157 | gen_server:call(?MODULE, {read, pharmacy, Id, processed_prescriptions}). 158 | 159 | %% Fetches prescription medication by ID. 160 | -spec get_prescription_medication(id()) -> [binary()] | {error, reason()}. 161 | get_prescription_medication(Id) -> 162 | gen_server:call(?MODULE, {read, prescription, Id, [drugs]}). 163 | 164 | %% Fetches a staff member by ID. 165 | -spec get_staff_by_id(id()) -> staff() | {error, reason()}. 166 | get_staff_by_id(Id) -> 167 | gen_server:call(?MODULE, {read, staff, Id}). 168 | 169 | %% Fetches a list of prescriptions given a certain staff member ID. 170 | -spec get_staff_prescriptions(id()) -> list(prescription() | binary()) | {error, reason()}. 171 | get_staff_prescriptions(Id) -> 172 | gen_server:call(?MODULE, {read, staff, Id, prescriptions}). 173 | 174 | %%----------------------------------------------------------------------------- 175 | %% Update functions - no transactional context 176 | %%----------------------------------------------------------------------------- 177 | 178 | %% Updates the personal details of a patient with a certain ID. 179 | -spec update_patient_details(id(), string(), string()) -> ok | {error, reason()}. 180 | update_patient_details(Id, Name, Address) -> 181 | gen_server:call(?MODULE, {update, patient, [Id, Name, Address]}). 182 | 183 | %% Updates the details of a pharmacy with a certain ID. 184 | -spec update_pharmacy_details(id(), string(), string()) -> ok | {error, reason()}. 185 | update_pharmacy_details(Id, Name, Address) -> 186 | gen_server:call(?MODULE, {update, pharmacy, [Id, Name, Address]}). 187 | 188 | %% Updates the details of a facility with a certain ID. 189 | -spec update_facility_details(id(), string(), string(), string()) -> ok | {error, reason()}. 
190 | update_facility_details(Id, Name, Address, Type) -> 191 | gen_server:call(?MODULE, {update, facility, [Id, Name, Address, Type]}). 192 | 193 | %% Updates the details of a staff member with a certain ID. 194 | -spec update_staff_details(id(), string(), string(), string()) -> ok | {error, reason()}. 195 | update_staff_details(Id, Name, Address, Speciality) -> 196 | gen_server:call(?MODULE, {update, staff, [Id, Name, Address, Speciality]}). 197 | 198 | -spec update_prescription_medication(id(), atom(), [string()]) -> ok | {error, reason()}. 199 | update_prescription_medication(Id, _Operation, Drugs) -> 200 | gen_server:call(?MODULE, {update, prescription, Id, {drugs, add, Drugs}}). 201 | 202 | process_prescription(Id, Date) -> 203 | gen_server:call(?MODULE, {update, prescription, Id, {date_processed, Date}}). 204 | 205 | %%----------------------------------------------------------------------------- 206 | %% Helper functions 207 | %%----------------------------------------------------------------------------- 208 | 209 | is_alive(Proc) -> 210 | undefined =/= whereis(Proc). 211 | 212 | get_status() -> 213 | gen_server:call(?MODULE, get_status). 214 | -------------------------------------------------------------------------------- /src/fmke_app.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %% @doc fmk public API 3 | %% @end 4 | %%%------------------------------------------------------------------- 5 | 6 | -module(fmke_app). 7 | -include("fmke.hrl"). 8 | -behaviour(application). 9 | 10 | %% Application callbacks 11 | -export([start/2, stop/1]). 12 | 13 | start(_Type, StartArgs) -> 14 | fmke_sup:start_link(StartArgs). 15 | 16 | stop(_State) -> 17 | ok. 18 | -------------------------------------------------------------------------------- /src/fmke_db_conn_manager.erl: -------------------------------------------------------------------------------- 1 | -module(fmke_db_conn_manager). 2 | 3 | -include("fmke.hrl"). 4 | 5 | -behaviour(gen_server). 6 | 7 | -define (SERVER, ?MODULE). 8 | 9 | -export([start_link/0]). 10 | 11 | %% gen_server callbacks 12 | -export([init/1, handle_cast/2, handle_info/2, handle_call/3, terminate/2]). 13 | 14 | %% conn_manager API 15 | -export([checkout/0, checkin/1]). 16 | 17 | -record(state, { 18 | queue :: queue:queue(), 19 | pid_owners :: map() 20 | }). 21 | 22 | start_link() -> 23 | gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). 24 | 25 | init([]) -> 26 | process_flag(trap_exit, true), 27 | {ok, Pools} = application:get_env(?APP, pools), 28 | {ok, #state{queue = queue:from_list(Pools), pid_owners = #{}}}. 29 | 30 | checkout() -> 31 | gen_server:call(?MODULE, checkout). 32 | 33 | checkin(Pid) -> 34 | gen_server:call(?MODULE, {checkin, Pid}). 35 | 36 | handle_cast(_Msg, State) -> 37 | {noreply, State}. 
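%% Usage sketch (hypothetical): drivers that rely on this manager check a connection
%% out, use it with the configured client library, and hand it back, e.g.
%%   Pid = fmke_db_conn_manager:checkout(),
%%   %% ... perform one or more database operations using Pid ...
%%   fmke_db_conn_manager:checkin(Pid).
%% checkout/0 rotates the pool queue kept in #state{}, so consecutive checkouts are
%% spread round-robin across the configured connection pools.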
38 | 39 | handle_call(checkout, _From, State) -> 40 | #state{pid_owners = PidOwners, 41 | queue = Queue} = State, 42 | {{value, Pool}, Q1} = queue:out(Queue), 43 | Pid = poolboy:checkout(Pool), 44 | true = link(Pid), 45 | {reply, Pid, #state{queue = queue:in(Pool, Q1), pid_owners = maps:put(Pid, Pool, PidOwners)}}; 46 | 47 | handle_call({checkin, Pid}, _From, State) -> 48 | #state{pid_owners = PidOwners} = State, 49 | case maps:get(Pid, PidOwners, no_such_pid) of 50 | no_such_pid -> 51 | lager:error("Cannot find owner of pid ~p~n", [Pid]), 52 | {reply, no_such_pid, State}; 53 | Owner -> 54 | MapState = maps:remove(Pid, PidOwners), 55 | Result = poolboy:checkin(Owner, Pid), 56 | {reply, Result, State#state{pid_owners = MapState}} 57 | end. 58 | 59 | terminate(Reason, State) -> 60 | #state{pid_owners = PidOwners, 61 | queue = Queue} = State, 62 | lager:critical("fmke db connection manager going down, reported reason: ~p~n", [Reason]), 63 | lager:error("fmke db connection manager crashing with next queue '~p' and ~p pids checked out.~n", 64 | [Queue, maps:size(PidOwners)]), 65 | ok. 66 | 67 | handle_info({'EXIT', Pid, _Reason}, State) -> 68 | #state{pid_owners = PidOwners} = State, 69 | MapState = maps:remove(Pid, PidOwners), 70 | {noreply, State#state{pid_owners = MapState}}. 71 | -------------------------------------------------------------------------------- /src/fmke_db_conn_sup.erl: -------------------------------------------------------------------------------- 1 | -module(fmke_db_conn_sup). 2 | 3 | -behaviour(supervisor). 4 | 5 | -include("fmke.hrl"). 6 | 7 | %% API 8 | -export([start_link/1]). 9 | 10 | %% Supervisor callbacks 11 | -export([init/1]). 12 | 13 | -define(SERVER, ?MODULE). 14 | 15 | start_link(Args) -> 16 | supervisor:start_link({local, ?SERVER}, ?MODULE, Args). 17 | 18 | init([Pools, Connections, Module, PoolSize]) -> 19 | ok = application:set_env(?APP, pools, Pools), 20 | RestartStrategy = #{strategy => one_for_one, intensity => 10, period => 10}, 21 | Children = [gen_conn_pool_mgr_spec()] ++ gen_pool_specs(Pools, Connections, Module, PoolSize), 22 | {ok, {RestartStrategy, Children}}. 23 | 24 | 25 | -spec gen_conn_pool_mgr_spec() -> supervisor:child_spec(). 26 | gen_conn_pool_mgr_spec() -> 27 | #{ 28 | id => fmke_db_conn_manager, 29 | start => {fmke_db_conn_manager, start_link, []}, 30 | restart => permanent, 31 | type => worker 32 | }. 33 | 34 | -spec gen_pool_specs(Pools::list(atom()), Connections::list({list(), non_neg_integer()}), Module::module(), 35 | PoolSize::non_neg_integer()) -> list(supervisor:child_spec()). 36 | gen_pool_specs(Pools, Connections, Module, PoolSize) -> 37 | lists:map( 38 | fun({Pool, Connection}) -> 39 | SizeArgs = [{size, PoolSize}, {max_overflow, 2 * PoolSize}], 40 | {Host, Port} = Connection, 41 | WorkerArgs = [{client_lib, Module}, {host, Host}, {port, Port}], 42 | PoolArgs = [{name, {local, Pool}}, {worker_module, fmke_db_connection}] ++ SizeArgs, 43 | poolboy:child_spec(Pool, PoolArgs, WorkerArgs) 44 | end, 45 | lists:zip(Pools, Connections)). 46 | -------------------------------------------------------------------------------- /src/fmke_db_connection.erl: -------------------------------------------------------------------------------- 1 | %% Heavily inspired in Peter Zeller's previous module antidote_pool. 2 | %% This module manages connections between databases and FMKe. 3 | -module(fmke_db_connection). 4 | -author("Gonçalo Tomás "). 5 | -include("fmke.hrl"). 6 | 7 | -behaviour(poolboy_worker). 
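%% Example WorkerArgs handed in by fmke_db_conn_sup (hypothetical values for a local
%% Redis setup): [{client_lib, eredis}, {host, "127.0.0.1"}, {port, 6379}].
%% start_link/1 below keeps retrying the connection with an exponentially growing
%% delay (capped at 10 seconds) until the database becomes reachable.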
8 | 9 | %% poolboy_worker callback 10 | -export([start_link/1]). 11 | 12 | start_link([{client_lib, Module}, {host, Host}, {port, Port}]) -> 13 | try_connect(Module, Host, Port, 100). 14 | 15 | try_connect(Module, Hostname, Port, Timeout) -> 16 | case Module:start_link(Hostname, Port) of 17 | {ok, Pid} -> 18 | lager:debug("Connected to ~p:~p --> ~p ~n", [Hostname, Port, Pid]), 19 | {ok, Pid}; 20 | {error, Reason} -> 21 | lager:error("Could not connect to ~p:~p, Reason: ~p~n", [Hostname, Port, Reason]), 22 | timer:sleep(Timeout), 23 | try_connect(Module, Hostname, Port, min(10000, Timeout*2)) 24 | end. 25 | -------------------------------------------------------------------------------- /src/fmke_driver_config.erl: -------------------------------------------------------------------------------- 1 | -module(fmke_driver_config). 2 | 3 | -include("fmke.hrl"). 4 | 5 | -type database() :: atom(). 6 | 7 | -define(KV_ADAPTER, fmke_kv_adapter). 8 | 9 | 10 | -export([ 11 | db_from_driver/1, 12 | default_driver/1, 13 | driver_adapter/1, 14 | get_client_lib/1, 15 | is_opt_driver/1, 16 | is_simple_kv_driver/1, 17 | requires_conn_manager/1, 18 | requires_ets_table/1, 19 | selected_driver/0, 20 | selected_adapter/0 21 | ]). 22 | 23 | %% Stores the default drivers for each database. 24 | %% These typically only get updated once a new database is supported. 25 | -define(DEFAULT_DRIVER, #{ 26 | aql => fmke_driver_opt_aql, 27 | antidote => fmke_driver_opt_antidote, 28 | cassandra => fmke_driver_opt_cassandra, 29 | ets => fmke_driver_ets, 30 | redis => fmke_driver_opt_redis_crdb, 31 | redis_crdb => fmke_driver_opt_redis_crdb, 32 | redis_cluster => fmke_driver_opt_redis_cluster, 33 | riak => fmke_driver_opt_riak_kv 34 | }). 35 | 36 | %% Add your driver to this list if you wish to use FMKe's connection manager 37 | -define(REQUIRE_CONN_MANAGER, [ 38 | fmke_driver_opt_aql, 39 | fmke_driver_opt_antidote, 40 | fmke_driver_opt_redis_crdb, 41 | fmke_driver_opt_riak_kv 42 | ]). 43 | 44 | %% Add your driver to this list if you wish to have an ETS table created at boot 45 | -define(REQUIRE_ETS, [ 46 | fmke_driver_ets 47 | ]). 48 | 49 | %% Add your driver to this list if your driver implements only the simple KV 50 | %% interface and not the entire FMKe API. Drivers that are not on this list 51 | %% or on the SIMPLE_SQL_DRIVERS are assumed to implement the entire FMKe API. 52 | -define(SIMPLE_KV_DRIVERS, [ 53 | fmke_driver_ets 54 | ]). 55 | 56 | -spec selected_driver() -> module(). 57 | selected_driver() -> 58 | {ok, Driver} = application:get_env(?APP, driver), 59 | Driver. 60 | 61 | -spec selected_adapter() -> module(). 62 | selected_adapter() -> 63 | driver_adapter(selected_driver()). 64 | 65 | -spec requires_conn_manager(Driver::module()) -> true | false. 66 | requires_conn_manager(Driver) -> 67 | lists:member(Driver, ?REQUIRE_CONN_MANAGER) . 68 | 69 | -spec default_driver(Database::database()) -> module(). 70 | default_driver(Database) -> 71 | maps:get(Database, ?DEFAULT_DRIVER). 72 | 73 | -spec is_opt_driver(Driver::module()) -> boolean(). 74 | is_opt_driver(Driver) -> 75 | not lists:member(Driver, ?SIMPLE_KV_DRIVERS). 76 | 77 | -spec is_simple_kv_driver(Driver::module()) -> boolean(). 78 | is_simple_kv_driver(Driver) -> 79 | lists:member(Driver, ?SIMPLE_KV_DRIVERS). 80 | 81 | -spec requires_ets_table(Driver::module()) -> true | false. 82 | requires_ets_table(Driver) -> 83 | lists:member(Driver, ?REQUIRE_ETS). 84 | 85 | -spec get_client_lib(Driver::module()) -> atom(). 
86 | get_client_lib(fmke_driver_opt_aql) -> aqlc_tcp; 87 | get_client_lib(fmke_driver_opt_antidote) -> antidotec_pb_socket; 88 | get_client_lib(fmke_driver_opt_redis_crdb) -> eredis; 89 | get_client_lib(fmke_driver_opt_riak_kv) -> riakc_pb_socket. 90 | 91 | -spec driver_adapter(Driver::module()) -> module(). 92 | driver_adapter(Driver) -> 93 | case is_opt_driver(Driver) of 94 | false -> 95 | ?KV_ADAPTER; 96 | true -> 97 | none 98 | end. 99 | 100 | db_from_driver(fmke_driver_ets) -> ets; 101 | db_from_driver(fmke_driver_opt_aql) -> aql; 102 | db_from_driver(fmke_driver_opt_antidote) -> antidote; 103 | db_from_driver(fmke_driver_opt_riak_kv) -> riak; 104 | db_from_driver(fmke_driver_opt_cassandra) -> cassandra; 105 | db_from_driver(fmke_driver_opt_redis_crdb) -> redis_crdb; 106 | db_from_driver(fmke_driver_opt_redis_cluster) -> redis_cluster. 107 | -------------------------------------------------------------------------------- /src/fmke_driver_ets.erl: -------------------------------------------------------------------------------- 1 | -module(fmke_driver_ets). 2 | 3 | -behaviour(gen_fmke_kv_driver). 4 | 5 | -include("fmke.hrl"). 6 | 7 | 8 | %% gen_fmke_driver exports 9 | -export([ 10 | start_transaction/1, 11 | commit_transaction/2, 12 | get/2, 13 | put/2 14 | ]). 15 | 16 | start_transaction(_Opts) -> 17 | {ok, []}. 18 | 19 | commit_transaction(_Context, _Opts) -> 20 | ok. 21 | 22 | get(Keys, Context) -> 23 | {ok, DataModel} = application:get_env(?APP, data_model), 24 | {lists:map(fun({Key, Type}) -> 25 | case ets:lookup(?ETS_TABLE_NAME, Key) of 26 | [] -> {error, not_found}; 27 | [{Key, Value}] -> pack(DataModel, Value, Type) 28 | end 29 | end, Keys), Context}. 30 | 31 | put(Entries, Context) -> 32 | {lists:map(fun({Key, Type, Value}) -> 33 | case Type of 34 | prescription_ref -> 35 | Val = case ets:lookup(?ETS_TABLE_NAME, Key) of 36 | [] -> [Value]; 37 | [{Key, List}] -> [Value | List] 38 | end, 39 | true = ets:insert(?ETS_TABLE_NAME, {Key, Val}), 40 | ok; 41 | _Other -> 42 | true = ets:insert(?ETS_TABLE_NAME, {Key, unpack(nested, Type, Value)}), 43 | ok 44 | end 45 | end, Entries), Context}. 46 | 47 | pack(_, {Id, Name, Address, Type}, facility) -> 48 | #facility{id = Id, name = Name, address = Address, type = Type}; 49 | pack(_, {Id, Name, Address, Prescriptions}, patient) -> 50 | #patient{id = Id, name = Name, address = Address, prescriptions = Prescriptions}; 51 | pack(_, {Id, Name, Address, Prescriptions}, pharmacy) -> 52 | #pharmacy{id = Id, name = Name, address = Address, prescriptions = Prescriptions}; 53 | pack(_, {Id, PatientId, PrescriberId, PharmacyId, 54 | DatePrescribed, DateProcessed, Drugs, IsProcessed}, prescription) -> 55 | #prescription{ 56 | id = Id 57 | ,patient_id = PatientId 58 | ,prescriber_id = PrescriberId 59 | ,pharmacy_id = PharmacyId 60 | ,date_prescribed = DatePrescribed 61 | ,date_processed = DateProcessed 62 | ,drugs = Drugs 63 | ,is_processed = IsProcessed 64 | }; 65 | pack(_, {Id, Name, Address, Speciality, Prescriptions}, staff) -> 66 | #staff{id = Id, name = Name, address = Address, speciality = Speciality, prescriptions = Prescriptions}; 67 | pack(non_nested, List, prescription_ref) -> 68 | List. 
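%% Round-trip sketch (hypothetical data): pack/3 converts the tuple stored in ETS into
%% the corresponding application record, and unpack/3 below performs the inverse:
%%   Rec = pack(nested, {1, "Some Hospital", "Lisbon", "Hospital"}, facility),
%%   {1, "Some Hospital", "Lisbon", "Hospital"} = unpack(nested, facility, Rec).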
69 | 70 | unpack(_, facility, #facility{id = Id, name = Name, address = Address, type = Type}) -> 71 | {Id, Name, Address, Type}; 72 | unpack(_, patient, #patient{id = Id, name = Name, address = Address, prescriptions = Prescriptions}) -> 73 | {Id, Name, Address, Prescriptions}; 74 | unpack(_, pharmacy, #pharmacy{id = Id, name = Name, address = Address, prescriptions = Prescriptions}) -> 75 | {Id, Name, Address, Prescriptions}; 76 | unpack(_, prescription, #prescription{id = Id, patient_id = PatientId, prescriber_id = PrescriberId, 77 | pharmacy_id = PharmacyId, date_prescribed = DatePrescribed, 78 | date_processed = DateProcessed, drugs = Drugs, 79 | is_processed = IsProcessed}) -> 80 | {Id, PatientId, PrescriberId, PharmacyId, DatePrescribed, DateProcessed, Drugs, IsProcessed}; 81 | unpack(nested, staff, #staff{id = Id, name = Name, address = Address, speciality = Speciality, 82 | prescriptions = Prescriptions}) -> 83 | {Id, Name, Address, Speciality, Prescriptions}. 84 | -------------------------------------------------------------------------------- /src/fmke_gen_driver.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 SyncFree Consortium. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | -module(fmke_gen_driver). 21 | 22 | -type id() :: non_neg_integer(). 23 | -type context() :: term(). 24 | -type crdt() :: term(). 25 | 26 | -callback start_link(term()) -> {ok, pid()} | {error, term()}. 27 | -callback stop(pid()) -> ok | {error, term()}. 28 | 29 | %%----------------------------------------------------------------------------- 30 | %% Create Operations 31 | %%----------------------------------------------------------------------------- 32 | 33 | -callback create_patient(Id::id(), Name::string(), Address::string()) -> 34 | {ok | {error, Reason::term()}, Context::context()}. 35 | 36 | -callback create_pharmacy(Id::id(), Name::string(), Address::string()) -> 37 | {ok | {error, Reason::term()}, Context::context()}. 38 | 39 | -callback create_facility(Id::id(), Name::string(), Address::string(), Type::string()) -> 40 | {ok | {error, Reason::term()}, Context::context()}. 41 | 42 | -callback create_staff(Id::id(), Name::string(), Address::string(), Speciality::string()) -> 43 | {ok | {error, Reason::term()}, Context::context()}. 44 | 45 | -callback create_prescription(PrescriptionId::id(), PatientId::id(), PrescriberId::id(), 46 | PharmacyId::id(), DatePrescribed::string(), Drugs::list(crdt())) -> 47 | {ok | {error, Reason::term()}, Context::context()}. 48 | 49 | % -callback create_event(EventId::id(), TreatmentId::id(), StaffMemberId::id(), Timestamp::string(), 50 | % Description::string()) -> 51 | % {ok | {error, Reason::term()}, Context::context()}. 
52 | % 53 | % -callback create_treatment(TreatmentId::id(), PatientId::id(), StaffId::id(), FacilityId::id(), 54 | % DateStarted::string()) -> 55 | % {ok | {error, Reason::term()}, Context::context()}. 56 | 57 | %%----------------------------------------------------------------------------- 58 | %% Get Operations 59 | %%----------------------------------------------------------------------------- 60 | 61 | % -callback get_event_by_id(Id::id()) -> 62 | % {{ok, Object::crdt()} | {error, Reason::term()}, Context::context()}. 63 | 64 | -callback get_facility_by_id(Id::id()) -> 65 | {{ok, Object::crdt()} | {error, Reason::term()}, Context::context()}. 66 | 67 | -callback get_patient_by_id(Id::id()) -> 68 | {{ok, Object::crdt()} | {error, Reason::term()}, Context::context()}. 69 | 70 | -callback get_pharmacy_by_id(Id::id()) -> 71 | {{ok, Object::crdt()} | {error, Reason::term()}, Context::context()}. 72 | 73 | -callback get_prescription_by_id(Id::id()) -> 74 | {{ok, Object::crdt()} | {error, Reason::term()}, Context::context()}. 75 | 76 | -callback get_staff_by_id(Id::id()) -> 77 | {{ok, Object::crdt()} | {error, Reason::term()}, Context::context()}. 78 | 79 | % -callback get_treatment_by_id(Id::id()) -> 80 | % {{ok, Object::crdt()} | {error, Reason::term()}, Context::context()}. 81 | 82 | % -callback get_facility_treatments(Id::id()) -> 83 | % {{ok, ListObjects::list(crdt())} | {error, Reason::term()}, Context::context()}. 84 | 85 | -callback get_processed_pharmacy_prescriptions(Id::id()) -> 86 | {{ok, ListObjects::list(crdt())} | {error, Reason::term()}, Context::context()}. 87 | 88 | -callback get_pharmacy_prescriptions(Id::id()) -> 89 | {{ok, ListObjects::list(crdt())} | {error, Reason::term()}, Context::context()}. 90 | 91 | -callback get_staff_prescriptions(Id::id()) -> 92 | {{ok, ListObjects::list(crdt())} | {error, Reason::term()}, Context::context()}. 93 | 94 | % -callback get_staff_treatments(Id::id()) -> 95 | % {{ok, ListObjects::list(crdt())} | {error, Reason::term()}, Context::context()}. 96 | 97 | %%----------------------------------------------------------------------------- 98 | %% Update Operations 99 | %%----------------------------------------------------------------------------- 100 | 101 | -callback process_prescription(Id::id(), DateProcessed::string()) -> 102 | {ok | {error, Reason::term()}, Context::context()}. 103 | 104 | -callback update_patient_details(Id::id(), Name::string(), Address::string()) -> 105 | {ok | {error, Reason::term()}, Context::context()}. 106 | 107 | -callback update_pharmacy_details(Id::id(), Name::string(), Address::string()) -> 108 | {ok | {error, Reason::term()}, Context::context()}. 109 | 110 | -callback update_facility_details(Id::id(), Name::string(), Address::string(), Type::string()) -> 111 | {ok | {error, Reason::term()}, Context::context()}. 112 | 113 | -callback update_staff_details(Id::id(), Name::string(), Address::string(), Speciality::string()) -> 114 | {ok | {error, Reason::term()}, Context::context()}. 115 | 116 | -callback update_prescription_medication(Id::id(), Operation::atom(), Drugs::list(crdt())) -> 117 | {ok | {error, Reason::term()}, Context::context()}. 118 | -------------------------------------------------------------------------------- /src/fmke_gen_http_handler.erl: -------------------------------------------------------------------------------- 1 | %% Default behaviour for a generic HTTP handler. 2 | -module (fmke_gen_http_handler). 3 | 4 | -include ("fmke_http.hrl"). 
5 | 6 | -export ([init/3, handle_req/5, handle_reply/5]). 7 | 8 | -callback init(Req::cowboy_req:req(), State::any()) -> {ok, cowboy_req:req(), any()}. 9 | -callback handle_req(Method::binary(), HasBody::boolean(), Req::cowboy_req:req()) -> cowboy_req:req(). 10 | -callback perform_operation(Method::binary(), Req::cowboy_req:req(), 11 | UrlParamsFound::list({atom(), binary()}), 12 | BodyParamsFound::list({atom(), any()})) 13 | -> cowboy_req:req(). 14 | 15 | %% Every requests starts being processed at the init function, and processing is 16 | %% identical throughout all modules that implement this behaviour, so this function 17 | %% reduces code duplication. 18 | init(Mod, Req, State) -> 19 | try 20 | Method = cowboy_req:method(Req), 21 | HasBody = cowboy_req:has_body(Req), 22 | Req1 = Mod:handle_req(Method, HasBody, Req), 23 | {ok, Req1, State} 24 | catch 25 | Class:Reason -> 26 | lager:error(io_lib:format("Error ~p:~p in request from ~p~n", [Class, Reason, Mod])), 27 | Req2 = handle_reply(Mod, Req, {error, internal}, false, Reason), 28 | {ok, Req2, State} 29 | end. 30 | 31 | %% Processing any request has a pattern that involves acquiring necessary parameters 32 | %% from the URL and/or HTTP body, executing an operation on the `fmke` module and 33 | %% replying back with an answer. 34 | -spec handle_req(Mod::atom(), Method::binary(), Req::cowboy_req:req(), 35 | UrlParams::list(atom()), 36 | BodyParams::list({atom(), integer | string})) -> cowboy_req:req(). 37 | handle_req(Mod, <<"GET">>, Req, UrlParams, _) -> 38 | try 39 | Bindings = cowboy_req:bindings(Req), 40 | UrlParamsFound = lists:foldl(fun(Param, Accum) -> 41 | case maps:get(Param, Bindings, undefined) of 42 | undefined -> Accum; 43 | Val -> [{Param, Val} | Accum] 44 | end 45 | end, [], UrlParams), 46 | Mod:perform_operation(<<"GET">>, Req, lists:reverse(UrlParamsFound), []) 47 | catch 48 | error:ErrReason -> 49 | handle_reply(Mod, Req, {error, internal}, false, ErrReason); 50 | _:ExReason -> 51 | handle_reply(Mod, Req, {error, internal}, false, ExReason) 52 | end; 53 | 54 | handle_req(Mod, Method, Req, UrlParams, BodyParams) -> 55 | try 56 | {ok, Body, Req1} = cowboy_req:read_body(Req), 57 | Bindings = cowboy_req:bindings(Req1), 58 | UrlParamsFound = lists:foldl(fun(Param, Accum) -> 59 | case maps:get(Param, Bindings, undefined) of 60 | undefined -> Accum; 61 | Val -> lists:append(Accum, [{Param, Val}]) 62 | end 63 | end, [], UrlParams), 64 | BodyParamsFound = fmke_http_utils:parse_body(BodyParams, Body), 65 | case BodyParamsFound of 66 | [] -> 67 | handle_reply(Mod, Req, {error, bad_req}, false, ?ERR_MISSING_BODY); 68 | _List -> 69 | case proplists:get_keys(BodyParamsFound) =:= proplists:get_keys(BodyParams) of 70 | true -> 71 | %% All body params that were requested have been found 72 | Mod:perform_operation(Method, Req1, UrlParamsFound, BodyParamsFound); 73 | false -> 74 | %% Some body parameters are missing, let Mod decide what to do 75 | Mod:perform_operation(Method, Req1, UrlParamsFound, {incomplete, BodyParamsFound}) 76 | end 77 | end 78 | catch 79 | error:ErrReason -> 80 | handle_reply(Mod, Req, {error, internal}, false, ErrReason); 81 | _:ExReason -> 82 | handle_reply(Mod, Req, {error, internal}, false, ExReason) 83 | end. 
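%% Minimal sketch of a module implementing this behaviour (hypothetical entity "foo";
%% the real handlers in this repository follow this shape):
%%   -module(fmke_http_handler_foo).
%%   -behaviour(fmke_gen_http_handler).
%%   -export([init/2, handle_req/3, perform_operation/4]).
%%   init(Req, Opts) -> fmke_gen_http_handler:init(?MODULE, Req, Opts).
%%   handle_req(<<"GET">>, _, Req) ->
%%       fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [id], []).
%%   perform_operation(<<"GET">>, Req, [{id, _BinId}], []) ->
%%       fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, true, ok).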
84 | 85 | handle_reply(_Mod, Req, ok, Success, Result) -> 86 | cowboy_req:reply(200, ?CONT_TYPE_JSON, ?ENCODE_RESPONSE(Success, Result), Req); 87 | 88 | handle_reply(_Mod, Req, {error, bad_req}, false, Result) -> 89 | cowboy_req:reply(400, ?CONT_TYPE_JSON, ?ENCODE_FAIL(Result), Req); 90 | 91 | handle_reply(Mod, Req, {error, internal}, false, Reason) -> 92 | Method = binary_to_list(cowboy_req:method(Req)), 93 | Uri = binary_to_list(cowboy_req:path(Req)), 94 | lager:error(io_lib:format("Internal error in ~p for operation ~p ~p: ~p~n", [Mod, Method, Uri, Reason])), 95 | cowboy_req:reply(500, ?CONT_TYPE_JSON, ?ENCODE_SRV_ERR, Req). 96 | -------------------------------------------------------------------------------- /src/fmke_gen_simplified_kv_driver.erl: -------------------------------------------------------------------------------- 1 | %% ------------------------------------------------------------------- 2 | %% 3 | %% Copyright (c) 2014 SyncFree Consortium. All Rights Reserved. 4 | %% 5 | %% This file is provided to you under the Apache License, 6 | %% Version 2.0 (the "License"); you may not use this file 7 | %% except in compliance with the License. You may obtain 8 | %% a copy of the License at 9 | %% 10 | %% http://www.apache.org/licenses/LICENSE-2.0 11 | %% 12 | %% Unless required by applicable law or agreed to in writing, 13 | %% software distributed under the License is distributed on an 14 | %% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | %% KIND, either express or implied. See the License for the 16 | %% specific language governing permissions and limitations 17 | %% under the License. 18 | %% 19 | %% ------------------------------------------------------------------- 20 | 21 | -module(fmke_gen_simplified_kv_driver). 22 | -include ("fmke.hrl"). 23 | 24 | %% Types TODO: refine type defs 25 | -type context() :: term(). %% specific to each driver 26 | -type map_update() :: [nested_object_update()]. 27 | -type nested_object_update() :: nested_register_update() | nested_set_update() | nested_map_update(). 28 | -type nested_register_update() :: {creat_register, key(), term()}. 29 | -type nested_set_update() :: {create_set, key(), [term()]}. 30 | -type nested_map_update() :: {create_map, key(), map_update()} | {update_map, key(), map_update()}. 31 | 32 | 33 | %% callbacks 34 | -callback start(term()) -> {ok, context()} | {error, reason()}. %TODO: precise typespec 35 | -callback stop(term()) -> term(). 36 | 37 | %% Transactions 38 | -callback start_transaction(context()) -> {ok, context()}. 39 | -callback commit_transaction(context()) -> {ok, context()}. 40 | 41 | %% Returns a map object. 42 | -callback get(key(), entity(), context()) -> {ok, app_record(), context()} | {error, reason()}. 43 | 44 | %% term() is a list of lists of operations where in each position you store the operations for each level of nesting [] 45 | %% [[{update, [{update,{key,mykey},{value,myvalue}]}, [], []] means that we will perform an operation on the top level 46 | %% map and none in the lower levels. On the other side, 47 | %% [[], [], [{other_update_op, something_else}]] means that we will only add 48 | -callback put(key(), entity(), map_update(), context()) -> {ok, context()} | {error, reason(), context()}. 49 | -------------------------------------------------------------------------------- /src/fmke_http_handler_app.erl: -------------------------------------------------------------------------------- 1 | -module (fmke_http_handler_app). 2 | -include ("fmke_http.hrl"). 
3 | -include ("fmke.hrl"). 4 | 5 | -behaviour(fmke_gen_http_handler). 6 | 7 | -export([init/2, handle_req/3, perform_operation/4]). 8 | 9 | init(Req, Opts) -> 10 | fmke_gen_http_handler:init(?MODULE, Req, Opts). 11 | 12 | handle_req(<<"GET">>, _, Req) -> 13 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [], []); 14 | 15 | handle_req(<<"POST">>, _, Req) -> 16 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [], []); 17 | 18 | handle_req(<<"PUT">>, _, Req) -> 19 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [], []). 20 | 21 | perform_operation(<<"GET">>, Req, [], []) -> 22 | try 23 | StatusPropList = fmke:get_status(), 24 | DatabaseAddrs = lists:map(fun erlang:list_to_binary/1, proplists:get_value(database_addresses, StatusPropList)), 25 | StatusPropList1 = lists:keyreplace(database_addresses, 1, StatusPropList, {database_addresses, DatabaseAddrs}), 26 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, true, proplists:delete(http_port, StatusPropList1) ) 27 | catch error:ErrReason -> 28 | lager:debug("Error getting status:~n~p~n", [ErrReason]), 29 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 30 | end. 31 | -------------------------------------------------------------------------------- /src/fmke_http_handler_facilities.erl: -------------------------------------------------------------------------------- 1 | -module (fmke_http_handler_facilities). 2 | -include ("fmke_http.hrl"). 3 | 4 | -behaviour(fmke_gen_http_handler). 5 | 6 | -export([init/2, handle_req/3, perform_operation/4]). 7 | 8 | init(Req, Opts) -> 9 | fmke_gen_http_handler:init(?MODULE, Req, Opts). 10 | 11 | %% Create facility function ( POST /facilities ) 12 | handle_req(<<"GET">>, _, Req) -> 13 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [id], []); 14 | 15 | handle_req(<<"POST">>, true, Req) -> 16 | Properties = [{id, integer}, {name, string}, {address, string}, {type, string}], 17 | fmke_gen_http_handler:handle_req(?MODULE, <<"POST">>, Req, [], Properties); 18 | 19 | handle_req(<<"PUT">>, true, Req) -> 20 | Properties = [{name, string}, {address, string}, {type, string}], 21 | fmke_gen_http_handler:handle_req(?MODULE, <<"PUT">>, Req, [id], Properties).
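%% Example requests (sketch; assumes this handler is mounted at /facilities and that
%% <http_port> is the HTTP port configured for FMKe):
%%   curl -X POST http://localhost:<http_port>/facilities \
%%        -d '{"id":1,"name":"Some Hospital","address":"Lisbon","type":"Hospital"}'
%%   curl http://localhost:<http_port>/facilities/1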
22 | 23 | perform_operation(<<"GET">>, Req, [{id, BinaryId}], []) -> 24 | try 25 | Id = fmke_http_utils:parse_id(BinaryId), 26 | {Success, ServerResponse} = case fmke:get_facility_by_id(Id) of 27 | {error, Reason} -> {false, Reason}; 28 | FacilityRecord -> {true, fmke_json:encode(FacilityRecord)} 29 | end, 30 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 31 | catch error:ErrReason -> 32 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 33 | end; 34 | 35 | perform_operation(<<"POST">>, Req, [], [{id, Id}, {name, Name}, {address, Address}, {type, Type}]) -> 36 | try 37 | {Success, ServerResponse} = case fmke:create_facility(Id, Name, Address, Type) of 38 | ok -> {true, ok}; 39 | {error, Reason} -> {false, Reason} 40 | end, 41 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 42 | catch error:ErrReason -> 43 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 44 | end; 45 | 46 | perform_operation(<<"PUT">>, Req, [{id, BinaryId}], [{name, Name}, {address, Address}, {type, Type}]) -> 47 | try 48 | Id = fmke_http_utils:parse_id(BinaryId), 49 | {Success, ServerResponse} = case fmke:update_facility_details(Id, Name, Address, Type) of 50 | ok -> {true, ok}; 51 | {error, Reason} -> {false, Reason} 52 | end, 53 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 54 | catch error:ErrReason -> 55 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 56 | end. 57 | -------------------------------------------------------------------------------- /src/fmke_http_handler_patients.erl: -------------------------------------------------------------------------------- 1 | -module (fmke_http_handler_patients). 2 | -include ("fmke_http.hrl"). 3 | 4 | -behaviour(fmke_gen_http_handler). 5 | 6 | -export([init/2, handle_req/3, perform_operation/4]). 7 | 8 | init(Req, Opts) -> 9 | fmke_gen_http_handler:init(?MODULE, Req, Opts). 10 | 11 | %% Create patient function ( POST /patients ) 12 | handle_req(<<"GET">>, _, Req) -> 13 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [id], []); 14 | 15 | handle_req(<<"POST">>, true, Req) -> 16 | Properties = [{id, integer}, {name, string}, {address, string}], 17 | fmke_gen_http_handler:handle_req(?MODULE, <<"POST">>, Req, [], Properties); 18 | 19 | handle_req(<<"PUT">>, true, Req) -> 20 | Properties = [{name, string}, {address, string}], 21 | fmke_gen_http_handler:handle_req(?MODULE, <<"PUT">>, Req, [id], Properties). 
22 | 23 | perform_operation(<<"GET">>, Req, [{id, BinaryId}], []) -> 24 | try 25 | Id = fmke_http_utils:parse_id(BinaryId), 26 | {Success, ServerResponse} = case fmke:get_patient_by_id(Id) of 27 | {error, Reason} -> {false, Reason}; 28 | PatientRecord -> {true, fmke_json:encode(PatientRecord)} 29 | end, 30 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 31 | catch error:ErrReason -> 32 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 33 | end; 34 | 35 | perform_operation(<<"POST">>, Req, [], [{id, Id}, {name, Name}, {address, Address}]) -> 36 | try 37 | {Success, ServerResponse} = case fmke:create_patient(Id, Name, Address) of 38 | ok -> {true, ok}; 39 | {error, Reason} -> {false, Reason} 40 | end, 41 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 42 | catch error:ErrReason -> 43 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 44 | end; 45 | 46 | perform_operation(<<"PUT">>, Req, [{id, BinaryId}], [{name, Name}, {address, Address}]) -> 47 | try 48 | Id = fmke_http_utils:parse_id(BinaryId), 49 | {Success, ServerResponse} = case fmke:update_patient_details(Id, Name, Address) of 50 | ok -> {true, ok}; 51 | {error, Reason} -> {false, Reason} 52 | end, 53 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 54 | catch error:ErrReason -> 55 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 56 | end. 57 | -------------------------------------------------------------------------------- /src/fmke_http_handler_pharmacies.erl: -------------------------------------------------------------------------------- 1 | -module (fmke_http_handler_pharmacies). 2 | -include ("fmke_http.hrl"). 3 | 4 | -behaviour(fmke_gen_http_handler). 5 | 6 | -export([init/2, handle_req/3, perform_operation/4]). 7 | 8 | init(Req, Opts) -> 9 | fmke_gen_http_handler:init(?MODULE, Req, Opts). 10 | 11 | %% Create pharmacy function ( POST /pharmacies ) 12 | handle_req(<<"GET">>, _, Req) -> 13 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [id], []); 14 | 15 | handle_req(<<"POST">>, true, Req) -> 16 | Properties = [{id, integer}, {name, string}, {address, string}], 17 | fmke_gen_http_handler:handle_req(?MODULE, <<"POST">>, Req, [], Properties); 18 | 19 | handle_req(<<"PUT">>, true, Req) -> 20 | Properties = [{name, string}, {address, string}], 21 | fmke_gen_http_handler:handle_req(?MODULE, <<"PUT">>, Req, [id], Properties). 
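%% Example update request (sketch; the /pharmacies route is taken from the comment
%% above and <http_port> stands for the configured HTTP port):
%%   curl -X PUT http://localhost:<http_port>/pharmacies/1 \
%%        -d '{"name":"Central Pharmacy","address":"Porto"}'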
22 | 23 | perform_operation(<<"GET">>, Req, [{id, BinaryId}], []) -> 24 | try 25 | Id = fmke_http_utils:parse_id(BinaryId), 26 | {Success, ServerResponse} = case fmke:get_pharmacy_by_id(Id) of 27 | {error, Reason} -> {false, Reason}; 28 | PharmacyRecord -> {true, fmke_json:encode(PharmacyRecord)} 29 | end, 30 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 31 | catch error:ErrReason -> 32 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 33 | end; 34 | 35 | perform_operation(<<"POST">>, Req, [], [{id, Id}, {name, Name}, {address, Address}]) -> 36 | try 37 | {Success, ServerResponse} = case fmke:create_pharmacy(Id, Name, Address) of 38 | ok -> {true, ok}; 39 | {error, Reason} -> {false, Reason} 40 | end, 41 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 42 | catch error:ErrReason -> 43 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 44 | end; 45 | 46 | perform_operation(<<"PUT">>, Req, [{id, BinaryId}], [{name, Name}, {address, Address}]) -> 47 | try 48 | Id = fmke_http_utils:parse_id(BinaryId), 49 | {Success, ServerResponse} = case fmke:update_pharmacy_details(Id, Name, Address) of 50 | ok -> {true, ok}; 51 | {error, Reason} -> {false, Reason} 52 | end, 53 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 54 | catch error:ErrReason -> 55 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 56 | end. 57 | -------------------------------------------------------------------------------- /src/fmke_http_handler_prescriptions.erl: -------------------------------------------------------------------------------- 1 | -module (fmke_http_handler_prescriptions). 2 | -include ("fmke_http.hrl"). 3 | 4 | -behaviour(fmke_gen_http_handler). 5 | 6 | -export([init/2, handle_req/3, perform_operation/4]). 7 | 8 | init(Req, Opts) -> 9 | fmke_gen_http_handler:init(?MODULE, Req, Opts). 10 | 11 | %% Create prescription function ( POST /prescriptions ) 12 | handle_req(<<"GET">>, _, Req) -> 13 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [id], []); 14 | 15 | handle_req(<<"POST">>, true, Req) -> 16 | Properties = [{id, integer}, {patient_id, integer}, {prescriber_id, integer}, 17 | {pharmacy_id, integer}, {date_prescribed, string}, {drugs, csv_string}], 18 | fmke_gen_http_handler:handle_req(?MODULE, <<"POST">>, Req, [], Properties); 19 | 20 | handle_req(<<"PUT">>, true, Req) -> 21 | Properties = [{date_processed, string}, {drugs, csv_string}], 22 | fmke_gen_http_handler:handle_req(?MODULE, <<"PUT">>, Req, [id], Properties).
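%% Example requests (sketch; assumes this handler is mounted at /prescriptions and
%% <http_port> is the configured HTTP port):
%%   curl -X POST http://localhost:<http_port>/prescriptions \
%%        -d '{"id":1,"patient_id":1,"prescriber_id":1,"pharmacy_id":1,"date_prescribed":"2017-01-01","drugs":"Aspirin,Ibuprofen"}'
%% Processing a prescription only sends date_processed, which is why the PUT clauses
%% below match on an {incomplete, ...} parameter list:
%%   curl -X PUT http://localhost:<http_port>/prescriptions/1 -d '{"date_processed":"2017-02-01"}'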
23 | 24 | perform_operation(<<"GET">>, Req, [{id, BinaryId}], []) -> 25 | try 26 | Id = fmke_http_utils:parse_id(BinaryId), 27 | {Success, ServerResponse} = case fmke:get_prescription_by_id(Id) of 28 | {error, Reason} -> {false, Reason}; 29 | PrescriptionRecord -> {true, fmke_json:encode(PrescriptionRecord)} 30 | end, 31 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 32 | catch error:ErrReason -> 33 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 34 | end; 35 | 36 | perform_operation(<<"POST">>, Req, [], 37 | [{id, Id}, {patient_id, PatId}, {prescriber_id, StaffId}, 38 | {pharmacy_id, PharmId}, {date_prescribed, DatePresc}, {drugs, Drugs}]) -> 39 | try 40 | {Success, ServerResponse} = case fmke:create_prescription(Id, PatId, StaffId, PharmId, DatePresc, Drugs) of 41 | ok -> {true, ok}; 42 | {error, txn_aborted} -> {false, <<"txn_aborted">>}; 43 | {error, Reason} -> {false, Reason} 44 | end, 45 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 46 | catch error:ErrReason -> 47 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 48 | end; 49 | 50 | %% Process prescription 51 | perform_operation(<<"PUT">>, Req, [{id, BinaryId}], {incomplete, [{date_processed, Date}]}) -> 52 | try 53 | Id = fmke_http_utils:parse_id(BinaryId), 54 | {Success, ServerResponse} = case fmke:process_prescription(Id, Date) of 55 | ok -> {true, ok}; 56 | {error, txn_aborted} -> {false, <<"txn_aborted">>}; 57 | {error, Reason} -> {false, Reason} 58 | end, 59 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 60 | catch error:ErrReason -> 61 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 62 | end; 63 | 64 | %% Update prescription medication 65 | perform_operation(<<"PUT">>, Req, [{id, BinaryId}], {incomplete, [{drugs, Drugs}]}) -> 66 | try 67 | Id = fmke_http_utils:parse_id(BinaryId), 68 | {Success, ServerResponse} = case fmke:update_prescription_medication(Id, add_drugs, Drugs) of 69 | ok -> {true, ok}; 70 | {error, txn_aborted} -> {false, <<"txn_aborted">>}; 71 | {error, prescription_already_processed} -> {false, <<"prescription_already_processed">>}; 72 | {error, Reason} -> {false, list_to_binary(lists:flatten(io_lib:format("~p", [Reason])))} 73 | end, 74 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 75 | catch error:ErrReason -> 76 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 77 | end. 78 | -------------------------------------------------------------------------------- /src/fmke_http_handler_staff.erl: -------------------------------------------------------------------------------- 1 | -module (fmke_http_handler_staff). 2 | -include ("fmke_http.hrl"). 3 | 4 | -behaviour(fmke_gen_http_handler). 5 | 6 | -export([init/2, handle_req/3, perform_operation/4]). 7 | 8 | init(Req, Opts) -> 9 | fmke_gen_http_handler:init(?MODULE, Req, Opts). 
10 | 11 | %% Create staff member function ( POST /staff ) 12 | handle_req(<<"GET">>, _, Req) -> 13 | fmke_gen_http_handler:handle_req(?MODULE, <<"GET">>, Req, [id], []); 14 | 15 | handle_req(<<"POST">>, true, Req) -> 16 | Properties = [{id, integer}, {name, string}, {address, string}, {speciality, string}], 17 | fmke_gen_http_handler:handle_req(?MODULE, <<"POST">>, Req, [], Properties); 18 | 19 | handle_req(<<"PUT">>, true, Req) -> 20 | Properties = [{name, string}, {address, string}, {speciality, string}], 21 | fmke_gen_http_handler:handle_req(?MODULE, <<"PUT">>, Req, [id], Properties). 22 | 23 | perform_operation(<<"GET">>, Req, [{id, BinaryId}], []) -> 24 | try 25 | Id = fmke_http_utils:parse_id(BinaryId), 26 | {Success, ServerResponse} = case fmke:get_staff_by_id(Id) of 27 | {error, Reason} -> {false, Reason}; 28 | StaffRecord -> {true, fmke_json:encode(StaffRecord)} 29 | end, 30 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 31 | catch error:ErrReason -> 32 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 33 | end; 34 | 35 | perform_operation(<<"POST">>, Req, [], [{id, Id}, {name, Name}, {address, Address}, {speciality, Speciality}]) -> 36 | try 37 | {Success, ServerResponse} = case fmke:create_staff(Id, Name, Address, Speciality) of 38 | ok -> {true, ok}; 39 | {error, Reason} -> {false, Reason} 40 | end, 41 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 42 | catch error:ErrReason -> 43 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 44 | end; 45 | 46 | perform_operation(<<"PUT">>, Req, [{id, BinaryId}], [{name, Name}, {address, Address}, {speciality, Speciality}]) -> 47 | try 48 | Id = fmke_http_utils:parse_id(BinaryId), 49 | {Success, ServerResponse} = case fmke:update_staff_details(Id, Name, Address, Speciality) of 50 | ok -> {true, ok}; 51 | {error, Reason} -> {false, Reason} 52 | end, 53 | fmke_gen_http_handler:handle_reply(?MODULE, Req, ok, Success, ServerResponse) 54 | catch error:ErrReason -> 55 | fmke_gen_http_handler:handle_reply(?MODULE, Req, {error, bad_req}, false, ErrReason) 56 | end. 57 | -------------------------------------------------------------------------------- /src/fmke_http_utils.erl: -------------------------------------------------------------------------------- 1 | %% Generally useful HTTP parsing functions. Required for gen_http_handler and for 2 | %% other modules that may wish to manually parse a specific field. 3 | -module (fmke_http_utils). 4 | -include ("fmke_http.hrl"). 5 | 6 | %%%------------------------------------------------------------------- 7 | %%% API 8 | %%%------------------------------------------------------------------- 9 | -export ([ 10 | parse_body/2 11 | ,parse_body/3 12 | ,parse_id/1 13 | ,parse_string/1 14 | ,parse_csv_string/1 15 | ]). 16 | 17 | -spec parse_body(list({atom(), atom()}), binary()) -> list({atom(), any()}). 18 | %% Tries to extract a list of properties from an HTTP body. 19 | %% The property list must be an Erlang proplist in the form [{prop_name, prop_type}] 20 | %% prop_type must be one of: string | integer | csv_string 21 | %% Returns a proplist with all the found properties that match the passed types. 22 | parse_body([], _) -> 23 | []; 24 | parse_body(_, <<>>) -> 25 | []; 26 | parse_body(PropertyList, BinaryJson) -> 27 | Json = jsx:decode(BinaryJson), 28 | parse_body(PropertyList, Json, []).
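%% Example (hypothetical JSON body): properties come back in the order they were
%% requested, and entries whose value fails to parse are silently dropped:
%%   [{id, 1}, {name, "FMKe"}] =
%%       parse_body([{id, integer}, {name, string}], <<"{\"id\":1,\"name\":\"FMKe\"}">>).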
29 | 30 | parse_body([], _, Accum) -> 31 | lists:reverse(Accum); 32 | parse_body([H|T], Json, Accum) -> 33 | {Property, Type} = H, 34 | true = is_atom(Type), 35 | EncodedProperty = list_to_binary(atom_to_list(Property)), 36 | try 37 | ParsedValue = case Type of 38 | csv_string -> parse_csv_string(proplists:get_value(EncodedProperty, Json)); 39 | string -> parse_string(proplists:get_value(EncodedProperty, Json)); 40 | integer -> parse_id(proplists:get_value(EncodedProperty, Json)); 41 | _ -> erlang:error(unknown_property_type, Type) 42 | end, 43 | parse_body(T, Json, [{Property, ParsedValue} | Accum]) 44 | catch 45 | %% Prevents error from bubbling up since this is a tentative approach 46 | _:_ -> parse_body(T, Json, Accum) 47 | end. 48 | 49 | %% Does a best effort approach to parsing an integer 50 | parse_id(undefined) -> 51 | erlang:error(missing_id); 52 | parse_id(Id) when is_integer(Id) andalso Id >= ?MIN_ID -> 53 | Id; 54 | parse_id(Id) when is_integer(Id) -> 55 | erlang:error(invalid_id); 56 | parse_id(Id) when is_list(Id) -> 57 | try 58 | parse_id(list_to_integer(Id)) 59 | catch 60 | _:_ -> erlang:error(invalid_id) 61 | end; 62 | parse_id(Id) when is_binary(Id) -> 63 | parse_id(binary_to_list(Id)). 64 | 65 | parse_string(undefined) -> 66 | erlang:error(missing_string); 67 | parse_string(String) when is_binary(String) -> 68 | List = binary_to_list(String), 69 | case io_lib:printable_unicode_list(List) of 70 | true -> List; 71 | false -> erlang:error(invalid_string) 72 | end; 73 | parse_string(String) when is_list(String) -> 74 | String. 75 | 76 | parse_csv_string(undefined) -> 77 | erlang:error(missing_csv_string); 78 | parse_csv_string(String) -> 79 | ParsedString = parse_string(String), 80 | lists:map(fun(Str) -> string:trim(Str) end, string:tokens(ParsedString, ",")). 81 | 82 | -ifdef(TEST). 83 | -include_lib("eunit/include/eunit.hrl"). 84 | 85 | parse_undefined_id_test() -> 86 | ?assertException(error, missing_id, parse_id(undefined)). 87 | 88 | parse_non_negative_id_from_integer_test() -> 89 | 0 = parse_id(0). 90 | 91 | parse_non_negative_id_from_string_test() -> 92 | 1 = parse_id("1"). 93 | 94 | parse_binary_non_negative_id_test() -> 95 | 2 = parse_id(<<"2">>). 96 | 97 | parse_negative_id_from_integer_test() -> 98 | ?assertError(invalid_id, parse_id(-1)). 99 | 100 | parse_negative_id_from_string_test() -> 101 | ?assertError(invalid_id, parse_id("-1")). 102 | 103 | parse_invalid_string_as_id_test() -> 104 | ?assertError(invalid_id, parse_id("abc")). 105 | 106 | parse_binary_string_as_id_test() -> 107 | ?assertError(invalid_id, parse_id(<<"d">>)). 108 | 109 | parse_invalid_binary_as_id_test() -> 110 | ?assertError(invalid_id, parse_id(<<1, 17, 42>>)). 111 | 112 | parse_undefined_as_string_test() -> 113 | ?assertError(missing_string, parse_string(undefined)). 114 | 115 | parse_valid_string_from_binary_test() -> 116 | "FMKe" = parse_string(<<"FMKe">>). 117 | 118 | parse_string_from_invalid_binary_test() -> 119 | ?assertError(invalid_string, parse_string(<<123, 200, 21>>)). 120 | 121 | parse_empty_list_of_params_with_valid_body_test() -> 122 | [] = parse_body([], <<"body">>). 123 | 124 | parse_empty_body_test() -> 125 | [] = parse_body([{username, string}, {password, string}], <<>>), 126 | [] = parse_body([{medicine, csv_string}], <<>>). 127 | 128 | parse_property_with_invalid_type_from_valid_body_test() -> 129 | [] = parse_body([{fmke, text}], <<"{\"fmke\":\"benchmark\"}">>). 
130 | 131 | parse_multiple_properties_from_valid_body_test() -> 132 | [{fmke, "benchmark"}, {rating, "awesome"}] 133 | = parse_body([{fmke, string}, {rating, string}], <<"{\"fmke\":\"benchmark\",\"rating\":\"awesome\"}">>). 134 | 135 | parse_partial_prop_list_from_valid_body_test() -> 136 | [{fmke, "benchmark"}] = parse_body([{fmke, string}, {rating, string}], <<"{\"fmke\":\"benchmark\"}">>), 137 | [{fmke, "benchmark"}] = parse_body([{fmke, string}, {rating, integer}], <<"{\"fmke\":\"benchmark\"}">>), 138 | [{fmke, "benchmark"}] = parse_body([{fmke, string}, {rating, csv_string}], <<"{\"fmke\":\"benchmark\"}">>). 139 | 140 | parse_deeply_nested_object_from_body_test() -> 141 | [{fmke, [{<<"is">>, [{<<"a">>, [{<<"great">>, [{<<"benchmark">>, true}]}]}]}]}] 142 | = parse_body([{fmke, string}], <<"{\"fmke\":{\"is\":{\"a\":{\"great\":{\"benchmark\":true}}}}}">>). 143 | 144 | parse_multi_value_data_from_body_test() -> 145 | [{fmke, [<<"great">>, <<"useful">>]}] = parse_body([{fmke, string}], <<"{\"fmke\":[\"great\",\"useful\"]}">>). 146 | 147 | parse_csv_string_data_from_body_test() -> 148 | [{fmke, ["benchmark", "key-value stores"]}] 149 | = parse_body([{fmke, csv_string}], <<"{\"fmke\":\"benchmark,key-value stores\"}">>). 150 | 151 | parse_integer_number_from_body_test() -> 152 | [{number, 20012018}] = parse_body([{number, integer}], <<"{\"number\":\"20012018\"}">>), 153 | %% negative numbers are not considered valid IDs so they throw an exception 154 | [] = parse_body([{number, integer}], <<"{\"number\":\"-20012018\"}">>). 155 | 156 | -endif. 157 | -------------------------------------------------------------------------------- /src/fmke_setup_sup.erl: -------------------------------------------------------------------------------- 1 | %% ---------------------------------------------------------------------------- 2 | %% fmke_setup_sup: supervises the adapter, driver and connection manager 3 | %% Stars 2-3 children: 4 | %% - adapter: the proper adapter for handling the database according to the config 5 | %% - driver: a module that is able to perform operations on the selected database 6 | %% - conn_manager_sup: only started if the driver needs it, manages connections to the database 7 | %% ---------------------------------------------------------------------------- 8 | -module(fmke_setup_sup). 9 | 10 | -behaviour(supervisor). 11 | 12 | -include("fmke.hrl"). 13 | 14 | %% API 15 | -export([start_link/1]). 16 | 17 | %% Supervisor callbacks 18 | -export([init/1]). 19 | 20 | -define(SERVER, ?MODULE). 21 | -define(ETS_TABLE_OPTS, [set, public, named_table, {keypos,1}, {write_concurrency,false}, {read_concurrency,false}]). 22 | 23 | start_link(Args) -> 24 | supervisor:start_link({local, ?SERVER}, ?MODULE, Args). 25 | 26 | init([]) -> 27 | %% Driver, adapter, data_model and connection_pool_size are assumed to be defined at this point by fmke_sup. 28 | Driver = fmke_driver_config:selected_driver(), 29 | {ok, DataModel} = application:get_env(?APP, data_model), 30 | {ok, ConnPoolSize} = application:get_env(?APP, connection_pool_size), 31 | %% these remaining 2 options may be undefined if working with Mnesia, ETS, or other types of connections that don't 32 | %% use {hostname, port} combinations to connect to the data store. 
33 | Hostnames = application:get_env(?APP, database_addresses), 34 | PortNums = application:get_env(?APP, database_ports), 35 | 36 | case fmke_driver_config:requires_ets_table(Driver) of 37 | true -> ets:new(?ETS_TABLE_NAME, ?ETS_TABLE_OPTS); 38 | false -> ok 39 | end, 40 | 41 | RestartStrategy = #{ 42 | strategy => rest_for_one, 43 | intensity => 10, 44 | period => 10 45 | }, 46 | 47 | DataModel = case application:get_env(?APP, data_model) of 48 | undefined -> 49 | ?DEFAULT(data_model); 50 | {ok, RequestedDataModel} -> 51 | RequestedDataModel 52 | end, 53 | 54 | BaseChildren = [handler_pool_spec()], 55 | 56 | Children = case {Hostnames, PortNums} of 57 | {undefined, undefined} -> 58 | lager:info("list of hosts and ports are undefined, cannot create connection pools."), 59 | ok = application:set_env(?APP, pools, []), 60 | BaseChildren; 61 | {_Hosts, undefined} -> 62 | lager:info("list of ports is undefined, cannot create connection pools."), 63 | ok = application:set_env(?APP, pools, []), 64 | BaseChildren; 65 | {undefined, _Ports} -> 66 | lager:info("list of hosts is undefined, cannot create connection pools."), 67 | ok = application:set_env(?APP, pools, []), 68 | BaseChildren; 69 | {{ok, Hs}, {ok, Ps}} -> 70 | {Hosts, Ports} = make_same_len(Hs, Ps), 71 | case fmke_driver_config:requires_conn_manager(Driver) of 72 | false -> 73 | ok = application:set_env(?APP, pools, []), 74 | ok = application:set_env(?APP, hosts, Hosts), 75 | ok = application:set_env(?APP, ports, Ports), 76 | BaseChildren; 77 | true -> 78 | Mod = fmke_driver_config:get_client_lib(Driver), 79 | Pools = gen_pool_names(Hosts, Ports), 80 | Connections = lists:zip(Hosts, Ports), 81 | Args = [Pools, Connections, Mod, ConnPoolSize], 82 | BaseChildren ++ [conn_mgr_sup_spec(Args)] 83 | end 84 | end, 85 | 86 | {ok, {RestartStrategy, Children}}. 87 | 88 | -spec conn_mgr_sup_spec(Args::list(term())) -> supervisor:child_spec(). 89 | conn_mgr_sup_spec(Args) -> 90 | #{ 91 | id => conn_manager_sup, 92 | start => {fmke_db_conn_sup, start_link, [Args]}, 93 | restart => permanent, 94 | type => supervisor 95 | }. 96 | 97 | -spec handler_pool_spec() -> supervisor:child_spec(). 98 | handler_pool_spec() -> 99 | Driver = fmke_driver_config:selected_driver(), 100 | {Module, WorkerArgs} = case fmke_driver_config:is_simple_kv_driver(Driver) of 101 | false -> 102 | {Driver, []}; 103 | true -> 104 | {ok, DataModel} = application:get_env(?APP, data_model), 105 | {fmke_kv_adapter, [Driver, DataModel]} 106 | end, 107 | Name = handlers, 108 | NumHandlers = get_handler_pool_size(), 109 | lager:info("Handler pool will have ~p procs.", [NumHandlers]), 110 | SizeArgs = [{size, NumHandlers}, {max_overflow, 0}], 111 | PoolArgs = [{name, {local, Name}}, {worker_module, Module}] ++ SizeArgs, 112 | poolboy:child_spec(Name, PoolArgs, WorkerArgs). 113 | 114 | get_handler_pool_size() -> 115 | Driver = fmke_driver_config:selected_driver(), 116 | {ok, ConnPoolSize} = application:get_env(?APP, connection_pool_size), 117 | case fmke_driver_config:requires_conn_manager(Driver) of 118 | false -> 119 | ConnPoolSize; 120 | true -> 121 | ConnPoolSize * get_num_db_pools() 122 | end. 123 | 124 | get_num_db_pools() -> 125 | {ok, Hostnames} = application:get_env(?APP, database_addresses), 126 | {ok, PortNums} = application:get_env(?APP, database_ports), 127 | {Hosts, _} = make_same_len(Hostnames, PortNums), 128 | length(Hosts). 129 | 130 | -spec gen_pool_names(list(list()), list(non_neg_integer())) -> list(atom()). 
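%% Worked sizing example (hypothetical numbers): with connection_pool_size = 10 and a
%% configuration that expands to 3 database {host, port} pairs, a driver that requires the
%% connection manager gets a handler pool of 10 * 3 = 30 poolboy workers (see
%% get_handler_pool_size/0 above), while a driver without a connection manager keeps the
%% handler pool at the configured 10.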
131 | gen_pool_names(Addrs, Ports) -> 132 | gen_pool_names(Addrs, Ports, []). 133 | 134 | gen_pool_names([], [], Accum) -> 135 | lists:reverse(Accum); 136 | gen_pool_names([A|T], [P|T2], Accum) -> 137 | gen_pool_names(T, T2, [gen_pool_name(A, P) | Accum]). 138 | 139 | -spec make_same_len(L1 :: list(), L2 :: list()) -> {list(), list()}. 140 | make_same_len(L1, L2) when length(L1) == length(L2) -> {L1, L2}; 141 | make_same_len([H1|_T1] = L1, L2) when length(L1) < length(L2) -> make_same_len([H1 | L1], L2); 142 | make_same_len(L1, [H2|_T2] = L2) when length(L1) > length(L2) -> make_same_len(L1, [H2 | L2]). 143 | 144 | -spec gen_pool_name(Addr :: list(), Port :: non_neg_integer()) -> atom(). 145 | gen_pool_name(Addr, Port) -> 146 | AtomCompatAddr = get_atom_compatible_list(Addr), 147 | list_to_atom(unicode:characters_to_list(["pool_", AtomCompatAddr, "_", integer_to_list(Port)])). 148 | 149 | -spec get_atom_compatible_list(Str :: list()) -> list(). 150 | get_atom_compatible_list(Str) -> 151 | lists:flatten(string:replace(lists:flatten(string:replace(Str, ".", ":", all)), ":", "_", all)). 152 | 153 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 154 | %% Eunit Tests %% 155 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 156 | 157 | -ifdef(TEST). 158 | -include_lib("eunit/include/eunit.hrl"). 159 | 160 | single_host_single_port_list_setup_test() -> 161 | {Hosts, Ports} = make_same_len(["127.0.0.1"], [8087]), 162 | ?assertEqual(Hosts, ["127.0.0.1"]), 163 | ?assertEqual(Ports, [8087]). 164 | 165 | single_host_multiple_ports_list_setup_test() -> 166 | {Hosts, Ports} = make_same_len(["127.0.0.1"], [8087, 8187, 8287, 8387]), 167 | ?assertEqual(Hosts, ["127.0.0.1", "127.0.0.1", "127.0.0.1", "127.0.0.1"]), 168 | ?assertEqual(Ports, [8087, 8187, 8287, 8387]). 169 | 170 | multiple_hosts_single_port_list_setup_test() -> 171 | {Hosts, Ports} = make_same_len(["127.0.0.1", "8.8.8.8", "196.162.1.1", "0.0.0.0"], [8087]), 172 | ?assertEqual(Hosts, ["127.0.0.1", "8.8.8.8", "196.162.1.1", "0.0.0.0"]), 173 | ?assertEqual(Ports, [8087, 8087, 8087, 8087]). 174 | 175 | multiple_hosts_multiple_ports_same_length_list_setup_test() -> 176 | Hosts = ["8.8.8.8", "196.162.1.1"], 177 | Ports = [8087, 8187], 178 | ?assertEqual({Hosts, Ports}, make_same_len(Hosts, Ports)). 179 | 180 | multiple_hosts_multiple_ports_more_hosts_list_setup_test() -> 181 | Hosts = ["127.0.0.1", "8.8.8.8", "196.162.1.1", "0.0.0.0"], 182 | Ports = [8087, 8187], 183 | ?assertEqual({Hosts, [8087, 8087, 8087, 8187]}, make_same_len(Hosts, Ports)). 184 | 185 | multiple_hosts_multiple_ports_more_ports_list_setup_test() -> 186 | Hosts = ["8.8.8.8", "196.162.1.1"], 187 | Ports = [8087, 8187, 8287, 8387], 188 | ?assertEqual({["8.8.8.8", "8.8.8.8", "8.8.8.8", "196.162.1.1"], Ports}, make_same_len(Hosts, Ports)). 189 | 190 | get_atom_from_ipv4_addr_test() -> 191 | ?assertEqual('127_0_0_1', list_to_atom(get_atom_compatible_list("127.0.0.1"))). 192 | 193 | get_atom_from_ipv6_addr_test() -> 194 | ?assertEqual('2001_0db8_85a3_0000_0000_8a2e_0370_7334', 195 | list_to_atom(get_atom_compatible_list("2001:0db8:85a3:0000:0000:8a2e:0370:7334"))). 196 | 197 | -endif. 
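%% End-to-end example (hypothetical addresses, illustrative only): make_same_len/2 pads the
%% shorter list by repeating its head, and gen_pool_names/2 then derives one atom-compatible
%% pool name per {host, port} pair:
%%   make_same_len(["10.0.0.1", "10.0.0.2"], [8087])
%%       -> {["10.0.0.1", "10.0.0.2"], [8087, 8087]}
%%   gen_pool_names(["10.0.0.1", "10.0.0.2"], [8087, 8087])
%%       -> [pool_10_0_0_1_8087, pool_10_0_0_2_8087]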
198 | -------------------------------------------------------------------------------- /src/fmke_sup.erl: -------------------------------------------------------------------------------- 1 | %% ---------------------------------------------------------------------------- 2 | %% fmke_sup: supervise the FMKe application 3 | %% Stars 3 children: 4 | %% - cowboy: web server 5 | %% - fmke: application 6 | %% - fmke_setup_sup: supervisor for the driver setup 7 | %% ---------------------------------------------------------------------------- 8 | 9 | -module(fmke_sup). 10 | 11 | -behaviour(supervisor). 12 | 13 | -include ("fmke.hrl"). 14 | 15 | %% API 16 | -export([start_link/1]). 17 | 18 | %% Supervisor callbacks 19 | -export([init/1]). 20 | 21 | %% useful config funs 22 | -import(fmke_driver_config, [ 23 | driver_adapter/1, 24 | default_driver/1, 25 | db_from_driver/1 26 | ]). 27 | 28 | -define(SERVER, ?MODULE). 29 | 30 | -define(KV_ADAPTER, fmke_kv_adapter). 31 | -define(SQL_ADAPTER, fmke_sql_adapter). 32 | -define(PASS_THROUGH_ADAPTER, fmke_pt_adapter). 33 | 34 | %%==================================================================== 35 | %% API functions 36 | %%==================================================================== 37 | 38 | start_link(Args) -> 39 | supervisor:start_link({local, ?SERVER}, ?MODULE, Args). 40 | 41 | %%==================================================================== 42 | %% Supervisor callbacks 43 | %%==================================================================== 44 | 45 | %% Child :: {Id,StartFunc,Restart,Shutdown,Type,Modules} 46 | init(_Args) -> 47 | config_env(), 48 | %% read required parameters from app environment 49 | {ok, HttpPort} = application:get_env(?APP, http_port), 50 | Adapter = fmke_driver_config:selected_adapter(), 51 | 52 | RestartStrategy = #{strategy => one_for_one, intensity => 10, period => 10}, 53 | {ok, {RestartStrategy, [ 54 | gen_web_server_spec(HttpPort), 55 | fmke_spec(Adapter), 56 | setup_sup_spec() 57 | ]}}. 58 | 59 | %%==================================================================== 60 | %% Internal functions 61 | %%==================================================================== 62 | 63 | -spec gen_web_server_spec(HttpPort::non_neg_integer()) -> supervisor:child_spec(). 64 | gen_web_server_spec(HttpPort) -> 65 | Dispatch = cowboy_router:compile([ 66 | {'_', [ 67 | {"/", fmke_http_handler_app, []}, 68 | {"/prescriptions/[:id]", fmke_http_handler_prescriptions, []}, 69 | {"/patients/[:id]", fmke_http_handler_patients, []}, 70 | {"/pharmacies/[:id]", fmke_http_handler_pharmacies, []}, 71 | {"/pharmacies/[:id]/prescriptions", fmke_http_handler_pharmacies, prescriptions}, 72 | {"/pharmacies/[:id]/processed_prescriptions", fmke_http_handler_pharmacies, processed_prescriptions}, 73 | {"/facilities/[:id]", fmke_http_handler_facilities, []}, 74 | {"/staff/[:id]", fmke_http_handler_staff, []}, 75 | {"/staff/[:id]/prescriptions", fmke_http_handler_staff, prescriptions} 76 | ]} 77 | ]), 78 | 79 | #{ 80 | id => cowboy, 81 | start => {cowboy, start_clear, [fmke_http_listener, [{port, HttpPort}], #{env => #{dispatch => Dispatch}}]}, 82 | restart => permanent, 83 | type => worker 84 | }. 85 | 86 | -spec fmke_spec(module()) -> supervisor:child_spec(). 87 | fmke_spec(Adapter) -> 88 | #{ 89 | id => fmke, 90 | start => {fmke, start_link, [[Adapter]]}, 91 | restart => permanent, 92 | type => worker 93 | }. 94 | 95 | -spec setup_sup_spec() -> supervisor:child_spec(). 
96 | setup_sup_spec() -> 97 | #{ 98 | id => setup_sup, 99 | start => {fmke_setup_sup, start_link, [[]]}, 100 | restart => permanent, 101 | type => supervisor 102 | }. 103 | 104 | config_env() -> 105 | try 106 | {ok, CurrentDirectory} = file:get_cwd(), 107 | ConfigFile = CurrentDirectory ++ ?CONFIG_FILE_PATH, 108 | lager:info("Trying to fetch config from ~p...~n", [ConfigFile]), 109 | {ok, AppProps} = file:consult(ConfigFile), 110 | config(AppProps) 111 | catch 112 | Error:Reason:Stack -> 113 | lager:error("Error reading from config file: ~p:~p~nStacktrace: ~p~n", [Error, Reason, Stack]), 114 | lager:info("Could not read from config file, reverting to environment and default values..."), 115 | config([]) 116 | end. 117 | 118 | %% Sets all options needed to start FMKe, from the 4 following sources, ordered by priority: 119 | %% OS Environment, Application Environment, config file, default value 120 | config(Config) -> 121 | Driver = get_option(driver, Config), 122 | Database = get_option(target_database, Config), 123 | PoolSize = get_option(connection_pool_size, Config), 124 | Addresses = get_option(database_addresses, Config), 125 | Ports = get_option(database_ports, Config), 126 | HttpPort = get_option(http_port, Config), 127 | Model = get_option(data_model, Config), 128 | set_opts([ 129 | {driver, Driver}, 130 | {target_database, Database}, 131 | {connection_pool_size, PoolSize}, 132 | {database_addresses, Addresses}, 133 | {database_ports, Ports}, 134 | {http_port, HttpPort}, 135 | {data_model, Model} 136 | ]). 137 | 138 | set_opts([]) -> 139 | ok; 140 | 141 | set_opts([{_Opt, undefined} | Rest]) -> 142 | set_opts(Rest); 143 | 144 | set_opts([{Opt, Val} | Rest]) -> 145 | config(Opt, Val), 146 | set_opts(Rest). 147 | 148 | config(data_model, Model) -> 149 | maybe_config(data_model, Model); 150 | config(http_port, HttpPort) -> 151 | maybe_config(http_port, HttpPort); 152 | config(target_database, Database) -> 153 | maybe_config(target_database, Database), 154 | Driver = default_driver(Database), 155 | maybe_config(driver, Driver), 156 | case driver_adapter(Driver) of 157 | none -> 158 | ok; 159 | Adapter -> 160 | maybe_config(adapter, Adapter) 161 | end; 162 | config(database_ports, Ports) -> 163 | maybe_config(database_ports, Ports); 164 | config(database_addresses, Addresses) -> 165 | maybe_config(database_addresses, Addresses); 166 | config(connection_pool_size, Size) -> 167 | maybe_config(connection_pool_size, Size); 168 | config(driver, Driver) -> 169 | maybe_config(driver, Driver), 170 | case driver_adapter(Driver) of 171 | none -> 172 | ok; 173 | Adapter -> 174 | maybe_config(adapter, Adapter) 175 | end, 176 | maybe_config(target_database, db_from_driver(Driver)). 177 | 178 | maybe_config(Key, Val) -> 179 | case application:get_env(?APP, Key) of 180 | undefined -> 181 | lager:info("Setting FMKe option ~p = ~p~n", [Key, Val]), 182 | ok = application:set_env(?APP, Key, Val); 183 | {ok, Predefined} -> 184 | lager:info("Setting FMKe option ~p failed (already defined as ~p)~n", [Key, Predefined]), 185 | already_defined 186 | end. 187 | 188 | get_option(Opt, Config) -> 189 | {_Source, Val} = get_value(os:getenv(atom_to_list(Opt)), 190 | application:get_env(?APP, Opt), 191 | proplists:get_value(Opt, Config), 192 | maps:get(Opt, ?DEFAULTS, undefined)), 193 | Val. 
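%% Resolution example (hypothetical values) for the priority order described above, using
%% the get_value/4 clauses defined below; note that os:getenv/1 returns a string, so an OS
%% environment value arrives as a list:
%%   get_value("8080", {ok, 9090}, 9190, 9290)    -> {os_env, "8080"}
%%   get_value(false, {ok, 9090}, 9190, 9290)     -> {app_env, 9090}
%%   get_value(false, undefined, 9190, 9290)      -> {config_file, 9190}
%%   get_value(false, undefined, undefined, 9290) -> {defaults, 9290}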
194 | 195 | get_value(false, undefined, undefined, Val) -> {defaults, Val}; 196 | get_value(false, undefined, Val, _) -> {config_file, Val}; 197 | get_value(false, {ok, Val}, _, _) -> {app_env, Val}; 198 | get_value(Val, _, _, _) -> {os_env, Val}. 199 | -------------------------------------------------------------------------------- /src/gen_fmke_adapter.erl: -------------------------------------------------------------------------------- 1 | -module(gen_fmke_adapter). 2 | 3 | -include ("fmke.hrl"). 4 | 5 | -type context() :: term(). 6 | 7 | -callback start(Driver::atom()) -> {ok, pid()} | {error, term()}. 8 | -callback stop() -> {ok, term()} | {error, term()}. 9 | 10 | 11 | %%----------------------------------------------------------------------------- 12 | %% Create Operations 13 | %%----------------------------------------------------------------------------- 14 | 15 | 16 | -callback create_patient(Id::id(), Name::string(), Address::string()) -> 17 | {ok | {error, Reason::term()}, Context::context()}. 18 | 19 | -callback create_pharmacy(Id::id(), Name::string(), Address::string()) -> 20 | {ok | {error, Reason::term()}, Context::context()}. 21 | 22 | -callback create_facility(Id::id(), Name::string(), Address::string(), Type::string()) -> 23 | {ok | {error, Reason::term()}, Context::context()}. 24 | 25 | -callback create_staff(Id::id(), Name::string(), Address::string(), Speciality::string()) -> 26 | {ok | {error, Reason::term()}, Context::context()}. 27 | 28 | -callback create_prescription(PrescriptionId::id(), PatientId::id(), PrescriberId::id(), 29 | PharmacyId::id(), DatePrescribed::string(), Drugs::list(crdt())) -> 30 | {ok | {error, Reason::term()}, Context::context()}. 31 | 32 | 33 | %%----------------------------------------------------------------------------- 34 | %% Get Operations 35 | %%----------------------------------------------------------------------------- 36 | 37 | 38 | -callback get_facility_by_id(Id::id()) -> 39 | {{ok, Object::facility()} | {error, Reason::term()}, Context::context()}. 40 | 41 | -callback get_patient_by_id(Id::id()) -> 42 | {{ok, Object::patient()} | {error, Reason::term()}, Context::context()}. 43 | 44 | -callback get_pharmacy_by_id(Id::id()) -> 45 | {{ok, Object::pharmacy()} | {error, Reason::term()}, Context::context()}. 46 | 47 | -callback get_prescription_by_id(Id::id()) -> 48 | {{ok, Object::prescription()} | {error, Reason::term()}, Context::context()}. 49 | 50 | -callback get_staff_by_id(Id::id()) -> 51 | {{ok, Object::staff()} | {error, Reason::term()}, Context::context()}. 52 | 53 | -callback get_processed_pharmacy_prescriptions(Id::id()) -> 54 | {{ok, ListObjects::list(prescription() | binary())} | {error, Reason::term()}, Context::context()}. 55 | 56 | -callback get_pharmacy_prescriptions(Id::id()) -> 57 | {{ok, ListObjects::list(prescription() | binary())} | {error, Reason::term()}, Context::context()}. 58 | 59 | -callback get_staff_prescriptions(Id::id()) -> 60 | {{ok, ListObjects::list(prescription() | binary())} | {error, Reason::term()}, Context::context()}. 61 | 62 | 63 | %%----------------------------------------------------------------------------- 64 | %% Update Operations 65 | %%----------------------------------------------------------------------------- 66 | 67 | 68 | -callback process_prescription(Id::id(), DateProcessed::string()) -> 69 | {ok | {error, Reason::term()}, Context::context()}. 
70 | 71 | -callback update_patient_details(Id::id(), Name::string(), Address::string()) -> 72 | {ok | {error, Reason::term()}, Context::context()}. 73 | 74 | -callback update_pharmacy_details(Id::id(), Name::string(), Address::string()) -> 75 | {ok | {error, Reason::term()}, Context::context()}. 76 | 77 | -callback update_facility_details(Id::id(), Name::string(), Address::string(), Type::string()) -> 78 | {ok | {error, Reason::term()}, Context::context()}. 79 | 80 | -callback update_staff_details(Id::id(), Name::string(), Address::string(), Speciality::string()) -> 81 | {ok | {error, Reason::term()}, Context::context()}. 82 | 83 | -callback update_prescription_medication(Id::id(), Operation::atom(), Drugs::list(crdt())) -> 84 | {ok | {error, Reason::term()}, Context::context()}. 85 | -------------------------------------------------------------------------------- /src/gen_fmke_kv_driver.erl: -------------------------------------------------------------------------------- 1 | %% This module documents the callbacks that an FMKE driver for a Key-Value Store must implement. 2 | %% 3 | %% A brief explanation about FMKe adapters and drivers: 4 | %% 5 | %% An adapter is an Erlang module that implements the complete FMKe callback set, but that is able to make assumptions 6 | %% about the data model, connection pool or any other configurable parameter. Adapters don't communicate directly with 7 | %% client libraries for databases, but instead do it through drivers. 8 | %% 9 | %% A driver is a simple wrapper over a database's client library that exposes a common interface to all databases. 10 | %% When implementing a driver it is necessary to implement additional logic required to maintain correct application 11 | %% state, such as keeping track of previously read values within a transaction. Failure to implement the additional 12 | %% logic may result in anomalies which will should be documented. The performance to correctness trade-off is common 13 | %% in these types of storage systems, and the documentation of the presented anomalies along with performance values 14 | %% is paramount. 15 | %% 16 | %% Since adapters do not make assumptions about the capabilities of the database, the drivers will need to export 17 | %% callbacks related to transactions (e.g. start_transaction/1, commit_transaction/1). These functions are expected to 18 | %% return opaque state that is passed in to further operations, meaning that you can add contextual information by 19 | %% returning {ok, term()}, or just {ok, []} if there is no need for context in order to perform the operations. 20 | %% 21 | %% Drivers might also need to set up additional components and state for themselves, which is why the start/0 22 | %% hooks exist. In these functions you may open a pool of connections to the database (but for that purpose you can 23 | %% already use the fmke_db_conn_manager module), create an ETS table for caching results, etc. 24 | %% Conversely, the stop/0 function will allow you to terminate gracefully and perform any teardown you feel necessary. 25 | %% 26 | %% The get and put functions that drivers need to implement contain extra parameters in order to give operation context 27 | %% to the drivers. 
This is to avoid all possible overhead from using a generic approach (for instance, having to derive 28 | %% which entity is being obtained from the key passed in get/3, if you used separate buckets in the database for each 29 | %% one) as well as trying to provide optimal compatibility with other storage systems that may require extra context to 30 | %% perform operations. 31 | -module(gen_fmke_kv_driver). 32 | 33 | -include("fmke.hrl"). 34 | 35 | -type value() :: term(). 36 | -type context() :: term(). 37 | -type options() :: list({atom(), term()}). 38 | -type txn_result() :: ok | {error, term()}. 39 | % -type data_model() :: nested | non_nested. 40 | 41 | %% --------------------------------------------------------------------------------------------------------------------- 42 | %% Setup and teardown callbacks 43 | %% --------------------------------------------------------------------------------------------------------------------- 44 | 45 | %% Startup hook that provides information about whether the current benchmark execution is using a normalized or nested 46 | %% data layout. It is the driver's responsability to implement the logic for both data layouts, although the code should 47 | %% not change significantly between them. (See example below) 48 | %% A typical way of storing objects in CRDT databases would be to nest every field inside a top level record, which has 49 | %% so far proved to have worse performance, since each CRDT state size will increase over multiple operations. 50 | %% Furthermore, application records such as patients will need to store their associated prescriptions, which are 51 | %% separate entities/records, further increasing CRDT state size. One way to bypass this is to store a reference to the 52 | %% prescription key inside the patient, and we consider this to be a "normalized" (non-nested) data layout. 53 | %% Implementing a driver may be done for a single data layout, ignoring the other completely. When test executions are 54 | %% run, only valid data model implementations are considered for performance results. 55 | % -callback start(DataModel::data_model()) -> {error, term()} | {ok, pid()}. 56 | 57 | %% Teardown hook, called when the application is stopped. 58 | % -callback stop() -> ok. 59 | 60 | %% --------------------------------------------------------------------------------------------------------------------- 61 | %% Transactional support callbacks 62 | %% --------------------------------------------------------------------------------------------------------------------- 63 | 64 | %% Starts a transaction while providing some context of the type of operations that are going to be performed. 65 | %% A proplist of options (Options) is passed in, with the following values considered valid options: 66 | %% {entity, Entity :: entity()} -> 67 | %% The following operations that are going to be performed in this transaction only concern one entity, Entity. 68 | %% 69 | %% Returns any erlang term containing the state that is required by the driver to execute each operation related with a 70 | %% transaction. It is common for the returned state to include a Pid that contains a connection to the database and 71 | %% possibly identifier(s) for the transaction. Any erlang term is considered valid and will be passed in to subsequent 72 | %% operations related to the same transaction. 73 | -callback start_transaction(Options::options()) -> {ok, OperationContext::context()}. 
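%% Minimal sketch of the two transactional callbacks (illustrative only; the module name is
%% hypothetical, and the details assume a driver that checks connections out of
%% fmke_db_conn_manager and treats the checked-out connection Pid as its opaque context):
%%
%%   -module(fmke_driver_example_kv).
%%   -behaviour(gen_fmke_kv_driver).
%%
%%   start_transaction(_Options) ->
%%       Pid = fmke_db_conn_manager:checkout(),
%%       {ok, Pid}.                          %% the Pid is the operation context
%%
%%   commit_transaction(Pid, _Options) ->
%%       fmke_db_conn_manager:checkin(Pid),  %% return the connection to the pool
%%       ok.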
74 | 75 | %% Signals the end of a transaction, passing in the current operation context as well as a list of options that 76 | %% currently serves no purpose. A typical implementation of commit_transaction includes calling commit_transaction on 77 | %% client library (if supported) and returning the Pid to the connection pool. 78 | %% 79 | %% See some implementations in the fmke_db_adapter_driver_antidote.erl and fmke_db_adapter_driver_riak.erl modules. 80 | -callback commit_transaction(OperationContext::context(), Options::options()) -> Result::txn_result(). 81 | 82 | %% --------------------------------------------------------------------------------------------------------------------- 83 | %% Key value callbacks 84 | %% --------------------------------------------------------------------------------------------------------------------- 85 | 86 | %% get/2 - Fetches a list of keys from the database. 87 | %% To provide context, some information about the entity being retrieved is included, and additionally the operation 88 | %% context is also passed in from a previous get/3, put/4, or start_transaction/1. 89 | %% 90 | %% Returns a triple with {ok, GetResult, NextOperationContext} if the operation was executed successfully or 91 | %% {error, Reason, NextOperationContext} otherwise. 92 | -callback get(list({Key::key(), Type::entity()}), OperationContext::context()) -> 93 | {list(app_record() | {error, term()}), context()}. 94 | 95 | %% put/3 - Adds a list of key-value entries to the database. 96 | %% To provide context, some information about the each entry being added is included, and additionally the operation 97 | %% context is also passed in from a previous get/3, put/4, or start_transaction/1. 98 | %% 99 | %% Returns a pair with {list(put_results()), NextOperationContext} if the operation was executed successfully or 100 | %% {error, Reason, NextOperationContext} otherwise. 101 | %% 102 | %% The Key to be written is passed in binary string format, as that is currently universally supported by all libraries. 103 | %% The Value to be written is a value that the driver is able to recognize, which means that the adapters need to pass 104 | %% valid values that the drivers are able to recognize and convert to a proper internal representation. 105 | %% 106 | %% A more in-depth explanation of what key() and value() should be: 107 | %% 1. key() is a binary string representation of the key that is going to be written. 108 | %% 2. value() is either an application record (in which case it is considered that every field is supposed to stay under 109 | %% the same key, ) 110 | -callback put(list({Key::key(), Type::entity(), Value::value()}), OperationContext::context()) -> 111 | {list(ok | {error, term()}), context()}. 112 | -------------------------------------------------------------------------------- /test/fmke_antidote_transactions_SUITE.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% File : fmke_antidote_transactions_SUITE.erl 3 | %%% Author : Gonçalo Tomás 4 | %%% Description : Tests the behaviour of AntidoteDB transactions. 5 | %%% Created : Mon 1 Oct 2018 17:42 6 | %%%------------------------------------------------------------------- 7 | -module(fmke_antidote_transactions_SUITE). 8 | -include("fmke.hrl"). 9 | 10 | -compile([export_all, nowarn_export_all]). 11 | 12 | -include_lib("common_test/include/ct.hrl"). 13 | -include_lib("eunit/include/eunit.hrl"). 
14 | 15 | -define (NODENAME, 'fmke@127.0.0.1'). 16 | -define (COOKIE, fmke). 17 | 18 | suite() -> 19 | [{timetrap, {minutes, 5}}]. 20 | 21 | %%-------------------------------------------------------------------- 22 | %% Function: init_per_suite(Config0) -> 23 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 24 | %% Config0 = Config1 = [tuple()] 25 | %% Reason = term() 26 | %%-------------------------------------------------------------------- 27 | init_per_suite(Config) -> 28 | TestNode = 'fmke_antidote_ct@127.0.0.1', 29 | ok = fmke_test_setup:ensure_start_dist_node(TestNode), 30 | true = erlang:set_cookie(TestNode, ?COOKIE), 31 | fmke_test_setup:start_node_with_antidote_backend(?NODENAME), 32 | true = erlang:set_cookie(?NODENAME, ?COOKIE), 33 | Config. 34 | 35 | %%-------------------------------------------------------------------- 36 | %% Function: end_per_suite(Config0) -> term() | {save_config,Config1} 37 | %% Config0 = Config1 = [tuple()] 38 | %%-------------------------------------------------------------------- 39 | end_per_suite(_Config) -> 40 | fmke_test_setup:stop_node(?NODENAME), 41 | fmke_test_setup:stop_all(), 42 | net_kernel:stop(), 43 | ok. 44 | 45 | %%-------------------------------------------------------------------- 46 | %% Function: init_per_testcase(TestCase, Config0) -> 47 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 48 | %% TestCase = atom() 49 | %% Config0 = Config1 = [tuple()] 50 | %% Reason = term() 51 | %%-------------------------------------------------------------------- 52 | init_per_testcase(_TestCase, Config) -> 53 | Config. 54 | 55 | %%-------------------------------------------------------------------- 56 | %% Function: end_per_testcase(TestCase, Config0) -> 57 | %% term() | {save_config,Config1} | {fail,Reason} 58 | %% TestCase = atom() 59 | %% Config0 = Config1 = [tuple()] 60 | %% Reason = term() 61 | %%-------------------------------------------------------------------- 62 | end_per_testcase(_TestCase, _Config) -> 63 | ok. 64 | 65 | %%-------------------------------------------------------------------- 66 | %% Function: all() -> GroupsAndTestCases | {skip,Reason} 67 | %% GroupsAndTestCases = [{group,GroupName} | TestCase] 68 | %% GroupName = atom() 69 | %% TestCase = atom() 70 | %% Reason = term() 71 | %%-------------------------------------------------------------------- 72 | all() -> [read_read_succeds, read_write_succeeds, write_write_aborts]. 
73 | 74 | read_read_succeds(_Config) -> 75 | Key = list_to_binary(rand_str:get(64)), 76 | %% add key to antidote 77 | Pid = checkout_remote_pid(), 78 | {ok, Txn} = antidotec_pb:start_transaction(Pid, ignore), 79 | BoundObject = {Key, antidote_crdt_counter_pn, <<"bucket">>}, 80 | Obj = antidotec_counter:increment(1, antidotec_counter:new()), 81 | ok = antidotec_pb:update_objects(Pid, antidotec_counter:to_ops(BoundObject, Obj), Txn), 82 | {ok, _} = antidotec_pb:commit_transaction(Pid, Txn), 83 | Pid1 = checkout_remote_pid(), 84 | Pid2 = checkout_remote_pid(), 85 | {ok, Txn1} = antidotec_pb:start_transaction(Pid1, ignore), 86 | {ok, Txn2} = antidotec_pb:start_transaction(Pid2, ignore), 87 | {ok, [Val1]} = antidotec_pb:read_objects(Pid1, [BoundObject], Txn1), 88 | {ok, [Val2]} = antidotec_pb:read_objects(Pid2, [BoundObject], Txn2), 89 | {ok, _} = antidotec_pb:commit_transaction(Pid1, Txn1), 90 | {ok, _} = antidotec_pb:commit_transaction(Pid2, Txn2), 91 | Value1 = antidotec_counter:value(Val1), 92 | Value2 = antidotec_counter:value(Val2), 93 | checkin_remote_pid(Pid), 94 | checkin_remote_pid(Pid1), 95 | checkin_remote_pid(Pid2), 96 | ?assertEqual(Value1, Value2). 97 | 98 | read_write_succeeds(_Config) -> 99 | Key = list_to_binary(rand_str:get(64)), 100 | %% add key to antidote 101 | Pid = checkout_remote_pid(), 102 | {ok, Txn} = antidotec_pb:start_transaction(Pid, ignore), 103 | BoundObject = {Key, antidote_crdt_counter_pn, <<"bucket">>}, 104 | Obj = antidotec_counter:increment(1, antidotec_counter:new()), 105 | ok = antidotec_pb:update_objects(Pid, antidotec_counter:to_ops(BoundObject, Obj), Txn), 106 | {ok, _} = antidotec_pb:commit_transaction(Pid, Txn), 107 | Pid1 = checkout_remote_pid(), 108 | Pid2 = checkout_remote_pid(), 109 | {ok, Txn1} = antidotec_pb:start_transaction(Pid1, ignore), 110 | {ok, Txn2} = antidotec_pb:start_transaction(Pid2, ignore), 111 | {ok, [_Val1]} = antidotec_pb:read_objects(Pid1, [BoundObject], Txn1), 112 | ObjUpdate = antidotec_counter:increment(1, Obj), 113 | ok = antidotec_pb:update_objects(Pid2, antidotec_counter:to_ops(BoundObject, ObjUpdate), Txn2), 114 | {ok, _} = antidotec_pb:commit_transaction(Pid1, Txn1), 115 | {ok, _} = antidotec_pb:commit_transaction(Pid2, Txn2), 116 | checkin_remote_pid(Pid), 117 | checkin_remote_pid(Pid1), 118 | checkin_remote_pid(Pid2), 119 | ok. 
120 | 121 | write_write_aborts(_Config) -> 122 | Key = list_to_binary(rand_str:get(64)), 123 | %% add key to antidote 124 | Pid = checkout_remote_pid(), 125 | {ok, Txn} = antidotec_pb:start_transaction(Pid, ignore), 126 | BoundObject = {Key, antidote_crdt_counter_pn, <<"bucket">>}, 127 | Obj = antidotec_counter:increment(1, antidotec_counter:new()), 128 | ok = antidotec_pb:update_objects(Pid, antidotec_counter:to_ops(BoundObject, Obj), Txn), 129 | {ok, _} = antidotec_pb:commit_transaction(Pid, Txn), 130 | Pid1 = checkout_remote_pid(), 131 | Pid2 = checkout_remote_pid(), 132 | {ok, Txn1} = antidotec_pb:start_transaction(Pid1, ignore), 133 | {ok, Txn2} = antidotec_pb:start_transaction(Pid2, ignore), 134 | ObjUpdate1 = antidotec_counter:increment(1, Obj), 135 | ObjUpdate2 = antidotec_counter:increment(2, Obj), 136 | ok = antidotec_pb:update_objects(Pid2, antidotec_counter:to_ops(BoundObject, ObjUpdate2), Txn2), 137 | ok = antidotec_pb:update_objects(Pid1, antidotec_counter:to_ops(BoundObject, ObjUpdate1), Txn1), 138 | %% check if both transactions committed successfully 139 | case {antidotec_pb:commit_transaction(Pid1, Txn1), antidotec_pb:commit_transaction(Pid2, Txn2)} of 140 | {{error, _}, {error, _}} -> 141 | %% both failed 142 | ok; 143 | {{ok, _}, {error, _}} -> 144 | %% one of them failed, the other succeeded 145 | ok; 146 | {{error, _}, {ok, _}} -> 147 | %% one of them failed, the other succeeded 148 | ok; 149 | {{ok, _}, {ok, _}} -> 150 | %% both transactions succeeded in an update to the same key at the same time 151 | throw("write_write_transaction_succeded") 152 | end, 153 | checkin_remote_pid(Pid), 154 | checkin_remote_pid(Pid1), 155 | checkin_remote_pid(Pid2), 156 | ok. 157 | 158 | checkin_remote_pid(Pid) -> 159 | rpc(fmke_db_conn_manager, checkout, [Pid]). 160 | 161 | checkout_remote_pid() -> 162 | rpc(fmke_db_conn_manager, checkout, []). 163 | 164 | rpc(Mod, Fun, Args) -> 165 | rpc:call(?NODENAME, Mod, Fun, Args). 166 | -------------------------------------------------------------------------------- /test/fmke_configs/antidote_non_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_antidote_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_antidote_non_nested@127.0.0.1'}. 3 | {database_addresses, ["127.0.0.1"]}. 4 | {database_ports, [4000]}. 5 | {target_database, antidote}. 6 | {http_port, 10001}. 7 | -------------------------------------------------------------------------------- /test/fmke_configs/aql_non_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_aql_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_aql_non_nested@127.0.0.1'}. 3 | {database_addresses, ["127.0.0.1"]}. 4 | {database_ports, [8321]}. 5 | {target_database, aql}. 6 | {http_port, 10007}. 7 | -------------------------------------------------------------------------------- /test/fmke_configs/cassandra_non_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_cassandra_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_cassandra_non_nested@127.0.0.1'}. 3 | {database_addresses, ["0.0.0.0"]}. 4 | {database_ports, [9042]}. 5 | {target_database, cassandra}. 6 | {http_port, 10006}. 
7 | -------------------------------------------------------------------------------- /test/fmke_configs/ets_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_ets_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_ets_nested@127.0.0.1'}. 3 | {target_database, ets}. 4 | {data_model, nested}. 5 | {http_port, 10002}. 6 | -------------------------------------------------------------------------------- /test/fmke_configs/ets_non_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_ets_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_ets_nested@127.0.0.1'}. 3 | {target_database, ets}. 4 | {data_model, non_nested}. 5 | {http_port, 10003}. 6 | -------------------------------------------------------------------------------- /test/fmke_configs/redis_cluster_non_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_redis_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_redis_non_nested@127.0.0.1'}. 3 | {database_addresses, ["127.0.0.1"]}. 4 | {database_ports, [7000]}. 5 | {target_database, redis_cluster}. 6 | {http_port, 10004}. 7 | -------------------------------------------------------------------------------- /test/fmke_configs/redis_crdb_non_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_redis_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_redis_non_nested@127.0.0.1'}. 3 | {database_addresses, ["127.0.0.1"]}. 4 | {database_ports, [7000]}. 5 | {driver, fmke_driver_opt_redis_crdb}. 6 | {http_port, 10004}. 7 | {target_database, redis_crdb}. 8 | -------------------------------------------------------------------------------- /test/fmke_configs/riak_non_nested_data_model.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_riak_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_riak_non_nested@127.0.0.1'}. 3 | {database_addresses, ["127.0.0.1"]}. 4 | {database_ports, [4008]}. 5 | {target_database, riak}. 6 | {http_port, 10005}. 7 | -------------------------------------------------------------------------------- /test/fmke_configs/riak_simple_nested.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_riak_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_riak_non_nested@127.0.0.1'}. 3 | {database_addresses, ["127.0.0.1"]}. 4 | {database_ports, [4008]}. 5 | {target_database, riak}. 6 | {driver, fmke_driver_riak_kv}. 7 | {data_model, nested}. 8 | {http_port, 10005}. 9 | -------------------------------------------------------------------------------- /test/fmke_configs/riak_simple_non_nested.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_riak_non_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_riak_non_nested@127.0.0.1'}. 3 | {database_addresses, ["127.0.0.1"]}. 4 | {database_ports, [4008]}. 5 | {target_database, riak}. 6 | {driver, fmke_driver_riak_kv}. 7 | {data_model, non_nested}. 8 | {http_port, 10005}. 9 | -------------------------------------------------------------------------------- /test/fmke_core_unit_test_SUITE_data/default.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_ets_nested@127.0.0.1'}. 
2 | {ct_nodename, 'ct_ets_nested@127.0.0.1'}. 3 | {target_database, ets}. 4 | {data_model, nested}. 5 | {http_port, 10006}. 6 | -------------------------------------------------------------------------------- /test/fmke_db_conn_manager_SUITE.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% File : fmke_db_conn_manager_SUITE.erl 3 | %%% Author : Gonçalo Tomás 4 | %%% Description : Tests the behaviour of the DB connection manager 5 | %%% under several scenarios. 6 | %%% Created : Fri 9 Feb 2018 17:42 7 | %%%------------------------------------------------------------------- 8 | -module(fmke_db_conn_manager_SUITE). 9 | -include("fmke.hrl"). 10 | 11 | -compile([export_all, nowarn_export_all]). 12 | 13 | -include_lib("common_test/include/ct.hrl"). 14 | 15 | -define (NODENAME, 'fmke@127.0.0.1'). 16 | -define (COOKIE, fmke). 17 | 18 | suite() -> 19 | [{timetrap, {minutes, 3}}]. 20 | 21 | %%-------------------------------------------------------------------- 22 | %% Function: init_per_suite(Config0) -> 23 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 24 | %% Config0 = Config1 = [tuple()] 25 | %% Reason = term() 26 | %%-------------------------------------------------------------------- 27 | init_per_suite(Config) -> 28 | TestNode = 'fmke_db_conn_mgr_test@127.0.0.1', 29 | ok = fmke_test_setup:ensure_start_dist_node(TestNode), 30 | true = erlang:set_cookie(TestNode, ?COOKIE), 31 | fmke_test_setup:start_node_with_mock_cluster(?NODENAME), 32 | true = erlang:set_cookie(?NODENAME, ?COOKIE), 33 | Config. 34 | 35 | %%-------------------------------------------------------------------- 36 | %% Function: end_per_suite(Config0) -> term() | {save_config,Config1} 37 | %% Config0 = Config1 = [tuple()] 38 | %%-------------------------------------------------------------------- 39 | end_per_suite(_Config) -> 40 | fmke_test_setup:stop_node(?NODENAME), 41 | fmke_test_setup:stop_all(), 42 | net_kernel:stop(), 43 | ok. 44 | 45 | %%-------------------------------------------------------------------- 46 | %% Function: init_per_testcase(TestCase, Config0) -> 47 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 48 | %% TestCase = atom() 49 | %% Config0 = Config1 = [tuple()] 50 | %% Reason = term() 51 | %%-------------------------------------------------------------------- 52 | init_per_testcase(_TestCase, Config) -> 53 | Config. 54 | 55 | %%-------------------------------------------------------------------- 56 | %% Function: end_per_testcase(TestCase, Config0) -> 57 | %% term() | {save_config,Config1} | {fail,Reason} 58 | %% TestCase = atom() 59 | %% Config0 = Config1 = [tuple()] 60 | %% Reason = term() 61 | %%-------------------------------------------------------------------- 62 | end_per_testcase(_TestCase, _Config) -> 63 | ok. 64 | 65 | %%-------------------------------------------------------------------- 66 | %% Function: all() -> GroupsAndTestCases | {skip,Reason} 67 | %% GroupsAndTestCases = [{group,GroupName} | TestCase] 68 | %% GroupName = atom() 69 | %% TestCase = atom() 70 | %% Reason = term() 71 | %%-------------------------------------------------------------------- 72 | all() -> 73 | [ 74 | get_pool_names, 75 | round_robin_policy, 76 | no_overflow_when_using_conn_pool_size_pids, 77 | checkin_unknown_pid_is_recognized_by_manager, 78 | dead_pid_is_cleaned_from_manager_state 79 | ]. 
80 | 81 | get_pool_names(_Config) -> 82 | {ok, Pools} = rpc(application, get_env, [?APP, pools]), 83 | [pool_127_0_0_1_8087, pool_localhost_8087] = Pools. 84 | 85 | round_robin_policy(_Config) -> 86 | Pids = checkout_multiple(6), 87 | [P1, P2, P3, P4, P5, P6] = Pids, 88 | {ok, Pools} = get_pools(), 89 | [Pool1Pids, Pool2Pids] = lists:map(fun get_pids/1, Pools), 90 | true = lists:member(P1, Pool1Pids), 91 | true = lists:member(P2, Pool2Pids), 92 | true = lists:member(P3, Pool1Pids), 93 | true = lists:member(P4, Pool2Pids), 94 | true = lists:member(P5, Pool1Pids), 95 | true = lists:member(P6, Pool2Pids), 96 | checkin_multiple(Pids), 97 | ok. 98 | 99 | no_overflow_when_using_conn_pool_size_pids(_Config) -> 100 | ConnPoolSize = get_pool_size(), 101 | true = ConnPoolSize > 1, 102 | Pids = checkout_multiple(ConnPoolSize + 1), 103 | {ok, [Pool1, Pool2]} = get_pools(), 104 | TwiceConnPoolSize = 2 * ConnPoolSize, 105 | {ready, Pool1CurrSize, CurrOverflow1, _Monitors1} = get_pool_state(Pool1), 106 | {ready, Pool2CurrSize, CurrOverflow2, _Monitors2} = get_pool_state(Pool2), 107 | TwiceConnPoolSize = length(Pids) + Pool1CurrSize + Pool2CurrSize, 108 | 0 = CurrOverflow1, 109 | 0 = CurrOverflow2, 110 | checkin_multiple(Pids), 111 | ok. 112 | 113 | checkin_unknown_pid_is_recognized_by_manager(_Config) -> 114 | no_such_pid = checkin(self()). 115 | 116 | dead_pid_is_cleaned_from_manager_state(_Config) -> 117 | Pid = checkout(), 118 | rpc(erlang, send, [fmke_db_conn_manager, {'EXIT', Pid, died}]), 119 | true = (undefined =/= rpc(erlang, process_info, [Pid])), 120 | timer:sleep(500), 121 | no_such_pid = checkin(Pid), 122 | true = rpc(erlang, exit, [Pid, die]), 123 | undefined = rpc(erlang, process_info, [Pid]). 124 | 125 | get_pids(Pool) -> 126 | Results = rpc(gen_server, call, [Pool, get_all_workers]), 127 | lists:map(fun({_Monitors, Pid, _Type, _Module}) -> Pid end, Results). 128 | 129 | get_pools() -> 130 | rpc:call(?NODENAME, application, get_env, [?APP, pools]). 131 | 132 | get_pool_state(Pool) -> 133 | rpc(gen_server, call, [Pool, status]). 134 | 135 | get_pool_size() -> 136 | {ok, ConnPoolSize} = rpc(application, get_env, [?APP, connection_pool_size]), 137 | ConnPoolSize. 138 | 139 | checkin(Pid) -> 140 | rpc:call(?NODENAME, fmke_db_conn_manager, checkin, [Pid]). 141 | 142 | checkin_multiple(Pids) -> 143 | lists:map(fun(Pid) -> rpc:call(?NODENAME, fmke_db_conn_manager, checkin, [Pid]) end, Pids). 144 | 145 | checkout() -> 146 | rpc(fmke_db_conn_manager, checkout, []). 147 | 148 | checkout_multiple(N) -> 149 | lists:map(fun(_N) -> rpc:call(?NODENAME, fmke_db_conn_manager, checkout, []) end, lists:seq(1, N)). 150 | 151 | rpc(Mod, Fun, Args) -> 152 | rpc:call(?NODENAME, Mod, Fun, Args). 153 | -------------------------------------------------------------------------------- /test/fmke_http_api_SUITE_data/default.config: -------------------------------------------------------------------------------- 1 | {fmke_nodename, 'fmke_ct_ets_nested@127.0.0.1'}. 2 | {ct_nodename, 'ct_ets_nested@127.0.0.1'}. 3 | {target_database, ets}. 4 | {data_model, non_nested}. 5 | {http_port, 10007}. 
6 | -------------------------------------------------------------------------------- /test/fmke_unstable_db_conn_SUITE.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %%% File : fmke_delayed_start_connection_test_SUITE.erl 3 | %%% Author : Gonçalo Tomás 4 | %%% Description : Tests the behaviour of the DB connection manager 5 | %%% when FMKe experiments database system is ready. 6 | %%% Only makes sense to test against databases that use FMKe's 7 | %%% database connection manager (e.g. antidote, riak) 8 | %%% Created : Wed 14 Nov 2018 02:03 9 | %%%------------------------------------------------------------------- 10 | -module(fmke_unstable_db_conn_SUITE). 11 | -include("fmke.hrl"). 12 | 13 | -compile([export_all, nowarn_export_all]). 14 | 15 | -include_lib("common_test/include/ct.hrl"). 16 | 17 | -define (NODENAME, 'fmke@127.0.0.1'). 18 | -define (COOKIE, fmke). 19 | 20 | suite() -> 21 | [{timetrap, {minutes, 3}}]. 22 | 23 | %%-------------------------------------------------------------------- 24 | %% Function: init_per_suite(Config0) -> 25 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 26 | %% Config0 = Config1 = [tuple()] 27 | %% Reason = term() 28 | %%-------------------------------------------------------------------- 29 | init_per_suite(Config) -> 30 | TestNode = 'fmke_db_conn_robustness_test@127.0.0.1', 31 | ok = fmke_test_setup:ensure_start_dist_node(TestNode), 32 | true = erlang:set_cookie(TestNode, ?COOKIE), 33 | Opts = [ 34 | {database_addresses, ["127.0.0.1"]}, 35 | {database_ports, [8087]}, 36 | {target_database, riak}, 37 | {connection_pool_size, 2}, 38 | {http_port, 10008} 39 | ], 40 | Data = [ 41 | {patients, [{1, "john smith", "somewhere in portugal"}]} 42 | ], 43 | Config ++ [{fmke_opts, Opts}, {data, Data}]. 44 | 45 | %%-------------------------------------------------------------------- 46 | %% Function: end_per_suite(Config0) -> term() | {save_config,Config1} 47 | %% Config0 = Config1 = [tuple()] 48 | %%-------------------------------------------------------------------- 49 | end_per_suite(_Config) -> 50 | fmke_test_setup:stop_node(?NODENAME), 51 | fmke_test_setup:stop_all(), 52 | net_kernel:stop(), 53 | ok. 54 | 55 | init_per_testcase(_TestCase, Config) -> 56 | Config. 57 | 58 | end_per_testcase(_TestCase, _Config) -> 59 | ok. 60 | 61 | %%-------------------------------------------------------------------- 62 | %% Function: all() -> GroupsAndTestCases | {skip,Reason} 63 | %% GroupsAndTestCases = [{group,GroupName} | TestCase] 64 | %% GroupName = atom() 65 | %% TestCase = atom() 66 | %% Reason = term() 67 | %%-------------------------------------------------------------------- 68 | all() -> 69 | [ 70 | start_fmke_ok, 71 | check_status_ok, 72 | kill_database, 73 | check_that_database_is_down, 74 | check_fmke_down, 75 | restart_db_and_wait_for_it, 76 | recheck_status_ok 77 | ]. 78 | 79 | start_fmke_ok(Config) -> 80 | Opts = ?config(fmke_opts, Config), 81 | true = erlang:set_cookie(?NODENAME, ?COOKIE), 82 | fmke_test_setup:launch_fmke(?NODENAME, Opts), 83 | ok. 84 | 85 | check_status_ok(Config) -> 86 | add_data(Config), 87 | Data = ?config(data, Config), 88 | [{patients, [{Id, Name, Address}]}] = Data, 89 | ExpectedPat = #patient{id = Id, name = Name, address = Address}, 90 | RemotePat = rpc(fmke, get_patient_by_id, [Id]), 91 | true = fmke_test_utils:compare_patients(ExpectedPat, RemotePat), 92 | ok. 
93 | 94 | kill_database(_Config) -> 95 | fmke_test_setup:stop_riak(), 96 | ok. 97 | 98 | check_that_database_is_down(Config) -> 99 | _FmkeOpts = ?config(fmke_opts, Config), 100 | {ok, Pools} = rpc(application, get_env, [?APP, pools]), 101 | lists:map(fun(Pool) -> 102 | {badrpc, {'EXIT', {timeout, _MoreInfo}}} = rpc(gen_server, call, [Pool, get_avail_workers]) 103 | end, Pools), 104 | ok. 105 | 106 | check_fmke_down(Config) -> 107 | Data = ?config(data, Config), 108 | [{patients, [{Id, _Name, _Address}]}] = Data, 109 | {badrpc, {'EXIT', {timeout, _MoreInfo}}} = rpc(fmke, get_patient_by_id, [Id]), 110 | ok. 111 | 112 | restart_db_and_wait_for_it(Config) -> 113 | %% give some time for workers to generate more error messages in the FMKe node 114 | timer:sleep(3000), 115 | %% restart riak 116 | fmke_test_setup:start_riak(), 117 | add_data(Config), 118 | ok. 119 | 120 | recheck_status_ok(Config) -> 121 | Data = ?config(data, Config), 122 | [{patients, [{Id, Name, Address}]}] = Data, 123 | ExpectedPat = #patient{id = Id, name = Name, address = Address}, 124 | %% remember to sleep for the maximum amount between connection attempts (10s) 125 | timer:sleep(10000), 126 | RemotePat = rpc(fmke, get_patient_by_id, [Id]), 127 | true = fmke_test_utils:compare_patients(ExpectedPat, RemotePat), 128 | ok. 129 | 130 | %% abstracts what data is supposed to be in the database. Since we essencially kill it and bring it back up, 131 | %% no data persistence is provided, thus we need to add data again in the restart_db_and_wait_for_it test. 132 | add_data(Config) -> 133 | Data = ?config(data, Config), 134 | [{patients, [{Id, Name, Address}]}] = Data, 135 | ok = rpc(fmke, create_patient, [Id, Name, Address]). 136 | 137 | rpc(Mod, Fun, Args) -> 138 | rpc:call(?NODENAME, Mod, Fun, Args). 139 | --------------------------------------------------------------------------------