├── .gitignore ├── .gitmodules ├── LICENSE ├── Makefile ├── README.md ├── _config.yml ├── docs ├── Readme.md ├── _config.yml ├── mix-tea.pdf └── mix-tea.png ├── euc ├── MyriaInstances.pem ├── analyzers │ ├── Makefile │ ├── aligned_max_throughputs.sh │ ├── analyze_and_plot_tvl.sh │ ├── average.sh │ ├── bin_things.py │ ├── csv_of_aligned_max_throughputs.sh │ ├── estimate_max_throughput.sh │ ├── fell_over.py │ ├── gnuplot_pipestream │ ├── how_many_aborts.py │ ├── main.cpp │ ├── max_throughput.sh │ ├── parallel_max_throughput.sh │ ├── quick_analyzer.py │ ├── read_file.hpp │ ├── submit_vs_run.py │ ├── transpose.sh │ ├── trivial_average.py │ └── window_averages.py ├── as-gentoo.sh ├── build-analyzer.sh ├── hours_minutes_to_minutes.sh ├── kill_all.sh ├── kill_overrun.sh ├── local_writesweep.sh ├── local_writesweep_tracked.sh ├── low_ram.sh ├── mutils ├── plot_two_graphs.sh ├── ram_monitor.sh ├── reboot_all.sh ├── results-analyzer.cpp ├── results-analyzer.sh ├── running_time.sh ├── test-things-loop-body.sh ├── test-things-loop-body2.sh ├── test-things.sh ├── time_launched.sh ├── vacuum.sh └── vm-actions.sh └── transactions ├── .vscode └── settings.json ├── Basics.cpp ├── Basics.hpp ├── DataStore.hpp ├── GDataStore.hpp ├── Handle.hpp ├── MTL.hpp ├── Makefile ├── Operations.hpp ├── Ostreams.cpp ├── Ostreams.hpp ├── RemoteObject.hpp ├── UnmatchedRemoteObject.hpp ├── big_prime ├── config_params_parsing_test.cpp ├── configuration_params.cpp ├── configuration_params.hpp ├── debug_scratch.cpp ├── deref_test.cpp ├── drafts ├── NewOperations.hpp └── testNewOperations.cpp ├── integrate_new_parser.txt ├── logging_example.cpp ├── mailing_list_add_new_user.cpp ├── mailing_list_add_new_user.cpp.precompiled ├── mailing_list_create_group.cpp ├── mailing_list_create_group.cpp.precompiled ├── mailing_list_create_user.cpp ├── mailing_list_create_user.cpp.precompiled ├── mailing_list_download_inbox.cpp ├── mailing_list_download_inbox.cpp.precompiled ├── mailing_list_example.cpp ├── mailing_list_example.hpp ├── mailing_list_post_new_message.cpp ├── mailing_list_post_new_message.cpp.precompiled ├── mailing_list_test.cpp ├── mtl ├── .clang-format ├── AST_parse.hpp ├── AST_split.hpp ├── AST_typecheck.hpp ├── RemoteList.hpp ├── TransactionContext.cpp ├── TransactionContext.hpp ├── builtins.hpp ├── builtins_declarations.hpp ├── collect_proper_label.hpp ├── common_strings.hpp ├── contains_improper_labels.hpp ├── contains_min_ofs.hpp ├── endorse_relabel.hpp ├── endorsement.hpp ├── environments.hpp ├── environments_serialization.hpp ├── flatten_expressions.hpp ├── generic_typecheck_decl.hpp ├── insert_tracking.hpp ├── interp.hpp ├── label_inference.hpp ├── label_utilities.hpp ├── mixt_captures.hpp ├── mixt_method.hpp ├── mtl.hpp ├── mtlbasics.hpp ├── mtlutils.hpp ├── name_while.hpp ├── new-mtl-development-log.tar.bz2 ├── new-parsing │ ├── Makefile │ ├── allocated_ref.hpp │ ├── allocator.hpp │ ├── array.hpp │ ├── ast.hpp │ ├── ast.hpp.php │ ├── ast.php │ ├── ast_skeleton.php │ ├── common.php │ ├── ctutils-old.hpp │ ├── ctutils.hpp │ ├── parse.hpp │ ├── parse.hpp.php │ ├── pretty_print.hpp │ ├── pretty_print.hpp.php │ ├── test_allocator.cpp │ ├── test_ctutils.cpp │ ├── to-old-ast-parse.hpp │ ├── union.hpp │ └── util.php ├── parse_bindings.hpp ├── parse_bindings_decl.hpp ├── parse_bindings_impl.hpp ├── parse_expressions.hpp ├── parse_expressions_decl.hpp ├── parse_expressions_impl.hpp ├── parse_printer.hpp ├── parse_statements.hpp ├── parse_statements_decl.hpp ├── parse_statements_impl.hpp ├── parse_utilities.hpp ├── 
phase_context.hpp ├── pre_endorse.hpp ├── recollapse.hpp ├── relabel.hpp ├── remote_interp.hpp ├── remove_empties.hpp ├── remove_unused.hpp ├── replace_label.hpp ├── replace_type.hpp ├── run_phase.hpp ├── runnable_transaction.hpp ├── split_context.hpp ├── split_phase.hpp ├── split_phase_impl_utils.hpp ├── split_phase_utils.hpp ├── split_printer.hpp ├── struct.hpp ├── test_parse_utilities.cpp ├── top.hpp ├── transaction.hpp ├── transaction_macros.hpp ├── transaction_method_argument.hpp ├── transaction_method_transaction.hpp ├── transaction_method_with.hpp ├── traverse_assist.hpp ├── type_environment.hpp ├── typecheck_and_label.hpp ├── typecheck_and_label_decl.hpp ├── typecheck_and_label_impl.hpp ├── typecheck_and_label_macros.hpp ├── typecheck_handle_operations.hpp ├── typecheck_printer.hpp ├── without_names.hpp ├── worklist.hpp └── writes_remote.hpp ├── myria-utils ├── myria_utils.hpp └── utils.hpp ├── pg_env.sh ├── pgsql ├── GSQLObject.cpp ├── Makefile ├── SQLCommands.hpp ├── SQLConnection.cpp ├── SQLConnection.hpp ├── SQLLevels.cpp ├── SQLLevels.hpp ├── SQLStore.cpp ├── SQLStore.hpp ├── SQLStore_impl.hpp ├── SQLTransaction.cpp ├── SQLTransaction.hpp ├── SQL_internal_utils.cpp ├── SQL_internal_utils.hpp ├── mailing_list_loop_causal.sh ├── mailing_list_loop_strong.sh ├── relay.cpp ├── simple_test_loop_causal.sh ├── simple_test_loop_causal_tracked.sh ├── simple_test_loop_strong.sh ├── simple_test_loop_strong_tracked.sh └── test_relay_serialization.cpp ├── ponyq ├── normalq.hpp └── ponyq.hpp ├── postgres_stuff ├── Makefile ├── causal-postgres-numbering-trigger-intstore.sql ├── causal-postgres-numbering-trigger.sql ├── print_causal.cpp ├── print_strong.cpp ├── sync.cpp └── sync_defs.h ├── print_txn.cpp ├── raw_sqlstore └── RawSQLStore.hpp ├── regression_tests.cpp ├── relay_connections_pool.hpp ├── run_result.cpp ├── run_result.hpp ├── sample_arrival_intervals.cpp ├── server ├── StoreRelay.hpp └── transaction_listener.hpp ├── simple_txn_test.cpp ├── test-ct.cpp ├── test_binop.cpp ├── test_builtins.cpp ├── test_client.hpp ├── test_complex_transaction.cpp ├── test_complex_transaction_precompiled.incl ├── test_conversion.cpp ├── test_endorse.cpp ├── test_handle_serialization.cpp ├── test_insert_tracking.cpp ├── test_isvalid.cpp ├── test_mailing_list.cpp ├── test_mixt_method.cpp ├── test_mixt_method_macro.cpp ├── test_operation.cpp ├── test_parsing.cpp ├── test_rawsql.cpp ├── test_testing_store.cpp ├── test_utils.cpp ├── test_utils.hpp ├── testing_store ├── TestingStore.hpp └── mid.hpp ├── tests ├── Makefile ├── process_pool │ └── test_process_pool.cpp └── sqlstore │ ├── Makefile │ └── sqlstore_tests.cpp ├── threaded_trial.hpp ├── tracker ├── ClientTracker.hpp ├── ClockManager.cpp ├── DisabledTracker.cpp ├── Ends.cpp ├── Ends.hpp ├── Tombstone.cpp ├── Tombstone.hpp ├── Tracker.cpp ├── Tracker.hpp ├── Tracker_private_declarations.hpp ├── TrackingContext.hpp ├── find_tombstones.hpp ├── trackable_datastore_impl.hpp └── tracker_dummy.cpp ├── vm.sh └── voting_example.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | *.obj 6 | 7 | # Compiled Dynamic libraries 8 | *.so 9 | *.dylib 10 | *.dll 11 | 12 | # Compiled Static libraries 13 | *.lai 14 | *.la 15 | *.a 16 | *.lib 17 | 18 | # Executables 19 | *.exe 20 | *.out 21 | *.app 22 | votes 23 | test 24 | MyriaStore.* 25 | MyriaStore 26 | postgres_stuff/print_causal 27 | postgres_stuff/print_strong 28 | postgres_stuff/sync 29 | vm 30 |
-------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "transactions/mutils"] 2 | path = transactions/mutils 3 | url = git@github.com:mpmilano/mutils 4 | [submodule "transactions/mutils-serialization"] 5 | path = transactions/mutils-serialization 6 | url = git@github.com:mpmilano/mutils-serialization 7 | [submodule "transactions/mutils-tasks"] 8 | path = transactions/mutils-tasks 9 | url = git@github.com:mpmilano/mutils-tasks 10 | [submodule "transactions/mutils-networking"] 11 | path = transactions/mutils-networking 12 | url = git@github.com:mpmilano/mutils-networking 13 | [submodule "transactions/mutils-containers"] 14 | path = transactions/mutils-containers 15 | url = git@github.com:mpmilano/mutils-containers 16 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | cd transactions; $(MAKE) 3 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-midnight -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-midnight -------------------------------------------------------------------------------- /docs/mix-tea.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mpmilano/MixT/3b418abb2febdfd44fefd4997501b4311406d7d0/docs/mix-tea.pdf -------------------------------------------------------------------------------- /docs/mix-tea.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mpmilano/MixT/3b418abb2febdfd44fefd4997501b4311406d7d0/docs/mix-tea.png -------------------------------------------------------------------------------- /euc/MyriaInstances.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAp7UH3bc1pbW6Yo3AkYWhF2atubD/Pd/NKzfOIAXUaUP4KhBQ 3 | HUz4+e5Kyz+rkD+/UpnHdxZwkY1j9hmuE2qnsIxTSufrTXeoKYlrSI1V5veX1FZY 4 | t+zxt3eLYOb36uhFW3t2WXQlLDAjF9ml68wcVgZYel9MDp9BdIRuh1CKxmTkUoaf 5 | gLNhJS+sAtg8JxQRZmFhKlYFM/WyQF58iuQFS9s5j+WlRPKooIt9mK4EFm2SVnnM 6 | 0t1/6AyfI49LJoEYIQ8P+U1dwRJi2Ga7OHeXfjqNRCJL1fUXyZYynioXxTLwyzTh 7 | YWhUlm1Pjpy3khGxe/IxThXP7H/EOcQ4zmXl1QIDAQABAoIBAFUlqpq9PDWoMLt8 8 | 8PpxeDqjXSzcWpsVl9uLd9H2Y8QWSFdC9QoLXJW4kKzk5LxWQVM01Hr4H6smuEvo 9 | +eLN0dnnRRrQ6Tzpta1xIdEyQR0FNEcQl/a6DHdz8IYAfnr79srdwlbki0MGJask 10 | TAOOHAzpJ97pp3n9DYSVAHEfSXY0SjqshfTldMXUTfuoBHtxF6jVYrA6U5/HHNyw 11 | PAA2/OefbyEhgw6Dx2kdrILgT7ed/JONx2+b3gkxypZ4giM8vJVltHPGqP4BN6Tq 12 | OTTqsAACFlzJQCxdEYlTh/1c9LPTFi/fD5bU9yrHNaDUHmlyIb/Olkg2IznClcoI 13 | 52uO8cECgYEA83sMv3oL2XL+0muhBCJ57PdnaaXLhasC9jyvlYb2edhpgOjvaovV 14 | HiFP2zMKJ1H6fbx9IxNk6uD6aT5IjpGM/jgjOjpflFX/1peekbVphBsFBtbwSohH 15 | 5Q/Zl9lixbNneto9wu+87Lol5VWR5r13dLUKD/wnx155qSJmyVrAp60CgYEAsFSR 16 | 2/Pqxoc8f+nMwJ2xzLXnfYOcJg51eeV0NagmYIvUpBsOBFAmzgZ+wcxn+FeWBhRQ 17 | YNTxGVH6cE3bIMF2Zys9GM7ck+lDSE0jk+Q+F7yf4UBn1pNbQtVFnoA8YyitbKfC 18 | BWzJmIRx4YlOCKDljKs09W3W3UVV1NPizVOfG8kCgYBDJs3WRXfdKj0jlbGb7VJJ 19 | S9bGqYZfQFIZzZTznXx/EHPQrO3TKevGWpOmtgX34EFAymqM8P1lVlVaD2Z9g6Eq 20 | 
QBtfJNhVlf5wlfKS50DJwZX6U++D7uA9ScjOe5MlSZyhom1kRTCK7bAj9xy9UcYP 21 | dxrmW7ow31qYjjdhE1mNJQKBgEtgK2W50/9/qeYIHIWuI2Yd0FIAI4zFG4S7CD0z 22 | nwwgWgq4ofpIBP6HGkJfzYYEctYwctehz1qCh6rgLS7IZIPfoONs8i+sRB2xE+dR 23 | ZxWghTu1222HWjiskHEb4jSFG/VZlP/arCatXz0JA7l+hgHm4eGUTYgS7ojm9onN 24 | GDO5AoGADtlVu39Kf3NLv8zg/YzzA2BLtr7ycN1UQwDflhhAjPCyAjw36shKYpFq 25 | lCd+RqWftUS8G9mDOwIrjXhn/Il2WUhyO0UOyHb8JZjD8sfS7+fKeY5Rbyx3/NxO 26 | WWJkMaZdI0mWlf2nDQ6B+Kf5NEXcZ8d1XePsMKZmXAxvzbaU/2s= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /euc/analyzers/Makefile: -------------------------------------------------------------------------------- 1 | TRANS_DIR= `pwd`/ 2 | IP_quad=`ifconfig | sed s/broadcast.*//g | tr " " "\n" | grep 128.84 | rev | sed s/'\..*'//g | rev` 3 | storeList='pgsql::SQLStore, pgsql::SQLStore' 4 | CPPFLAGS= -I$(PWD)/../../transactions/ -I$(PWD)/../../transactions//mtl -I$(PWD)/../../transactions//myria-utils -I$(PWD)/../../transactions//mutils-networking -I$(PWD)/../../transactions//mutils -I$(PWD)/../../transactions//mutils-containers -I$(PWD)/../../transactions//mutils-serialization -I$(PWD)/../../transactions//mutils-tasks -I$(PWD)/../../transactions//testing -I$(PWD)/../../transactions//server -I$(PWD)/../../transactions//pgsql -I$(PWD)/../../transactions//tracker -g -stdlib=libc++ --std=c++1z -DNUM_CAUSAL_GROUPS="4" -DSTORE_LIST=$(storeList) -DMY_IP=\"$(MY_IP)\" -DMAX_THREADS=$(MAX_THREADS) -DIP_QUAD=$(IP_quad) -DSTRONG_REMOTE_IP=\"$(STRONG_REMOTE_IP)\" -DCAUSAL_GROUP=$(causalGroup) -DCAUSAL_REMOTE_IP_1=\"$(CAUSAL_REMOTE_IP_1)\" -DCAUSAL_REMOTE_IP_2=\"$(CAUSAL_REMOTE_IP_2)\" -ferror-limit=1 -Wall -Werror -Wextra -DTRACK -DNOPOOL 5 | LDFLAGS= -stdlib=libc++ --std=c++1z -lm -pthread -lpqxx 6 | object_files=utils.o SQLLevels.o configuration_params.o run_result.o 7 | 8 | all: $(object_files) 9 | clang++ main.cpp $(object_files) $(CPPFLAGS) -o main $(LDFLAGS) 10 | utils.o: 11 | clang++ -c ../../transactions/*/utils.cpp $(CPPFLAGS) 12 | SQLLevels.o: 13 | clang++ -c ../../transactions/*/SQLLevels.cpp -DNUM_CAUSAL_MASTERS="2" -DCAUSAL_GROUP=$(causalGroup) $(CPPFLAGS) 14 | configuration_params.o: 15 | clang++ -c ../../transactions/configuration_params.cpp -DNUM_CAUSAL_MASTERS="2" -DCAUSAL_GROUP=$(causalGroup) $(CPPFLAGS) 16 | run_result.o: 17 | clang++ -c ../../transactions/run_result.cpp -DNUM_CAUSAL_MASTERS="2" -DCAUSAL_GROUP=$(causalGroup) $(CPPFLAGS) 18 | 19 | clean: 20 | rm *.o 21 | -------------------------------------------------------------------------------- /euc/analyzers/aligned_max_throughputs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /tmp/max_throughputs/ 4 | for bar in *iteration2*read.7; do 5 | suffix=`echo $bar | sed s/.*iteration.//g | rev | cut -d'/' -f1 | rev`; 6 | for foo in ignore; do 7 | pre_num=`echo $suffix | cut -d'.' 
-f2 | cut -d'-' -f1`; 8 | echo $pre_num | sed s/^.$/"$pre_num"0/g; echo '|'; 9 | cat *"$suffix" | xargs ~/research/andrew/consistency-tester/euc/analyzers/average.sh; 10 | done | tr "\n" "," | cut -d',' -f1-10 ; 11 | done | sort -n | sed s/,/', '/g | sort -n | sed s/'|,'/'|'/g 12 | -------------------------------------------------------------------------------- /euc/analyzers/analyze_and_plot_tvl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #we expect three arguments: arg1 : window increment; arg2: window duration; arg3: test run result file 3 | ./main $* | grep "us$" | tr , ' ' | awk '{print $1,$2}' | sed s/_Hz// | sed s/ms// | gnuplot -p -e "set title '$3'" -e "plot ' 0: 17 | print("fall in range " + str(bin[0]) + "-" + str(bin[-1]) +": " + str(len(bin))) 18 | -------------------------------------------------------------------------------- /euc/analyzers/csv_of_aligned_max_throughputs.sh: -------------------------------------------------------------------------------- 1 | rm /tmp/csvleft.csv 2>/dev/null 2 | rm /tmp/csvright.csv 2>/dev/null 3 | mkdir -p /tmp/csv_of_aligned_workdir/ 4 | i=0 5 | while read outer; do 6 | sort=0 7 | type=0 8 | echo $outer | tr '|' '\n' | while read line; do 9 | if [[ $sort = 0 ]]; then 10 | type=`echo $line | cut -d',' -f1`; 11 | fi 12 | if [[ $sort = 1 ]]; then 13 | echo $type > /tmp/csv_of_aligned_workdir/l_"$i" 14 | echo $line | tr "," "\n" | tr -d ' ' >> /tmp/csv_of_aligned_workdir/l_"$i" 15 | fi 16 | sort=$[sort+1] 17 | done 18 | echo /tmp/csv_of_aligned_workdir/l_"$i" >> /tmp/csv_of_aligned_workdir/lefts 19 | i=$[i+1] 20 | done < /dev/stdin 21 | cat /tmp/csv_of_aligned_workdir/lefts | xargs paste -d',' > /tmp/csvleft.csv 22 | rm /tmp/csv_of_aligned_workdir/lefts 23 | rm /tmp/csv_of_aligned_workdir/l_* 24 | rmdir /tmp/csv_of_aligned_workdir 25 | -------------------------------------------------------------------------------- /euc/analyzers/estimate_max_throughput.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | file=$1 4 | offset=$2 5 | 6 | granularity=1000 7 | lines=`wc -l $file | awk '{print $1}'` 8 | echo $lines 9 | echo $granularity 10 | iters=$[lines / granularity] 11 | uselines=$[offset * granularity] 12 | cat $file | tail -$uselines | sort -n > "$file"-sorted 13 | for ((i=0; i < $offset; i=$i+1)); do 14 | start=`head -$[$granularity + ($i * $granularity)] "$file"-sorted | tail -$granularity | head -1 | awk '{print $1}' | tr -d ','` 15 | end=`head -$[$granularity + ($i * $granularity)] "$file"-sorted | tail -$granularity | tail -1 | awk '{print $2}' | tr -d ','` 16 | echo $[ ($granularity * 1000000) / (end-start)] 17 | done 18 | -------------------------------------------------------------------------------- /euc/analyzers/fell_over.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import quick_analyzer 4 | from bin_things import bin_things 5 | 6 | bin_things(10,quick_analyzer.stream_operator(lambda log : log.done_time - log.submit_time)) 7 | -------------------------------------------------------------------------------- /euc/analyzers/gnuplot_pipestream: -------------------------------------------------------------------------------- 1 | for foo in MyriaStore-*; do cat $foo/throughput_v_latency.csv | grep -v Myria | sed -e s/_Hz//g -e s/us//g -e s/ms//g | tr ',' ' ' | awk '{print $1, $3}' | gnuplot -p -e "set title '$foo'" -e "plot ' 2 ] 8 | print('number 
aborts: ') 9 | print(len(abort_list)) 10 | print('number commits ') 11 | print(len(full_list) - len(abort_list)) 12 | print('abort messages: ') 13 | for message in abort_list: 14 | print(message) 15 | 16 | 17 | -------------------------------------------------------------------------------- /euc/analyzers/max_throughput.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./main $1 $2 $3 | grep "us$" | tr , ' ' | awk '{print $2,$1}' | grep -v [2-9][0-9][0-9][0-9]ms | grep -v [0-9][0-9][0-9][0-9][0-9]*ms | cut -d' ' -f2 | cut -d'_' -f1 | sort -n | tail -1 > $4 4 | -------------------------------------------------------------------------------- /euc/analyzers/parallel_max_throughput.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | i=0 4 | for foo in "$1"-results-iteration*; do 5 | ./max_throughput.sh 2 30s $foo /tmp/max_throughputs/`echo $foo | rev | cut -d'/' -f1 | rev` & 6 | i=$[i+1]; 7 | if [[ $i = 8 ]] ; then 8 | wait; 9 | i=0; 10 | fi; 11 | done 12 | -------------------------------------------------------------------------------- /euc/analyzers/quick_analyzer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | #idea; this script expects to take the file on stdin and 4 | #some expression to be run against it as a string in $*. 5 | #makes the various fields available as bash variables 6 | 7 | import sys 8 | import functools 9 | from collections import namedtuple 10 | 11 | 12 | def concat_all(l): 13 | r="" 14 | for s in l: 15 | r += s 16 | return r 17 | 18 | myria_log = namedtuple("myria_log",['submit_time', 'run_time', 'cc_num_tries', 'done_time', 'is_write', 'is_read', 'is_strong', 'is_causal', 'remote_failure_string', 'num_causal_tries', 'transaction_action', 'tracker_strong_afterread_tombstone_exists', 'tracker_strong_afterread_nonce_unavailable', 'tracker_causal_afterread_candidate']) 19 | 20 | def compose(*functions): 21 | return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x) 22 | 23 | def stream_operator(preprocess_fun): 24 | to_return=[] 25 | for line in sys.stdin.readlines(): 26 | if '[]() -> struct myria_log {struct myria_log ret' in line: 27 | entries = line.split(sep='{')[-1].split(sep='}')[0].split(sep=', ') 28 | to_return.append( 29 | preprocess_fun( 30 | myria_log( 31 | submit_time=int(entries[0]), 32 | run_time=int(entries[1]), 33 | cc_num_tries=int(entries[2]), 34 | done_time=int(entries[3]), 35 | is_write=bool(entries[4]), 36 | is_read=bool(entries[5]), 37 | is_strong=bool(entries[6]), 38 | is_causal=bool(entries[7]), 39 | remote_failure_string=entries[8], 40 | num_causal_tries=int(entries[9]), 41 | transaction_action=bool(entries[10]), 42 | tracker_strong_afterread_tombstone_exists=int(entries[11]), 43 | tracker_strong_afterread_nonce_unavailable=int(entries[12]), 44 | tracker_causal_afterread_candidate=int(entries[13])))) 45 | return to_return 46 | -------------------------------------------------------------------------------- /euc/analyzers/read_file.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "run_result.hpp" 3 | #include "configuration_params.hpp" 4 | #include <fstream> 5 | #include <sstream> 6 | 7 | namespace myria{ 8 | 9 | std::pair<configuration_parameters, std::vector<run_result>> read_from_file(const typename run_result::time_t &now, 10 | char const * const fname){ 11 | std::cout << fname << std::endl; 12 | std::pair<configuration_parameters, std::vector<run_result>> retp; 13 | std::vector<run_result> &ret =
retp.second; 14 | std::ifstream file{fname}; 15 | try { 16 | std::string config_line; 17 | std::getline(file,config_line); 18 | std::istringstream ignore_in{config_line}; 19 | configuration_parameters &ignore = retp.first; 20 | ignore_in >> ignore; 21 | std::cout << ignore << std::endl; 22 | { 23 | std::string line; 24 | while (std::getline(file,line)){ 25 | assert(line.size() > 8); 26 | ret.emplace_back(); 27 | std::istringstream in{line}; 28 | char const * const c_line = line.c_str(); 29 | (void) c_line; 30 | ret.back().read(now,in); 31 | } 32 | } 33 | return retp; 34 | } 35 | catch(const std::exception &e){ 36 | std::cout << e.what() << std::endl; 37 | throw; 38 | } 39 | } 40 | 41 | std::pair<configuration_parameters, std::vector<run_result>> read_from_files(const typename run_result::time_t &now, 42 | std::size_t count, char ** fnames){ 43 | std::pair<configuration_parameters, std::vector<run_result>> result; 44 | for (std::size_t i = 0; i < count; ++i){ 45 | auto partial = read_from_file(now,fnames[i]); 46 | if (i > 0) assert (same_run(result.first,partial.first)); 47 | result.first = partial.first; 48 | result.second.insert(result.second.end(),partial.second.begin(),partial.second.end()); 49 | } 50 | return result; 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /euc/analyzers/submit_vs_run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import quick_analyzer 4 | from bin_things import bin_things 5 | 6 | bin_things(10,quick_analyzer.stream_operator(lambda log : log.run_time - log.submit_time if log.submit_time < (480000/2) else -1)) 7 | #bin_things(10,quick_analyzer.stream_operator(lambda log : log.submit_time)) 8 | 9 | -------------------------------------------------------------------------------- /euc/analyzers/transpose.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mkdir -p /tmp/transpose_workdir 3 | i=0 4 | while read outer; do 5 | i=$[i+1] 6 | echo $outer | tr ',' '\n' > /tmp/transpose_workdir/$i 7 | echo -n /tmp/transpose_workdir/"$i " 8 | done < /dev/stdin | xargs paste -d',' 9 | #rm /tmp/transpose_workdir/[0-9]* 10 | #rmdir /tmp/transpose_workdir/ 11 | -------------------------------------------------------------------------------- /euc/analyzers/trivial_average.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import quick_analyzer 4 | 5 | res = quick_analyzer.stream_operator(lambda log : (log.submit_time,log.done_time)) 6 | short_res = res[int(len(res)/3):int(2*len(res)/3)] 7 | 8 | max_done = -1 9 | min_start = 99999999999999999999 10 | 11 | for start,done in short_res: 12 | if start < min_start: 13 | min_start = start 14 | if done > max_done: 15 | max_done = done 16 | 17 | print(1000*len(short_res) / (max_done - min_start)) 18 | -------------------------------------------------------------------------------- /euc/analyzers/window_averages.py: -------------------------------------------------------------------------------- 1 | #moving window averages. Calculates the moving-window averages for MyriaStore 2 | 3 | #all times are in milliseconds! 4 | #argument: duration of window 5 | #argument: step-duration of window 6 | #argument: list of (start_time, finish_time, is_error) tuples. 7 | #probably list of tuples; regardless must allow destructuring bind.
+ index-based accesses 8 | def moving_window(window_size, window_step, time_couples): 9 | #sort by completion time, and then filter out any integer overflows in the log 10 | sorted_by_completion_time = [elem for elem in sorted(time_couples, key=(lambda x : x[1])) if elem[1] > 0] 11 | max_done_time = sorted_by_completion_time[-1][1] 12 | 13 | start_index = 0 14 | window_start = 0 15 | 16 | print(sorted_by_completion_time) 17 | print(max_done_time) 18 | 19 | averages = [] 20 | window_end=window_step 21 | while window_end <= max_done_time: 22 | try: 23 | total_latency = 0 24 | total_events = 0 25 | for (start_time,done_time,is_error) in sorted_by_completion_time: 26 | if done_time < window_start: 27 | start_index += 1 28 | continue 29 | if done_time > window_end: 30 | break 31 | if is_error: 32 | continue 33 | total_latency += done_time - start_time 34 | total_events += 1 35 | if total_events == 0: 36 | continue 37 | else: 38 | averages.append((total_events,total_latency)) 39 | finally: 40 | window_start += window_step 41 | window_end += window_step 42 | return averages 43 | -------------------------------------------------------------------------------- /euc/as-gentoo.sh: -------------------------------------------------------------------------------- 1 | #echo `whoami` "is now here: " `pwd` "with id" $1 2 | cd 3 | cd consistency-tester/transactions/ 4 | if [[ $first_iter ]]; 5 | then 6 | git checkout pg_env.sh 7 | git checkout vm_main.cpp 8 | git checkout master 9 | git pull 10 | #git checkout 8aa31399d5e62f5ad1f94459ecffb94bc0f18fc8 11 | if [[ -d mutils ]] 12 | then cd mutils; git checkout master; git pull; cd .. 13 | else git clone https://github.com/mpmilano/mutils.git 14 | fi 15 | if [[ -d mutils-tasks ]] 16 | then cd mutils-tasks; git checkout master; git pull; cd .. 17 | else git clone https://github.com/mpmilano/mutils-tasks.git 18 | fi 19 | cd mutils-tasks; git checkout master; cd .. 20 | if [[ -d mutils-serialization ]] 21 | then cd mutils-serialization; git checkout master; git pull; cd .. 22 | else git clone https://github.com/mpmilano/mutils-serialization.git 23 | fi 24 | if [[ -d mutils-containers ]] 25 | then cd mutils-containers; git checkout master; git pull; cd .. 26 | else git clone https://github.com/mpmilano/mutils-containers.git 27 | fi 28 | if [[ -d mutils-networking ]] 29 | then cd mutils-networking; git checkout master; git pull; cd ..
30 | else git clone https://github.com/mpmilano/mutils-networking.git 31 | fi 32 | echo rebuilding 33 | make clean 34 | else rm vm 35 | fi 36 | source pg_env.sh 37 | echo $* 38 | export causalGroup="$1" 39 | export MY_IP="$2" 40 | shift 2 41 | export CAUSAL_REMOTE_IP_1="$2" 42 | export CAUSAL_REMOTE_IP_2="$3" 43 | export STRONG_REMOTE_IP="$1" 44 | shift 3 45 | export num_clients=$1 46 | export client_rate=$2 47 | export client_increase_rate=$3 48 | export test_stop_time=$4 49 | export percent_causal=$5 50 | export percent_read=$6 51 | shift 6 52 | export MAX_THREADS=$1 53 | export first_iter=$2 54 | killall -9 simple_txn_test 55 | rm /tmp/Myria* 56 | make -j4 vm 57 | ./vm 58 | wait 59 | echo "done waiting" 60 | exit 61 | -------------------------------------------------------------------------------- /euc/hours_minutes_to_minutes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo $* | tr ':' ' ' | awk '{print $1*60 + $2}' 3 | -------------------------------------------------------------------------------- /euc/kill_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z "$instance_list" ]] 4 | then 5 | echo "failure: specify instance_list as environment variable" 6 | exit 1 7 | fi 8 | 9 | for foo in $instance_list; do 10 | ssh -o "UserKnownHostsFile /dev/null" -o strictHostKeyChecking=no -i MyriaInstances.pem ubuntu@$foo sudo killall -9 vm & 11 | done 12 | wait 13 | -------------------------------------------------------------------------------- /euc/kill_overrun.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | if [[ `./running_time.sh $*` -gt 25 ]]; then 5 | killall $* 6 | else 7 | echo "spared" 8 | fi 9 | 10 | -------------------------------------------------------------------------------- /euc/local_writesweep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #assume this tells me where to restart 4 | if [[ $1 ]]; then 5 | start_at="$1" 6 | else start_at=0 7 | fi 8 | 9 | full_range=".01 .05 .1 .15 .2 .25 .3 .35 .4 .45 .5 .55 .6 .65 .7 .75 .8 .85 .9 .99" 10 | 11 | short_sweep=".05 .3 .7 .95" 12 | 13 | for iteration_number in 2 3 4 5; do 14 | for percent_causal in $full_range; do 15 | if [[ `echo "$percent_causal >= $start_at" | bc` = 1 ]] 16 | then for percent_read in $short_sweep; do 17 | echo "starting $percent_causal, $percent_read" 18 | ssh research@research.xelserv.com killall strong_relay 19 | ssh research@milano.cs.cornell.edu killall causal_relay 20 | sleep 15 21 | ./simple_txn_test 128.253.3.197 8876 128.84.217.139 8877 10_Hz 510 10_Hz 10min 0.01 $percent_causal $percent_read /tmp/MyriaStore-results-iteration"$iteration_number"-causal"$percent_causal"-read"$percent_read" 5s 10 8 22 | 23 | done 24 | fi 25 | done 26 | done 27 | -------------------------------------------------------------------------------- /euc/local_writesweep_tracked.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #assume this tells me where to restart 4 | if [[ $1 ]]; then 5 | start_at="$1" 6 | else start_at=0 7 | fi 8 | 9 | full_range=".01 .05 .15 .25 .35 .45 .55 .65 .75 .85 .9 .99" 10 | 11 | short_sweep=".7" 12 | 13 | for iteration_number in 2; do 14 | for percent_causal in $full_range; do 15 | if [[ `echo "$percent_causal >= $start_at" | bc` = 1 ]] 16 | then for percent_read in $short_sweep; do 17 | echo 
"starting $percent_causal, $percent_read" 18 | ssh research@research.xelserv.com killall strong_relay_tracked 19 | ssh research@milano.cs.cornell.edu killall causal_relay_tracked 20 | sleep 15 21 | echo ./simple_txn_test_tracked 128.253.3.197 8876 128.84.217.139 8877 10_Hz 510 10_Hz 10min 0.01 $percent_causal $percent_read /tmp/MyriaStore-tracked-results-iteration"$iteration_number"-causal"$percent_causal"-read"$percent_read" 5s 10 8 22 | ./simple_txn_test_tracked 128.253.3.197 8876 128.84.217.139 8877 10_Hz 510 10_Hz 10min 0.01 $percent_causal $percent_read /tmp/MyriaStore-tracked-results-iteration"$iteration_number"-causal"$percent_causal"-read"$percent_read" 5s 10 8 23 | 24 | done 25 | fi 26 | done 27 | done 28 | -------------------------------------------------------------------------------- /euc/low_ram.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | [[ `free -m | awk '{print $7}' | grep [0-9]` -lt 800 ]] 3 | -------------------------------------------------------------------------------- /euc/mutils: -------------------------------------------------------------------------------- 1 | /home/xlnagla/research/andrew/consistency-tester/transactions/mutils -------------------------------------------------------------------------------- /euc/plot_two_graphs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | which_prefix_1="$1" #~research/results/ 3 | which_prefix_2="$2" #~research/results/ 4 | which_directories="$3" #MyriaStore-.05-.05 5 | for foo in "$which_directories"; do cat $which_prefix_1/$foo/throughput*latency* | grep -v Myria | sed -e s/_Hz//g -e s/us//g -e s/ms//g | tr ',' ' ' > /tmp/plot1; cat $which_prefix_2/$foo/throughput*latency* | grep -v Myria | sed -e s/_Hz//g -e s/us//g -e s/ms//g | tr ',' ' ' > /tmp/plot2; gnuplot -p -e "set title '$foo'" -e "set term png" -e "set output '$foo.png'" -e "plot '/tmp/plot1' using 3:1, '/tmp/plot2' using 3:1"; done 6 | -------------------------------------------------------------------------------- /euc/ram_monitor.sh: -------------------------------------------------------------------------------- 1 | if ssh $* ./low_ram.sh; then 2 | echo "low ram"; 3 | killall strong_relay; 4 | killall causal_relay; 5 | killall strong_relay_tracked; 6 | killall causal_relay_tracked; 7 | killall simple_txn_test; 8 | killall simple_txn_test_tracked; 9 | killall mailing_list_test; 10 | else 11 | echo fine; 12 | fi 13 | -------------------------------------------------------------------------------- /euc/reboot_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z "$instance_list" ]] 4 | then 5 | echo "failure: specify instance_list as environment variable" 6 | exit 1 7 | fi 8 | 9 | for foo in $instance_list; do 10 | ssh -o "UserKnownHostsFile /dev/null" -o strictHostKeyChecking=no -i MyriaInstances.pem ubuntu@$foo sudo /sbin/reboot 11 | done 12 | -------------------------------------------------------------------------------- /euc/results-analyzer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -d "$1" ]] 4 | then results_dir="$1" 5 | else echo "Error: results dir (first argument) must be directory. Got $1" 6 | exit 1 7 | fi 8 | 9 | if [[ -d "$2" ]] 10 | then analyzer="$2" 11 | else echo "Error: analyzer directory (second argument) must be directory. 
Got $2" 12 | exit 1 13 | fi 14 | 15 | if [[ -d "$3" ]] 16 | then mutils="$3" 17 | else echo "Error: mutils library (third argument) must be directory. Got $3" 18 | exit 1 19 | fi 20 | 21 | shift 22 | shift 23 | shift 24 | 25 | test_dir=`echo "$results_dir"/output_*o | rev | cut -d'/' -f1 | rev` 26 | if [[ -f `find "$results_dir" -name $test_dir -type f` ]] 27 | then echo "object files found" 28 | else bash "$analyzer"/build-analyzer.sh $results_dir "$mutils" 29 | fi 30 | 31 | rm /tmp/myriastore_results_analysis_dir2/analyzer_bin 32 | mkdir -p /tmp/myriastore_results_analysis_dir2/ 33 | clang++ -g -O3 -ferror-limit=1 -I"$mutils" -L"$mutils" --std=c++14 -lmutils -lgc -lprofiler -o /tmp/myriastore_results_analysis_dir2/analyzer_bin -I"$results_dir" $results_dir/output*.o "$analyzer"/results-analyzer.cpp; 34 | LD_LIBRARY_PATH="$mutils" /tmp/myriastore_results_analysis_dir2/analyzer_bin -ferror-limit=1 35 | 36 | -------------------------------------------------------------------------------- /euc/running_time.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo $[`date +%H:%M | xargs ./hours_minutes_to_minutes.sh ` - `./time_launched.sh $*`] 3 | -------------------------------------------------------------------------------- /euc/test-things-loop-body.sh: -------------------------------------------------------------------------------- 1 | i=$1 2 | foo="$2" 3 | percent_read="$3" 4 | percent_causal="$4" 5 | strong_target=$5 6 | causal_target_1=$6 7 | causal_target_2=$7 8 | num_clients=$8 9 | client_rate=$9 10 | shift 11 | client_increase_rate=$9 12 | shift 13 | test_stop_time=$9 14 | shift 15 | max_threads=$9 16 | shift 17 | first_iter=$9 18 | 19 | scp -o "UserKnownHostsFile /dev/null" -o strictHostKeyChecking=no -i MyriaInstances.pem vm-actions.sh ubuntu@"$foo":vm-actions.sh 20 | scp -o "UserKnownHostsFile /dev/null" -o strictHostKeyChecking=no -i MyriaInstances.pem as-gentoo.sh ubuntu@"$foo":as-gentoo.sh 21 | ssh -o "UserKnownHostsFile /dev/null" -o strictHostKeyChecking=no -i MyriaInstances.pem ubuntu@"$foo" sudo /bin/bash vm-actions.sh $i "$foo" $ndebug $strong_target $causal_target_1 $causal_target_2 $num_clients $client_rate $client_increase_rate $test_stop_time $percent_causal $percent_read $max_threads $first_iter 22 | -------------------------------------------------------------------------------- /euc/test-things-loop-body2.sh: -------------------------------------------------------------------------------- 1 | i=$1 2 | foo="$2" 3 | causal_percent="$3" 4 | read_percent="$4" 5 | 6 | path=/home/research/results/MyriaStore-"$causal_percent"-"$read_percent"/"$foo"/ 7 | 8 | mkdir -p "$path" 9 | scp -o "UserKnownHostsFile /dev/null" -o strictHostKeyChecking=no -i MyriaInstances.pem ubuntu@"$foo":/mnt/gentoo/tmp/MyriaStore* "$path" 10 | 11 | -------------------------------------------------------------------------------- /euc/test-things.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | oldwd=`pwd` 4 | cd ~/euc/ 5 | 6 | i=0 7 | if [[ -z $strong_target ]] 8 | then 9 | echo "failure: specify strong target as environment variable" 10 | exit 1 11 | elif [[ -z $causal_target_1 || -z $causal_target_2 ]] 12 | then 13 | echo "failure: specify causal target as environment variable" 14 | exit 1 15 | elif [[ -z "$instance_list" ]] 16 | then 17 | echo "failure: specify instance_list as environment variable" 18 | exit 1 19 | elif [[ -z "$causal_percentages" ]] 20 | then 21 | echo "failure: 
specify causal_percentages" 22 | exit 1 23 | elif [[ -z "$read_percentages" ]] 24 | then 25 | echo "failure: specify read_percentages as environment variable." 26 | exit 1 27 | elif [[ -z "$num_clients" ]] 28 | then 29 | echo "failure: specify num_clients as environment variable." 30 | exit 1 31 | elif [[ -z "$client_rate" ]] 32 | then 33 | echo "failure: specify client_rate as environment variable." 34 | exit 1 35 | elif [[ -z "$client_increase_rate" ]] 36 | then 37 | echo "failure: specify client_increase_rate as environment variable." 38 | exit 1 39 | elif [[ -z "$test_stop_time" ]] 40 | then 41 | echo "failure: specify test_stop_time as environment variable." 42 | exit 1 43 | elif [[ -z "$max_threads" ]] 44 | then 45 | echo "failure: specify max_threads as environment variable." 46 | exit 1 47 | fi 48 | if [[ "$rebuild" = "true" ]] 49 | then first_iter="true" 50 | fi 51 | 52 | for read_percent in $read_percentages; do 53 | for causal_percent in $causal_percentages; do 54 | i=0 55 | ssh research@"$strong_target" killall strong_relay 56 | ssh research@"$causal_target_1" killall causal_relay 57 | ssh research@"$causal_target_2" killall causal_relay 58 | #postgres restart can take up to 92 seconds, apparently. 59 | sleep 95 60 | for foo in $instance_list 61 | do 62 | i=$[i%4 + 1] 63 | /bin/bash test-things-loop-body.sh $i $foo $read_percent $causal_percent $strong_target $causal_target_1 $causal_target_2 $num_clients $client_rate $client_increase_rate $test_stop_time $max_threads $first_iter& 64 | done 65 | wait 66 | unset first_iter 67 | i=0 68 | echo "all done" 69 | for foo in $instance_list 70 | do 71 | i=$[i%4 + 1] 72 | /bin/bash test-things-loop-body2.sh $i $foo $causal_percent $read_percent& 73 | done 74 | wait 75 | echo copy done 76 | done 77 | done 78 | 79 | cd $oldwd 80 | -------------------------------------------------------------------------------- /euc/time_launched.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ps -ef | grep -i $* | grep -v grep | awk '{print $5}' | xargs ./hours_minutes_to_minutes.sh 3 | -------------------------------------------------------------------------------- /euc/vacuum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo 'vacuum analyze causalstore."IntStore"; vacuum analyze causalstore."BlobStore"; vacuum analyze causalstore.counters; vacuum analyze "BlobStore"."BlobStore"; vacuum analyze "BlobStore"."IntStore"' | psql -h $1 DataStore; 3 | -------------------------------------------------------------------------------- /euc/vm-actions.sh: -------------------------------------------------------------------------------- 1 | #echo hello `whoami` 2 | #echo "I have id $1" 3 | mkdir /mnt/gentoo 2>/dev/null 4 | mount /dev/vdb /mnt/gentoo 2>/dev/null 5 | cp /home/ubuntu/as-gentoo.sh /mnt/gentoo 6 | cd /mnt/gentoo/ 7 | mount --rbind /dev dev 2>/dev/null 8 | rm /dev/shm 2>/dev/null 9 | mkdir /dev/shm 2>/dev/null 10 | mount -t tmpfs shm /dev/shm 2>/dev/null 11 | mount --make-rslave dev 2>/dev/null 12 | mount --rbind /sys/ sys 2>/dev/null 13 | mount -t proc none proc 2>/dev/null 14 | mount --make-rslave sys 2>/dev/null 15 | echo " 16 | * hard nofile 500000 17 | * soft nofile 500000 18 | root hard nofile 500000 19 | root soft nofile 500000 20 | " > /etc/security/limits.conf 21 | cp /etc/security/limits.conf etc/security/limits.conf 22 | cp -L /etc/resolv.conf etc/ 2>/dev/null 23 | cd /mnt/ 24 | if [[ -f /mnt/gentoo/etc/portage/package.env ]] 25 | 
then 26 | echo "this one is spared" 27 | else 28 | echo "dev-libs/libpqxx clang" > /mnt/gentoo/etc/portage/package.env 29 | chroot gentoo /usr/bin/emerge --oneshot dev-libs/libpqxx 30 | fi 31 | 32 | #echo "done so far" `pwd` 33 | echo $* 34 | chroot gentoo /bin/su -c "/bin/bash /as-gentoo.sh $*" research 35 | #chroot gentoo /bin/su -c "/usr/bin/emerge libunwind" root 36 | echo exiting vm 37 | pkill --signal HUP sshd 38 | exit 39 | -------------------------------------------------------------------------------- /transactions/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "extras": "cpp", 4 | "sstream": "cpp", 5 | "array": "cpp", 6 | "atomic": "cpp", 7 | "*.tcc": "cpp", 8 | "bitset": "cpp", 9 | "cctype": "cpp", 10 | "chrono": "cpp", 11 | "clocale": "cpp", 12 | "cmath": "cpp", 13 | "condition_variable": "cpp", 14 | "cstdint": "cpp", 15 | "cstdio": "cpp", 16 | "cstdlib": "cpp", 17 | "cstring": "cpp", 18 | "ctime": "cpp", 19 | "cwchar": "cpp", 20 | "cwctype": "cpp", 21 | "deque": "cpp", 22 | "list": "cpp", 23 | "unordered_map": "cpp", 24 | "vector": "cpp", 25 | "exception": "cpp", 26 | "fstream": "cpp", 27 | "functional": "cpp", 28 | "future": "cpp", 29 | "initializer_list": "cpp", 30 | "iosfwd": "cpp", 31 | "iostream": "cpp", 32 | "istream": "cpp", 33 | "limits": "cpp", 34 | "mutex": "cpp", 35 | "new": "cpp", 36 | "ostream": "cpp", 37 | "numeric": "cpp", 38 | "ratio": "cpp", 39 | "stdexcept": "cpp", 40 | "streambuf": "cpp", 41 | "system_error": "cpp", 42 | "thread": "cpp", 43 | "type_traits": "cpp", 44 | "tuple": "cpp", 45 | "typeinfo": "cpp", 46 | "utility": "cpp", 47 | "__config": "cpp" 48 | } 49 | } -------------------------------------------------------------------------------- /transactions/Basics.cpp: -------------------------------------------------------------------------------- 1 | #include "Basics.hpp" 2 | 3 | namespace myria{ 4 | 5 | } 6 | -------------------------------------------------------------------------------- /transactions/Basics.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | namespace myria{ 5 | 6 | 7 | constexpr int num_processes = 50; 8 | static_assert(num_processes <= 100,"Error: you are at risk of too many open files"); 9 | 10 | using Name = long int; 11 | 12 | 13 | 14 | } 15 | -------------------------------------------------------------------------------- /transactions/DataStore.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "GDataStore.hpp" 4 | #include "Basics.hpp" 5 | 6 | namespace myria { 7 | 8 | template struct LabelFreeHandle; 9 | 10 | namespace tracker { 11 | struct Tombstone; 12 | using Clock = std::array; 13 | using Nonce = long; 14 | } 15 | 16 | template class DataStore : public virtual GDataStore { 17 | public: 18 | // we'll delete the TransactionContext 19 | // when the transaction is over. Do any cleanup you need to do then. 20 | // the parameters to this function should just be passed directly to 21 | // TransactionContext's constructor. 
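// For illustration, a hedged usage sketch (the variable "store" below stands for
// some hypothetical DataStore implementation; it is not declared in this header):
//
//   auto ctx = store.begin_transaction(
//   #ifndef NDEBUG
//       "why this transaction was started"
//   #endif
//   );
//   // ... run the transaction's operations against *ctx ...
//   // Letting the returned unique_ptr go out of scope destroys the
//   // TransactionContext; per the note above, that is where a store should
//   // perform its per-transaction cleanup.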
22 | virtual std::unique_ptr> begin_transaction( 23 | #ifndef NDEBUG 24 | const std::string &why 25 | #endif 26 | ) = 0; 27 | virtual ~DataStore() = default; 28 | }; 29 | 30 | struct TrackableDataStore_super : public virtual GDataStore { 31 | virtual std::unique_ptr> 32 | new_tomb_trk(mtl::GPhaseContext *ctx, Name, const tracker::Tombstone &) = 0; 33 | virtual bool exists_trk(mtl::GPhaseContext *_ctx, Name) = 0; 34 | virtual std::unique_ptr> 35 | existing_clock_trk(mtl::GPhaseContext *_ctx, Name) = 0; 36 | virtual std::unique_ptr> 37 | existing_tombstone_trk(mtl::GPhaseContext *_ctx, Name) = 0; 38 | virtual ~TrackableDataStore_super() = default; 39 | }; 40 | 41 | struct WeakTrackableDataStore : public virtual TrackableDataStore_super { 42 | virtual const std::array &local_time() const = 0; 43 | virtual ~WeakTrackableDataStore() = default; 44 | }; 45 | 46 | struct StrongTrackableDataStore : public virtual TrackableDataStore_super { 47 | virtual ~StrongTrackableDataStore() = default; 48 | }; 49 | 50 | template 51 | struct TrackableDataStore_common : virtual public TrackableDataStore_super { 52 | std::unique_ptr> 53 | new_tomb_trk(mtl::GPhaseContext *_ctx, Name n, const tracker::Tombstone &val); 54 | 55 | bool exists_trk(mtl::GPhaseContext *_ctx, Name n); 56 | 57 | std::unique_ptr> 58 | existing_clock_trk(mtl::GPhaseContext *_ctx, Name n); 59 | std::unique_ptr> 60 | existing_tombstone_trk(mtl::GPhaseContext *_ctx, Name); 61 | virtual ~TrackableDataStore_common() = default; 62 | }; 63 | 64 | // to support tracking, your datastore needs a few extra 65 | // methods. We ensure they are available here. 66 | template struct _TrackableDataStore; 67 | template 68 | struct _TrackableDataStore : public WeakTrackableDataStore, 69 | public TrackableDataStore_common, 70 | public DataStore