├── LookasideCache_Metastability ├── LoadGenerator │ ├── README.md │ ├── Makefile │ ├── trigger_size_k.py │ ├── LICENSE │ ├── curl │ │ ├── stdcheaders.h │ │ ├── curlver.h │ │ ├── mprintf.h │ │ ├── easy.h │ │ ├── curlbuild.h │ │ ├── curlrules.h │ │ └── multi.h │ ├── lookaside_cache_vm.pem │ ├── TraceFileGenerator.py │ ├── TraceReader.cpp │ ├── time.hpp │ ├── TraceReader.hpp │ ├── run_experiment.py │ ├── collect_stats_over_time.py │ └── TraceReplay.cpp ├── NGINX Web Server │ └── www │ │ └── html │ │ ├── util │ │ ├── info.php │ │ ├── memcached_throughput_test.php │ │ ├── db_connect_test.php │ │ ├── database_throughtput_test.php │ │ ├── memcache_load_data.php │ │ └── server_throughput_test.php │ │ ├── bkup │ │ ├── index.nginx-debian.html │ │ ├── index.php.save │ │ ├── _index.php │ │ └── _index.html │ │ └── index.php ├── setup_scripts │ ├── linearize_column_data.sql │ ├── .server_setup.sh.swp │ ├── add_user.sql │ ├── set_server_exec_time.sh │ ├── setup_memcached.sh │ ├── setup_client.sh │ ├── init_database.sql │ ├── setup_server.sh │ └── setup_mysql.sh ├── Memcached codes │ └── warm_up_cache.py ├── config_files │ ├── deb_conf.dat │ └── default └── README.md ├── GC_Metastability ├── example_result │ └── GC_Metastability_Example.png ├── Dockerfile ├── README.md ├── run.sh ├── plot.py ├── analyze.py └── GCMetastability.java ├── README.md ├── Retry_Metastability ├── primary │ ├── trigger.sh │ └── config.js ├── client │ ├── config.sh │ ├── experiment.sh │ ├── mongoC3.go │ └── experiments.sh └── README.md ├── .gitignore └── LICENSE /LookasideCache_Metastability/LoadGenerator/README.md: -------------------------------------------------------------------------------- 1 | # MetastabilityCacheExample -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/util/info.php: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/linearize_column_data.sql: -------------------------------------------------------------------------------- 1 | use metastable_test_db; 2 | SET @a:= 300; 3 | UPDATE large_test_table SET tcol04=@a:=@a-1; -------------------------------------------------------------------------------- /GC_Metastability/example_result/GC_Metastability_Example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lexiangh/Metastability/HEAD/GC_Metastability/example_result/GC_Metastability_Example.png -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/.server_setup.sh.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lexiangh/Metastability/HEAD/LookasideCache_Metastability/setup_scripts/.server_setup.sh.swp -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Metastability 2 | This is the open-sourced code repo for the [OSDI'22] paper "Metastable Failures in the Wild". The three metastability examples are released in separate folders. Please check each folder for detailed instructions.
3 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/add_user.sql: -------------------------------------------------------------------------------- 1 | CREATE USER 'metastable'@'remote_server_ip' IDENTIFIED BY 'hello@123'; 2 | GRANT CREATE, ALTER, DROP, INSERT, UPDATE, DELETE, SELECT, REFERENCES, RELOAD on *.* TO 'metastable'@'remote_server_ip' WITH GRANT OPTION; 3 | FLUSH PRIVILEGES; 4 | exit; -------------------------------------------------------------------------------- /GC_Metastability/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | RUN apt-get update && \ 4 | DEBIAN_FRONTEND=noninteractive apt-get install -y \ 5 | openjdk-8-jdk \ 6 | python3-pip \ 7 | && apt-get clean && rm -rf /var/lib/apt/lists/* \ 8 | && pip3 install pandas \ 9 | && pip3 install matplotlib 10 | 11 | RUN mkdir /gc_artifacts 12 | WORKDIR /gc_artifacts -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/set_server_exec_time.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo sed -i 's/max_execution_time/;max_execution_time/' /etc/php/7.2/fpm/php.ini 3 | echo "max_execution_time = $1" >> /etc/php/7.2/fpm/php.ini 4 | 5 | sudo sed -i 's/request_terminate_timeout/;request_terminate_timeout/' /etc/php/7.2/fpm/pool.d/www.conf 6 | echo "request_terminate_timeout= $1" >> /etc/php/7.2/fpm/pool.d/www.conf -------------------------------------------------------------------------------- /Retry_Metastability/primary/trigger.sh: -------------------------------------------------------------------------------- 1 | #sample usage 2 | #bash trigger.sh 10 5 .5 1 3 | #runs 10 seconds, then trigger executes for 5 seconds at 0.5 cpu then back to full cpu 4 | 5 | untilTrigger="$1" 6 | triggerDuration="$2" 7 | triggerCPU="$3" 8 | finalCPU="$4" 9 | 10 | #date +%s%N 11 | sudo docker update --cpus "$finalCPU" primary 12 | sleep "$untilTrigger" 13 | sudo docker update --cpus "$triggerCPU" primary 14 | sleep "$triggerDuration" 15 | sudo docker update --cpus "$finalCPU" primary 16 | 17 | -------------------------------------------------------------------------------- /Retry_Metastability/client/config.sh: -------------------------------------------------------------------------------- 1 | primary= 2 | secondary1= 3 | secondary2= 4 | 5 | pem_file= 6 | 7 | [ -z "$primary" ] && echo "ERROR: set primary replica ip in config.sh" && exit 1 8 | [ -z "$secondary1" ] && echo "ERROR: set secondary1 replica ip in config.sh" && exit 1 9 | [ -z "$secondary2" ] && echo "ERROR: set secondary2 replica ip in config.sh" && exit 1 10 | [ -z "$pem_file" ] && echo "ERROR: set pem file absolute address in config.sh" && exit 1 11 | 12 | d=$(date +"%Y%m%d") #used to create output directory 13 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/setup_memcached.sh: -------------------------------------------------------------------------------- 1 | sudo apt-get update 2 | sudo apt-get -y install memcached 3 | sudo apt-get -y install libmemcached-tools 4 | sudo chmod 777 /etc/memcached.conf 5 | sudo sed -i 's/-l 127.0.0.1/-l 0.0.0.0/' /etc/memcached.conf 6 | sudo sed -i 's/-m/#-m/' /etc/memcached.conf 7 | echo "-m $1" >> /etc/memcached.conf 8 | sudo service memcached restart 9 | sudo apt-get -y install python3-pip 10 | sudo
pip3 install pymemcache 11 | 12 | sudo sed -i "/warm_up_size =/c\warm_up_size = $2" ../Memcached\ codes/warm_up_cache.py 13 | sudo mv ../Memcached\ codes/warm_up_cache.py ~/warm_up_cache.py -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/Makefile: -------------------------------------------------------------------------------- 1 | TARGET = TraceReplay 2 | OBJS += TraceReplay.o 3 | OBJS += TraceReader.o 4 | LIBS += -lpthread 5 | LIBS += -lcurl 6 | LIBS += -lrt 7 | 8 | CC = g++ 9 | #CFLAGS = -Wall -Werror -DDEBUG -g # debug flags 10 | CFLAGS = -Wall -Werror -g -O2 # release flags 11 | CFLAGS += -MMD -MP 12 | LDFLAGS += -Llib $(LIBS) 13 | 14 | default: $(TARGET) 15 | all: $(TARGET) 16 | 17 | $(TARGET): $(OBJS) 18 | $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) 19 | 20 | %.o: %.c 21 | $(CC) $(CFLAGS) -c -o $@ $< 22 | 23 | %.o: %.cpp 24 | $(CC) $(CFLAGS) -c -o $@ $< 25 | 26 | DEPS = $(OBJS:%.o=%.d) 27 | -include $(DEPS) 28 | 29 | clean: 30 | -rm $(TARGET) $(OBJS) $(DEPS) 31 | -------------------------------------------------------------------------------- /Retry_Metastability/primary/config.js: -------------------------------------------------------------------------------- 1 | //Configure ip settings here 2 | let primary = ""//enter primary ip address 3 | let secondary1 = ""//enter secondary 1 address 4 | let secondary2 = ""//enter secondary2 address 5 | 6 | 7 | rsconf = { 8 | _id : "rsmongo", 9 | members: [ 10 | { 11 | "_id": 0, 12 | "host": `${primary}:27017`, 13 | "priority": 4 14 | }, 15 | { 16 | "_id": 1, 17 | "host": `${secondary1}:27017`, 18 | "priority": 2 19 | }, 20 | { 21 | "_id": 2, 22 | "host": `${secondary2}:27017`, 23 | "priority": 1 24 | } 25 | ] 26 | } 27 | 28 | rs.initiate(rsconf); 29 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/bkup/index.nginx-debian.html: --------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 | <title>Welcome to nginx!</title>
5 | <style>
6 |     body {
7 |         width: 35em;
8 |         margin: 0 auto;
9 |         font-family: Tahoma, Verdana, Arial, sans-serif;
10 |     }
11 | </style>
12 | </head>
13 | <body>
14 | <h1>Welcome to nginx!</h1>
15 | <p>If you see this page, the nginx web server is successfully installed and
16 | working. Further configuration is required.</p>
17 | 
18 | <p>For online documentation and support please refer to
19 | <a href="http://nginx.org/">nginx.org</a>.<br/>
20 | Commercial support is available at
21 | <a href="http://nginx.com/">nginx.com</a>.</p>
22 | 
23 | <p><em>Thank you for using nginx.</em></p>
24 | </body>
25 | </html>
26 | 
-------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/bkup/index.php.save: -------------------------------------------------------------------------------- 1 | <?php 2 | $mem_var = new Memcached(); 3 | $mem_var->addServer("10.158.50.15", 11211); 4 | 5 | $request_type = $_GET['request_type']; 6 | $request_size = $_GET['request_size']; 7 | $arrival_time = $_GET['arrival_time']; 8 | $retry_policy = $_GET['retry_policy']; 9 | 10 | $zipf_index = 0; 11 | $memcached_response = $mem_var->get($zipf_index); 12 | 13 | echo $request_type; 14 | echo $request_size; 15 | echo $arrival_time; 16 | echo $retry_policy; 17 | 18 | if ($memcached_response) { 19 | echo $memcached_response; 20 | } else { 21 | echo "Adding Keys (K) for Values (V), You can then grab Value (V) for your Key (K) \m/ (-_-) \m/ "; 22 | $mem_var->set("Bilbo", "Here s Your (Ring) Master stored in MemCached (^_^)") or die(" Keys Couldn't be Created : Bilbo Not Found :'( "); 23 | } 24 | ?> 25 | -------------------------------------------------------------------------------- /GC_Metastability/README.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | This directory contains an example program to demonstrate metastable failures due to Garbage Collection (GC). The mechanism is a feedback loop: load spike -> high queue length -> heavy GC activity -> slower request processing -> high queue length. Please refer to Section 5.1 in the paper for more details. 3 | 4 | ## Dependencies 5 | ```bash 6 | sudo apt-get install docker.io 7 | sudo apt-get install python3-pip 8 | pip3 install pandas 9 | pip3 install matplotlib 10 | ``` 11 | 12 | ## Reference system set up 13 | * Instance type: AWS EC2 m5.large 14 | * OS: Ubuntu 20.04 15 | 16 | # Example Usage 17 | 18 | ```bash 19 | sudo docker build -t exp .
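# run.sh arguments, as documented in run.sh itself:
# <rps> <trigger_duration (ms)> <experiment_duration (s)> <auto_detection_enabled: positive integer = true> <max heap size, e.g. 256m>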
20 | ./run.sh 220 -1 1200 0 256m 21 | ./analyze.py job.csv gc.csv 22 | ./plot.py 23 | ``` 24 | 25 | The output plot is ./measurement_plots.png 26 | 27 | # Example Results 28 | ![plot](./example_result/GC_Metastability_Example.png) 29 | 30 | -------------------------------------------------------------------------------- /Retry_Metastability/client/experiment.sh: -------------------------------------------------------------------------------- 1 | source config.sh 2 | 3 | while getopts "p:c:n:i:r:s:d:t:o:f:" flag 4 | do 5 | case "${flag}" in 6 | p) primary=${OPTARG};; 7 | c) collection=${OPTARG};; 8 | n) n=${OPTARG};; 9 | i) requestInterval=${OPTARG};; 10 | o) timeOut=${OPTARG};; 11 | r) requestResends=${OPTARG};; 12 | s) untilTrigger=${OPTARG};; 13 | d) triggerDuration=${OPTARG};; 14 | t) triggerCpu=${OPTARG};; 15 | f) fileName=${OPTARG};; 16 | esac 17 | done 18 | 19 | 20 | 21 | mkdir -p $d 22 | 23 | ./mongoC3 -primary=$primary -collection=$collection -n=$n -interval=$requestInterval \ 24 | -timeout=$timeOut -resends=$requestResends 2>> $d/${fileName} & 25 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@$primary "bash primary/trigger.sh $untilTrigger $triggerDuration $triggerCpu 2" 26 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/trigger_size_k.py: -------------------------------------------------------------------------------- 1 | from pymemcache.client import base 2 | import sys 3 | 4 | args_len = len(sys.argv[1:]) 5 | 6 | memcached_host = '172.31.1.84' 7 | 8 | if(args_len != 1): 9 | print("enter valid parameter, 1 param for size") 10 | exit() 11 | 12 | print("Trigger started, size = " + str(sys.argv[1:][0])) 13 | 14 | trigger_size = int(sys.argv[1:][0]) 15 | client = base.Client(( memcached_host, 11211)) 16 | 17 | if(trigger_size > 0): 18 | for i in range(1, trigger_size +1): 19 | client.delete(str(i)) 20 | elif(trigger_size == -1): 21 | client.flush_all() 22 | print("flushed memcached") 23 | 24 | 25 | """ 26 | for i in range(1, trigger_size +1): 27 | result = client.get(str(i)) 28 | print(str(result)) 29 | 30 | test_key = "test_key" 31 | test_value = "test value" 32 | client.set(test_key, test_value) 33 | get_result = client.get(test_key) 34 | print("get: " + str(get_result)) 35 | client.delete(test_key) 36 | get_result_after_delete = client.get(test_key) 37 | print(str(get_result_after_delete)) 38 | """ 39 | 40 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Timothy Zhu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/setup_client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo apt-get update; 3 | sudo apt -y install build-essential; 4 | sudo apt-get -y install python3-pip; 5 | sudo pip3 install matplotlib 6 | sudo pip3 install scipy 7 | sudo pip3 install pymemcache 8 | sudo pip3 install SciencePlots 9 | 10 | sudo sed -i "/string ngnix_server_ip =/c\string ngnix_server_ip =\"$1\";" ../LoadGenerator/TraceReplay.cpp 11 | sudo sed -i "/memcached_host =/c\memcached_host = \'$2\'" ../LoadGenerator/run_experiment.py 12 | sudo sed -i "/master_host =/c\master_host = \'$4\'" ../LoadGenerator/run_experiment.py 13 | 14 | sudo sed -i "/memcached_host =/c\memcached_host = \'$2\'" ../LoadGenerator/trigger_size_k.py 15 | sudo sed -i "/row_nums_in_db =/c\row_nums_in_db = $3" ../LoadGenerator/TraceFileGenerator.py 16 | mkdir ../LoadGenerator/traces 17 | mkdir ../LoadGenerator/results_warm_cache 18 | mkdir ../LoadGenerator/result_stats 19 | mkdir ../LoadGenerator/experiment_plots 20 | cd ../LoadGenerator && make 21 | sudo chmod 600 ../config_files/cache_workers.pem 22 | sudo cp ../config_files/cache_workers.pem ../LoadGenerator/cache_workers.pem 23 | 24 | ssh-keyscan $1 >> $HOME/.ssh/known_hosts 25 | ssh-keyscan $2 >> $HOME/.ssh/known_hosts 26 | ssh-keyscan $4 >> $HOME/.ssh/known_hosts -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/init_database.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE IF NOT EXISTS metastable_test_db; 2 | 3 | CREATE TABLE `metastable_test_db`.`large_test_table` ( 4 | `id` int(11) NOT NULL AUTO_INCREMENT, 5 | `tcol01` tinyint(4) DEFAULT NULL, 6 | `tcol02` smallint(6) DEFAULT NULL, 7 | `tcol03` mediumint(9) DEFAULT NULL, 8 | `tcol04` int(11) DEFAULT NULL, 9 | `tcol05` bigint(20) DEFAULT NULL, 10 | `tcol06` float DEFAULT NULL, 11 | `tcol07` double DEFAULT NULL, 12 | `tcol08` decimal(10,2) DEFAULT NULL, 13 | `tcol09` date DEFAULT NULL, 14 | `tcol10` datetime DEFAULT NULL, 15 | `tcol11` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, 16 | `tcol12` time DEFAULT NULL, 17 | `tcol13` year(4) DEFAULT NULL, 18 | `tcol14` varchar(100) DEFAULT NULL, 19 | `tcol15` char(2) DEFAULT NULL, 20 | `tcol16` blob, 21 | `tcol17` text, 22 | `tcol18` mediumtext, 23 | `tcol19` mediumblob, 24 | `tcol20` longblob, 25 | `tcol21` longtext, 26 | `tcol22` mediumtext, 27 | `tcol23` varchar(3) DEFAULT NULL, 28 | `tcol24` varbinary(10) DEFAULT NULL, 29 | `tcol25` enum('a','b','c') DEFAULT NULL, 30 | `tcol26` set('red','green','blue') DEFAULT NULL, 31 | `tcol27` float(5,3) DEFAULT NULL, 32 | `tcol28` double(4,2) DEFAULT NULL, 33 | PRIMARY KEY (`id`) 34 | ) ENGINE=InnoDB; 35 | -------------------------------------------------------------------------------- /GC_Metastability/run.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rps=$1 4 | trigger_duration=$2 # in ms 5 | experiment_duration=$3 # in second 6 | auto_detection_enabled=$4 # positive integer indicates "true" 7 | maxheapsize=$5 #e.g., 256m 8 | 9 | image_name="exp" 10 | container_name="exp_container" 11 | 12 | # Remove existing containers 13 | sudo docker stop ${container_name} 14 | sudo docker rm ${container_name} 15 | sudo docker volume rm -f $(sudo docker volume ls -qf dangling=true) 16 | sudo docker image prune -f 17 | 18 | sudo docker create -it -m=1g --name ${container_name} ${image_name} /bin/bash 19 | sudo docker start ${container_name} 20 | sudo docker cp GCMetastability.java ${container_name}:/gc_artifacts/GCMetastability.java 21 | sleep 2 22 | 23 | sudo docker exec ${container_name} /bin/bash -c " 24 | javac GCMetastability.java && java -XX:MaxHeapSize=${maxheapsize} -XX:+CrashOnOutOfMemoryError -XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -Xloggc:gc.log GCMetastability ${rps} ${trigger_duration} ${experiment_duration} ${auto_detection_enabled} & 25 | sleep 2; 26 | vmid=\$(jps | grep GCMetastability | awk '{print \$1}'); 27 | jstat -gcutil -t \${vmid} 100 > gc.csv; 28 | exit 29 | " 30 | 31 | sudo docker cp ${container_name}:/gc_artifacts/job.csv . 32 | sudo docker cp ${container_name}:/gc_artifacts/gc.log . 33 | sudo docker cp ${container_name}:/gc_artifacts/gc.csv . 34 | sudo docker cp ${container_name}:/gc_artifacts/exp_record.csv . 35 | 36 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/util/memcached_throughput_test.php: -------------------------------------------------------------------------------- 1 | addServer($memcached_server , 11211); 7 | echo "throughput test code\r\n"; 8 | $iter = 10; 9 | $i = 0; 10 | $total_completed_jobs = 0; 11 | $experiment_time_period = 30; 12 | $total_num_db_entries = 34511000; 13 | 14 | while($i < $iter){ 15 | 16 | $x = 0; 17 | $start = microtime(true);//hrtime(true); 18 | 19 | while(1) { 20 | $idx = rand(1, $total_num_db_entries); 21 | $response = $mem_var->get($idx); 22 | #echo $response; 23 | if($response){ 24 | $x++; 25 | // echo "key found!! \r\n"; 26 | } 27 | else{ 28 | //echo "key= " . $idx . " not found\r\n"; 29 | $x++; 30 | } 31 | 32 | $end = microtime(true); 33 | $elapsed_time = ($end - $start); //1000000000; // time in seconds 34 | //echo $elapsed_time . "\r\n"; 35 | if($elapsed_time >= $experiment_time_period){ 36 | break; 37 | } 38 | } 39 | $total_completed_jobs+=$x; 40 | echo "current iteration: " . $i . "\r\n"; 41 | $i++; 42 | } 43 | 44 | 45 | echo "throughput[memcached]: " . (($total_completed_jobs/$iter)/$experiment_time_period) . "\r\n"; 46 | 47 | 48 | ?> 49 | 50 | 51 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/curl/stdcheaders.h: -------------------------------------------------------------------------------- 1 | #ifndef __STDC_HEADERS_H 2 | #define __STDC_HEADERS_H 3 | /*************************************************************************** 4 | * _ _ ____ _ 5 | * Project ___| | | | _ \| | 6 | * / __| | | | |_) | | 7 | * | (__| |_| | _ <| |___ 8 | * \___|\___/|_| \_\_____| 9 | * 10 | * Copyright (C) 1998 - 2010, Daniel Stenberg, , et al. 
11 | * 12 | * This software is licensed as described in the file COPYING, which 13 | * you should have received as part of this distribution. The terms 14 | * are also available at http://curl.haxx.se/docs/copyright.html. 15 | * 16 | * You may opt to use, copy, modify, merge, publish, distribute and/or sell 17 | * copies of the Software, and permit persons to whom the Software is 18 | * furnished to do so, under the terms of the COPYING file. 19 | * 20 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 21 | * KIND, either express or implied. 22 | * 23 | ***************************************************************************/ 24 | 25 | #include 26 | 27 | size_t fread (void *, size_t, size_t, FILE *); 28 | size_t fwrite (const void *, size_t, size_t, FILE *); 29 | 30 | int strcasecmp(const char *, const char *); 31 | int strncasecmp(const char *, const char *, size_t); 32 | 33 | #endif /* __STDC_HEADERS_H */ 34 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/util/db_connect_test.php: -------------------------------------------------------------------------------- 1 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/lookaside_cache_vm.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAgFS/KzmG4Vac4ZcaPXQrUIKo6Ry9qeyJQfHHNKQqSEOFgQBC 3 | 9ELI59uUw4vLKPU19XRuu3Rya6Ik1OoJKBhe77eY3PWgPlXI0p/8u2Obzrccz+ms 4 | 9aWWnV480XdyoW2oYUuCmhHfCNbbtpx5ff7qjGsQCfuCHGBbLg+FhdNSp4mFDHB9 5 | +DxahLYNci1+YcOo1g56zyasNEDkOEq0Nw6nX9nXiM04ctFgHEyeL4klZi9IJ9yo 6 | I9e/zWz+sbt3v3kBtY96MlXtiBJ3IHk830hdQAfWNTnIgMgGvE+P/v2ptn66gVzt 7 | gzIwL/V/knG5txqzbQ1STfAiq+tVsN521ZexRwIDAQABAoIBAFCsde0HYfpgh4jt 8 | g4pkcCJRsTuyXKzozLSO+KroFMiO4ethrbV+EmxnRpLTW6jo1b3zLvVa099D9CGA 9 | 6raw82WBJnCpXM4zAWcfOjLXAIin5k3x2VAng5J0PolzrLU8iWB4pqaSW4JkB4U5 10 | wsppKSuS1mdgG8AY9tjy2v5xBYHZ9mG+0eSWPE5D9iOdhF8U4PC1RZZ6S723cVPs 11 | BDVO/IKMYYJ+GAEHtfyZS9KmkOn4+AsGdQk4q0WDgA0bvtvRw+j7RYqb0CHQsmg0 12 | IXGcapP5ISq99+njeJXBYp3BOisOsGUGVmBA9OSSCoILfyINsAl+HkA1q8sjapTW 13 | Gx7ZCikCgYEAwJyraiGHbQD2bgIMotwTF+6YE/yc6ObtRNGSNLjP+tUBQACqUHlq 14 | rY3ORBactcMiGt+iAnCiXWJ99pjk3e7EVjTeI+MCCIVCOlgLDkWc1gmwMQDkqwhp 15 | FP5zxuS5diEk2IlcKUkMZr8wvz6NOEydjhBoozteWH61d9XZC2X9jkMCgYEAqpB7 16 | 41gYP7m+igJqF7+RtVzdGCbaZ0uKy1FWSfwNouIE85OljAjv8lBcT3jhR2ZLUge+ 17 | 9/J3XXhub0jtfhLXSDvH+6VZO9mxIuJ5nhUoGAou4Ed+rL3J1h0qRN+PWELodd67 18 | veRD8PZ9HNABgQPO0AcF7jiEbiCdJaEBYYmpWq0CgYBI7kmdiwqLRJUKvG3qcKgu 19 | JKKi1sDXEzF+IFha7CucD3FEubvDcuXox0v1zhYSYMuWH11pzRo9f+CwMd5fBlyD 20 | Q8tlDTyk20hjbRjqk4M6LDrxoyPyeuHHGD3gbpSm3Q/licVCwK0YboZXs6inS6NQ 21 | unfUbtNfcMLnmCpxvXGPnwKBgBVZ+OhAwuhXTC7iln1akdbuRQ8c7gI4RqulBvRE 22 | BUD6ojwLo5GgHMlsneGk9C6H863VImbPh3m/9bMgofpCSYjVUveBf2Yqni4Lw8Wy 23 | SE6aF7wjtwvQxk9zXzM3+S3HUmpO85mV1Zt4f7j3oPiRjXYlKuzjTlCq7IElYpLf 24 | NMARAoGAJoDH8tu18u8r+YKkBi886Sp0CPBEO40z8m3U75YQVcGrkqzrSdLH0yby 25 | Ip8GG4e3s1bcXKIQCvxmurdZ5WUvWlS7xMNNM1pveEkQC1qQHJNjme5sf3TM7h/4 26 | ev49gPjAZ+JizWRkl627UH9yslExZC4PZCKURrFP5hlF0xq+nxE= 27 | -----END RSA PRIVATE KEY----- -------------------------------------------------------------------------------- /LookasideCache_Metastability/Memcached codes/warm_up_cache.py: -------------------------------------------------------------------------------- 1 | from pymemcache.client import base 2 | import sys 3 | 4 | 
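# Note: WARM_UP_SIZE below is a placeholder, not valid Python as-is;
# setup_scripts/setup_memcached.sh rewrites that line (via sed) to the requested
# number of cache entries before copying this script to the memcached host.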
print("Cache warmup started") 5 | client = base.Client(('127.0.0.1', 11211)) 6 | warm_up_size = WARM_UP_SIZE 7 | 8 | dummy_string = "10768620+290195+5+5+203+47+515796+469964+290195+6430632+2552315418688618546+5785091512271041250+0+2.34537+4.087147+1.984295+1.49+2.20+2021-11-02+2021-11-01+2021-04-03 11:11:51+2021-08-09 17:05:21+2021-11-23 18:02:40+2021-11-23 18:02:40+13:07:07+04:58:25+2020+2020+quae possimus laboriosam similique soluta reprehenderit est.+magnam eos soluta excepturi.+Lo+Ta+excepturi quae asperiores provident at qui et ex rem omnis.+assumenda consequatur possimus sit voluptate est corporis nulla!+et unde minus distinctio!+excepturi excepturi aspernatur quisquam vitae eaque.+praesentium et quasi assumenda.+atque perferendis quasi vel assumenda!+occaecati quaerat sapiente.+eum pariatur dicta velit quia quia.+est rerum a beatae ullam numquam facere velit.+quia beatae perspiciatis id unde quasi.+et suscipit tempore nihil.+autem sunt sed voluptas qui fugit eum.+ratione placeat consequatur maiores nobis quasi.+dolorum at est et sint voluptas.+Mat+Pet+Michael+Pamela+a+a+blue+blue+0.361+0.000+0.29+0.00+290195+5+47+469964+6430632+5785091512271041250+2.34537+1.984295+2.20+2021-11-01+2021-08-09 17:05:21+2021-11-23 18:02:40+04:58:25+2020+magnam eos soluta excepturi.+Ta+assumenda consequatur possimus sit voluptate est corporis nulla!+excepturi excepturi aspernatur quisquam vitae eaque.+atque perferendis quasi vel assumenda!+eum pariatur dicta velit quia quia.+quia beatae perspiciatis id unde quasi.+autem sunt sed voluptas qui fugit eum.+dolorum at est et sint voluptas.+Pet+Pamela+a+blue+0.000+0.00" 9 | 10 | for i in range(warm_up_size, 1, -1): 11 | result = client.set(str(i), dummy_string) 12 | 13 | print("Cache warmup completed.") -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/setup_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo apt-get update 3 | sudo apt-get -y upgrade 4 | sudo apt-get -y install nginx 5 | 6 | sudo add-apt-repository ppa:ondrej/php -y 7 | sudo yes | sudo apt-get install php7.2-cli php7.2-fpm php7.2-curl php7.2-gd php7.2-mysql php7.2-mbstring zip unzip php7.2-memcache; 8 | sudo cp ../config_files/default /etc/nginx/sites-available/default; 9 | 10 | 11 | sudo sed -i "s/DATABASE_SERVER_IP/$1/" ../NGINX\ Web\ Server/www/html/index.php 12 | sudo sed -i "s/MEMCACHED_SERVER_IP/$2/" ../NGINX\ Web\ Server/www/html/index.php 13 | sudo sed -i "s/DATABASE_QUERY_WEIGHT/$3/" ../NGINX\ Web\ Server/www/html/index.php 14 | 15 | 16 | sudo sed -i "s/DATABASE_SERVER_IP/$1/" ../NGINX\ Web\ Server/www/html/util/db_connect_test.php 17 | sudo sed -i "s/MEMCACHED_SERVER_IP/$2/" ../NGINX\ Web\ Server/www/html/util/memcached_throughput_test.php 18 | 19 | #copy all server codes to appropriate folder 20 | sudo rm -r /var/www/html/* 21 | sudo cp -r ../NGINX\ Web\ Server/www/html/* /var/www/html/ 22 | 23 | # set execution time 24 | # set default execution time 1s 25 | #set default request_termination time 1s 26 | ##/etc/php/7.2/fpm/php.ini max_execution_time 27 | #/etc/php/7.2/fpm/pool.d/www.conf 28 | sudo chmod 777 /etc/php/7.2/fpm/php.ini 29 | sudo chmod 777 /etc/php/7.2/fpm/pool.d/www.conf 30 | 31 | sudo sed -i 's/max_execution_time/;max_execution_time/' /etc/php/7.2/fpm/php.ini 32 | echo "max_execution_time = 1s" >> /etc/php/7.2/fpm/php.ini 33 | 34 | sudo sed -i 's/request_terminate_timeout/;request_terminate_timeout/'
/etc/php/7.2/fpm/pool.d/www.conf 35 | echo "request_terminate_timeout= 1s" >> /etc/php/7.2/fpm/pool.d/www.conf 36 | 37 | sudo service nginx reload 38 | sudo service php7.2-fpm restart 39 | 40 | ssh-keyscan $1 >> $HOME/.ssh/known_hosts 41 | ssh-keyscan $2 >> $HOME/.ssh/known_hosts 42 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/config_files/deb_conf.dat: -------------------------------------------------------------------------------- 1 | Name: mysql-apt-config/dmr-warning 2 | Template: mysql-apt-config/dmr-warning 3 | Owners: mysql-apt-config 4 | 5 | Name: mysql-apt-config/preview-component 6 | Template: mysql-apt-config/preview-component 7 | Value: 8 | Owners: mysql-apt-config 9 | 10 | Name: mysql-apt-config/repo-codename 11 | Template: mysql-apt-config/repo-codename 12 | Value: bionic 13 | Owners: mysql-apt-config 14 | 15 | Name: mysql-apt-config/repo-distro 16 | Template: mysql-apt-config/repo-distro 17 | Value: ubuntu 18 | Owners: mysql-apt-config 19 | 20 | Name: mysql-apt-config/repo-url 21 | Template: mysql-apt-config/repo-url 22 | Owners: mysql-apt-config 23 | 24 | Name: mysql-apt-config/select-preview 25 | Template: mysql-apt-config/select-preview 26 | Value: Disabled 27 | Owners: mysql-apt-config 28 | 29 | Name: mysql-apt-config/select-product 30 | Template: mysql-apt-config/select-product 31 | Value: Ok 32 | Owners: mysql-apt-config 33 | Flags: seen 34 | Variables: 35 | selected_preview = Disabled 36 | selected_server = mysql-5.7 37 | selected_tools = Enabled 38 | 39 | Name: mysql-apt-config/select-server 40 | Template: mysql-apt-config/select-server 41 | Value: mysql-5.7 42 | Owners: mysql-apt-config 43 | Flags: seen 44 | Variables: 45 | installed_server = mysql-5.7 46 | server_versions = mysql-5.7, mysql-8.0 47 | 48 | Name: mysql-apt-config/select-tools 49 | Template: mysql-apt-config/select-tools 50 | Value: Enabled 51 | Owners: mysql-apt-config 52 | 53 | Name: mysql-apt-config/tools-component 54 | Template: mysql-apt-config/tools-component 55 | Value: mysql-tools 56 | Owners: mysql-apt-config 57 | 58 | Name: mysql-apt-config/unsupported-platform 59 | Template: mysql-apt-config/unsupported-platform 60 | Value: ubuntu bionic 61 | Owners: mysql-apt-config 62 | Flags: seen 63 | Variables: 64 | platform = ubuntu focal 65 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/util/database_throughtput_test.php: -------------------------------------------------------------------------------- 1 | free_result(); 39 | $dbh -> close(); 40 | 41 | if($response){ 42 | $x++; 43 | } 44 | 45 | $end = microtime(true); 46 | $elapsed_time = ($end - $start); //1000000000; // time in seconds 47 | //echo $elapsed_time . "\r\n"; 48 | if($elapsed_time >= $experiment_time_period){ 49 | break; 50 | } 51 | } 52 | $total_completed_jobs+=$x; 53 | echo "current iteration: " . $i . "\r\n"; 54 | $i++; 55 | } 56 | 57 | 58 | echo "throughput [database]: " . (($total_completed_jobs/$iter)/$experiment_time_period) . 
"\r\n"; 59 | 60 | ?> 61 | 62 | 63 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/TraceFileGenerator.py: -------------------------------------------------------------------------------- 1 | # import exponential 2 | import numpy as np 3 | from scipy.stats import zipf 4 | import math 5 | import random 6 | import sys 7 | import os.path 8 | import numpy as np 9 | import collections 10 | 11 | row_nums_in_db = 2827946 12 | print("TraceFileGenerator: parameters: " + str(sys.argv[1:])) 13 | 14 | # command line arguments 15 | arrival_rate_parameter = float(sys.argv[1:][0]) 16 | alpha= float(sys.argv[1:][1]) 17 | duration_of_test = int(sys.argv[1:][2]) 18 | 19 | 20 | number_of_arrivals = int(arrival_rate_parameter * duration_of_test) 21 | request_types = np.zeros(number_of_arrivals).astype(int) 22 | job_size_rate_parameter = 100 23 | 24 | f_name = "traces/trace_file_" + str(arrival_rate_parameter) + "_" + str(alpha) +".txt" 25 | f = open(f_name, "w+") 26 | 27 | scale_arrival = 1/arrival_rate_parameter 28 | interarrival_times = np.random.exponential(scale_arrival, number_of_arrivals) 29 | arrival_times = np.cumsum(interarrival_times) 30 | 31 | scale_jobsize = 1/job_size_rate_parameter 32 | job_sizes = np.random.exponential(scale_jobsize, number_of_arrivals).astype(int) 33 | #print(job_sizes) 34 | 35 | # zipf distribution for generating load 36 | zipf_rand_num_iter = 0 37 | job_types = np.zeros(number_of_arrivals) 38 | 39 | while(zipf_rand_num_iter!= number_of_arrivals): 40 | new_zipf_value = np.random.zipf(alpha) 41 | if(new_zipf_value>0 and new_zipf_value<=row_nums_in_db): 42 | job_types[zipf_rand_num_iter] = new_zipf_value 43 | zipf_rand_num_iter+=1 44 | 45 | # job_types_unscaled = np.random.zipf(alpha, number_of_arrivals) #zipf.rvs(alpha, size = number_of_arrivals) 46 | job_types = job_types.astype(int) #(job_types_unscaled/float(max(job_types_unscaled)))*1000 47 | #print(min(job_types)) 48 | #print(max(job_types)) 49 | 50 | 51 | for i in range(0, number_of_arrivals): 52 | string = str(arrival_times[i]) + " " + str(request_types[i]) + " " + str(job_sizes[i]) + " " + str(job_types[i]) + "\n" 53 | f.write(string) 54 | f.close() 55 | 56 | 57 | counter=collections.Counter(job_types) 58 | iter = 0 59 | sum = 0 60 | 61 | for i in counter.keys(): 62 | if(iter == 2000000): 63 | break 64 | #print(str(iter+1) + " " + str(counter[iter+1]/number_of_arrivals)) 65 | sum+= counter[iter+1] 66 | iter+=1 67 | print(sum/number_of_arrivals) 68 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/util/memcache_load_data.php: -------------------------------------------------------------------------------- 1 | addServer("10.158.50.15", 11211); 6 | $servername = "10.158.61.204"; 7 | $username = "webserver"; 8 | $password = "DARK@mark37"; 9 | $total_num_db_entries = 34511000; 10 | 11 | $dbh =mysqli_connect($servername, $username, $password); // $backoff->run(function() { 12 | // return mysqli_connect($servername, $username, $password); 13 | // }); 14 | //connect to MySQL server 15 | if (!$dbh) 16 | die("Unable to connect to MySQL: " . mysqli_error()); 17 | 18 | 19 | for($idx= 1653049 ; $idx< 3451100; $idx++){ 20 | $memcached_response = $mem_var->get($idx); 21 | echo "KEY: " . $idx . "\r\n"; 22 | 23 | if ($memcached_response) { 24 | echo "Data was found in memcached\r\n"; 25 | //echo $memcached_response; 26 | } else { 27 | 28 | //echo "inside else block". 
"\r\n"; 29 | //if connection failed output error message 30 | if (!mysqli_select_db($dbh,'metastable_test_db')) 31 | die("Unable to select database: " . mysqli_error()); 32 | 33 | $sql_stmt = "SELECT * FROM large_test_table where id=" . $idx; 34 | //echo $sql_stmt . "\r\n"; 35 | //SQL select query 36 | $result = mysqli_query($dbh,$sql_stmt); 37 | if (!$result) 38 | die("Database access failed: " . mysqli_error()); 39 | //output error message if query execution failed 40 | //echo "result was fetched.". "\r\n"; 41 | $rows = mysqli_num_rows($result); 42 | //echo "rows was fetched: ". $rows. "\r\n"; 43 | // get number of rows returned 44 | $row = mysqli_fetch_array($result); 45 | $stringified_data = implode("+",$row); 46 | //echo $stringified_data . "\r\n"; 47 | $mem_var->set($idx, $stringified_data) or die(" Key could not be created \r\n"); 48 | //echo "line after mem_set". "\r\n"; 49 | // Free result set 50 | $result -> free_result(); 51 | // echo "line before close". "\r\n"; 52 | //echo "reached here". "\r\n"; 53 | 54 | } 55 | 56 | //echo "reached the end of inner loop". "\r\n"; 57 | } 58 | 59 | $mysqli -> close(); 60 | 61 | ?> 62 | 63 | 64 | -------------------------------------------------------------------------------- /GC_Metastability/plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import pandas as pd 3 | import sys 4 | from matplotlib import pyplot as plt 5 | from mpl_toolkits.axes_grid1 import host_subplot 6 | from mpl_toolkits import axisartist 7 | 8 | def plot_all_in_one(df: pd.DataFrame): 9 | FS=16 10 | fig = plt.gcf() 11 | fig.set_size_inches(10, 3.5) 12 | plt.style.use('seaborn-colorblind') 13 | 14 | host = host_subplot(111, axes_class=axisartist.Axes) 15 | plt.subplots_adjust(right=0.75) 16 | par1 = host.twinx() 17 | par2 = host.twinx() 18 | 19 | par2.axis['right'] = par2.new_fixed_axis(loc='right', offset=(55, 0)) 20 | 21 | par1.axis['right'].toggle(all=True) 22 | par2.axis['right'].toggle(all=True) 23 | 24 | p1, = host.plot(df['timestamp'], df['rps'], label='Requests per second') 25 | p2, = par1.plot(df['timestamp'], df['GCT'], label='GC duration (ms)') 26 | p3, = par2.plot(df['timestamp'], df['qlen'], label='Queue length') 27 | 28 | host.set_xlabel('Time (s)') 29 | host.set_ylabel('Requests per second') 30 | par1.set_ylabel('GC duration (ms)') 31 | par2.set_ylabel('Queue length') 32 | 33 | host.set_xlim(left=0, right=1200) 34 | par1.set_xlim(left=0, right=1200) 35 | par2.set_xlim(left=0, right=1200) 36 | #host.legend() 37 | 38 | host.axis['bottom'].label.set_fontsize(FS) 39 | host.axis['left'].label.set_fontsize(FS) 40 | par1.axis['right'].label.set_fontsize(FS) 41 | par2.axis['right'].label.set_fontsize(FS) 42 | 43 | host.axis['left'].label.set_color(p1.get_color()) 44 | par1.axis['right'].label.set_color(p2.get_color()) 45 | par2.axis['right'].label.set_color(p3.get_color()) 46 | plt.savefig(f'./measurement_plots.png', bbox_inches='tight') 47 | 48 | def main(): 49 | measurements_path = '' 50 | if len(sys.argv) == 2: 51 | measurements_path = sys.argv[1] 52 | else: 53 | measurements_path = 'measurement.csv' # default input file 54 | 55 | df = pd.read_csv(f'./{measurements_path}') 56 | df = df[df['timestamp']>30] #ignore warming up part 57 | df = df.groupby(df['timestamp']//48).mean() 58 | base = df['timestamp'].iloc[0] 59 | df['timestamp'] = df['timestamp'] - base 60 | plot_all_in_one(df) 61 | 62 | if __name__=="__main__": 63 | main() 64 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/TraceReader.cpp: -------------------------------------------------------------------------------- 1 | // TraceReader.cpp - Code for reading trace files. 2 | // 3 | // Copyright (c) 2018 Timothy Zhu. 4 | // Licensed under the MIT License. See LICENSE file for details. 
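// Each line of a trace file, as written by TraceFileGenerator.py, has the form:
//   <arrival_time> <request_type> <request_size> <job_index>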
5 | // 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include "TraceReader.hpp" 14 | #include "time.hpp" 15 | 16 | using namespace std; 17 | 18 | TraceReader::TraceReader(string filename) 19 | : _curIndex(0) 20 | { 21 | double timestamp; // in microseconds 22 | char requestType[32]; 23 | unsigned long requestSize; // in bytes 24 | unsigned long long index; 25 | ifstream file(filename.c_str()); 26 | pthread_mutex_init(&_mutex, NULL); 27 | if (file.is_open()) { 28 | string line; 29 | while (getline(file, line)) { 30 | // Parse line 31 | //cout<<"line = "<< line<<endl; -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/time.hpp: -------------------------------------------------------------------------------- 11 | #include 12 | #include 13 | #include 14 | 15 | using namespace std; 16 | 17 | #define NS_PER_SEC 1000000000ull 18 | 19 | // Converts seconds to nanoseconds 20 | inline uint64_t ConvertSecondsToTime(double sec) 21 | { 22 | return (uint64_t)(sec * (double)NS_PER_SEC); 23 | } 24 | 25 | // Convert nanoseconds to seconds 26 | inline double ConvertTimeToSeconds(uint64_t t) 27 | { 28 | return (double)t / (double)NS_PER_SEC; 29 | } 30 | 31 | // Converts timespec to nanoseconds 32 | inline uint64_t ConvertTimespecToTime(struct timespec* t) 33 | { 34 | return (uint64_t)t->tv_sec * NS_PER_SEC + (uint64_t)t->tv_nsec; 35 | } 36 | 37 | // Converts nanoseconds to timespec 38 | inline void ConvertTimeToTimespec(uint64_t t, struct timespec* out) 39 | { 40 | out->tv_sec = t / NS_PER_SEC; 41 | out->tv_nsec = t % NS_PER_SEC; 42 | } 43 | 44 | // Gets monotonic time 45 | inline uint64_t GetTime() 46 | { 47 | struct timespec now; 48 | clock_gettime(CLOCK_MONOTONIC, &now); 49 | return ConvertTimespecToTime(&now); 50 | } 51 | 52 | // Sleeps until specified time 53 | inline void AbsoluteSleep(uint64_t t) 54 | { 55 | struct timespec request; 56 | ConvertTimeToTimespec(t, &request); 57 | //cout<<"sleep, t= "<< t<<" nano seconds, tv_sec = "<< request.tv_sec <<" seconds" <<endl; 58 | clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &request, NULL); 59 | } -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/index.php: -------------------------------------------------------------------------------- $mem_var->pconnect($memcached_server,11211); 17 | 18 | $memcached_response = $mem_var->get($request_index) ; 19 | if ($memcached_response) { 20 | #echo "Data was found in memcached\r\n"; 21 | //echo $memcached_response; 22 | echo 1; 23 | 24 | } else 25 | { 26 | 27 | $current_index = $request_index; 28 | $dbh =mysqli_connect($servername, $username, $password); 29 | 30 | if (!$dbh){ 31 | // die("Unable to connect to MySQL: " . mysqli_error($dbh)); 32 | echo -94; 33 | exit(); 34 | } 35 | if (!mysqli_select_db($dbh,'metastable_test_db')) { 36 | echo -94; 37 | exit(); 38 | //die("Unable to select database: " . mysqli_error($dbh)); 39 | } 40 | 41 | for ($y = 0; $y < 100 ; $y++){ 42 | 43 | $sql_stmt2 = "SELECT L2.id from large_test_table L1 join large_test_table L2 on L1.tcol04 = L2.id where L1.id = " . $current_index; 44 | $result = mysqli_query($dbh,$sql_stmt2); 45 | 46 | if (!$result){ 47 | echo -95 ; 48 | exit(); 49 | } 50 | 51 | $rows = mysqli_num_rows($result); 52 | 53 | if(!$rows){ 54 | echo -96; 55 | exit(); 56 | } 57 | 58 | $data_array = mysqli_fetch_array($result); 59 | $current_index = $data_array[0]; 60 | } 61 | 62 | 63 | $sql_stmt3 = "SELECT * from large_test_table L1 join large_test_table L2 on L1.tcol04 = L2.id where L1.id = " . $current_index; 64 | $result = mysqli_query($dbh,$sql_stmt3); 65 | $data_array = mysqli_fetch_array($result); 66 | $result -> free_result(); 67 | $dbh -> close(); 68 | 69 | 70 | $stringified_data = implode("+",$data_array); 71 | #echo $stringified_data .
"\n"; 72 | $mem_set_result = $mem_var->set($request_index, $stringified_data); 73 | if(!$mem_set_result){ 74 | echo -97; 75 | exit(); 76 | } 77 | 78 | echo 2; 79 | 80 | } 81 | 82 | $mem_var->close(); 83 | 84 | 85 | ?> 86 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/config_files/default: -------------------------------------------------------------------------------- 1 | ## 2 | # You should look at the following URL's in order to grasp a solid understanding 3 | # of Nginx configuration files in order to fully unleash the power of Nginx. 4 | # https://www.nginx.com/resources/wiki/start/ 5 | # https://www.nginx.com/resources/wiki/start/topics/tutorials/config_pitfalls/ 6 | # https://wiki.debian.org/Nginx/DirectoryStructure 7 | # 8 | # In most cases, administrators will remove this file from sites-enabled/ and 9 | # leave it as reference inside of sites-available where it will continue to be 10 | # updated by the nginx packaging team. 11 | # 12 | # This file will automatically load configuration files provided by other 13 | # applications, such as Drupal or Wordpress. These applications will be made 14 | # available underneath a path with that package name, such as /drupal8. 15 | # 16 | # Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples. 17 | ## 18 | 19 | # Default server configuration 20 | # 21 | server { 22 | listen 80 default_server; 23 | listen [::]:80 default_server; 24 | 25 | # SSL configuration 26 | # 27 | # listen 443 ssl default_server; 28 | # listen [::]:443 ssl default_server; 29 | # 30 | # Note: You should disable gzip for SSL traffic. 31 | # See: https://bugs.debian.org/773332 32 | # 33 | # Read up on ssl_ciphers to ensure a secure configuration. 34 | # See: https://bugs.debian.org/765782 35 | # 36 | # Self signed certs generated by the ssl-cert package 37 | # Don't use them in a production server! 38 | # 39 | # include snippets/snakeoil.conf; 40 | 41 | root /var/www/html; 42 | 43 | # Add index.php to the list if you are using PHP 44 | index index.html index.htm index.nginx-debian.html; 45 | 46 | server_name _; 47 | 48 | location / { 49 | # First attempt to serve request as file, then 50 | # as directory, then fall back to displaying a 404. 51 | try_files $uri $uri/ =404; 52 | proxy_http_version 1.1; 53 | proxy_set_header Connection ""; 54 | } 55 | 56 | # pass PHP scripts to FastCGI server 57 | 58 | location ~ \.php$ { 59 | include snippets/fastcgi-php.conf; 60 | 61 | # With php-fpm (or other unix sockets): 62 | fastcgi_pass unix:/var/run/php/php7.2-fpm.sock; 63 | proxy_http_version 1.1; 64 | proxy_set_header Connection ""; 65 | 66 | # With php-cgi (or other tcp sockets): 67 | # fastcgi_pass 127.0.0.1:9000; 68 | } 69 | 70 | # deny access to .htaccess files, if Apache's document root 71 | # concurs with nginx's one 72 | # 73 | #location ~ /\.ht { 74 | # deny all; 75 | #} 76 | } 77 | 78 | 79 | # Virtual Host configuration for example.com 80 | # 81 | # You can move that to a different file under sites-available/ and symlink that 82 | # to sites-enabled/ to enable it. 
83 | # 84 | #server { 85 | # listen 80; 86 | # listen [::]:80; 87 | # 88 | # server_name example.com; 89 | # 90 | # root /var/www/example.com; 91 | # index index.html; 92 | # 93 | # location / { 94 | # try_files $uri $uri/ =404; 95 | # } 96 | #} 97 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/curl/curlver.h: -------------------------------------------------------------------------------- 1 | #ifndef __CURL_CURLVER_H 2 | #define __CURL_CURLVER_H 3 | /*************************************************************************** 4 | * _ _ ____ _ 5 | * Project ___| | | | _ \| | 6 | * / __| | | | |_) | | 7 | * | (__| |_| | _ <| |___ 8 | * \___|\___/|_| \_\_____| 9 | * 10 | * Copyright (C) 1998 - 2014, Daniel Stenberg, , et al. 11 | * 12 | * This software is licensed as described in the file COPYING, which 13 | * you should have received as part of this distribution. The terms 14 | * are also available at http://curl.haxx.se/docs/copyright.html. 15 | * 16 | * You may opt to use, copy, modify, merge, publish, distribute and/or sell 17 | * copies of the Software, and permit persons to whom the Software is 18 | * furnished to do so, under the terms of the COPYING file. 19 | * 20 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 21 | * KIND, either express or implied. 22 | * 23 | ***************************************************************************/ 24 | 25 | /* This header file contains nothing but libcurl version info, generated by 26 | a script at release-time. This was made its own header file in 7.11.2 */ 27 | 28 | /* This is the global package copyright */ 29 | #define LIBCURL_COPYRIGHT "1996 - 2014 Daniel Stenberg, ." 30 | 31 | /* This is the version number of the libcurl package from which this header 32 | file origins: */ 33 | #define LIBCURL_VERSION "7.35.0" 34 | 35 | /* The numeric version number is also available "in parts" by using these 36 | defines: */ 37 | #define LIBCURL_VERSION_MAJOR 7 38 | #define LIBCURL_VERSION_MINOR 35 39 | #define LIBCURL_VERSION_PATCH 0 40 | 41 | /* This is the numeric version of the libcurl version number, meant for easier 42 | parsing and comparions by programs. The LIBCURL_VERSION_NUM define will 43 | always follow this syntax: 44 | 45 | 0xXXYYZZ 46 | 47 | Where XX, YY and ZZ are the main version, release and patch numbers in 48 | hexadecimal (using 8 bits each). All three numbers are always represented 49 | using two digits. 1.2 would appear as "0x010200" while version 9.11.7 50 | appears as "0x090b07". 51 | 52 | This 6-digit (24 bits) hexadecimal number does not show pre-release number, 53 | and it is always a greater number in a more recent release. It makes 54 | comparisons with greater than and less than work. 55 | */ 56 | #define LIBCURL_VERSION_NUM 0x072300 57 | 58 | /* 59 | * This is the date and time when the full source package was created. The 60 | * timestamp is not stored in git, as the timestamp is properly set in the 61 | * tarballs by the maketgz script. 
62 | * 63 | * The format of the date should follow this template: 64 | * 65 | * "Mon Feb 12 11:35:33 UTC 2007" 66 | */ 67 | #define LIBCURL_TIMESTAMP "Wed Jan 29 07:09:27 UTC 2014" 68 | 69 | #endif /* __CURL_CURLVER_H */ 70 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/curl/mprintf.h: -------------------------------------------------------------------------------- 1 | #ifndef __CURL_MPRINTF_H 2 | #define __CURL_MPRINTF_H 3 | /*************************************************************************** 4 | * _ _ ____ _ 5 | * Project ___| | | | _ \| | 6 | * / __| | | | |_) | | 7 | * | (__| |_| | _ <| |___ 8 | * \___|\___/|_| \_\_____| 9 | * 10 | * Copyright (C) 1998 - 2013, Daniel Stenberg, , et al. 11 | * 12 | * This software is licensed as described in the file COPYING, which 13 | * you should have received as part of this distribution. The terms 14 | * are also available at http://curl.haxx.se/docs/copyright.html. 15 | * 16 | * You may opt to use, copy, modify, merge, publish, distribute and/or sell 17 | * copies of the Software, and permit persons to whom the Software is 18 | * furnished to do so, under the terms of the COPYING file. 19 | * 20 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 21 | * KIND, either express or implied. 22 | * 23 | ***************************************************************************/ 24 | 25 | #include 26 | #include /* needed for FILE */ 27 | 28 | #include "curl.h" 29 | 30 | #ifdef __cplusplus 31 | extern "C" { 32 | #endif 33 | 34 | CURL_EXTERN int curl_mprintf(const char *format, ...); 35 | CURL_EXTERN int curl_mfprintf(FILE *fd, const char *format, ...); 36 | CURL_EXTERN int curl_msprintf(char *buffer, const char *format, ...); 37 | CURL_EXTERN int curl_msnprintf(char *buffer, size_t maxlength, 38 | const char *format, ...); 39 | CURL_EXTERN int curl_mvprintf(const char *format, va_list args); 40 | CURL_EXTERN int curl_mvfprintf(FILE *fd, const char *format, va_list args); 41 | CURL_EXTERN int curl_mvsprintf(char *buffer, const char *format, va_list args); 42 | CURL_EXTERN int curl_mvsnprintf(char *buffer, size_t maxlength, 43 | const char *format, va_list args); 44 | CURL_EXTERN char *curl_maprintf(const char *format, ...); 45 | CURL_EXTERN char *curl_mvaprintf(const char *format, va_list args); 46 | 47 | #ifdef _MPRINTF_REPLACE 48 | # undef printf 49 | # undef fprintf 50 | # undef sprintf 51 | # undef vsprintf 52 | # undef snprintf 53 | # undef vprintf 54 | # undef vfprintf 55 | # undef vsnprintf 56 | # undef aprintf 57 | # undef vaprintf 58 | # define printf curl_mprintf 59 | # define fprintf curl_mfprintf 60 | #ifdef CURLDEBUG 61 | /* When built with CURLDEBUG we define away the sprintf functions since we 62 | don't want internal code to be using them */ 63 | # define sprintf sprintf_was_used 64 | # define vsprintf vsprintf_was_used 65 | #else 66 | # define sprintf curl_msprintf 67 | # define vsprintf curl_mvsprintf 68 | #endif 69 | # define snprintf curl_msnprintf 70 | # define vprintf curl_mvprintf 71 | # define vfprintf curl_mvfprintf 72 | # define vsnprintf curl_mvsnprintf 73 | # define aprintf curl_maprintf 74 | # define vaprintf curl_mvaprintf 75 | #endif 76 | 77 | #ifdef __cplusplus 78 | } 79 | #endif 80 | 81 | #endif /* __CURL_MPRINTF_H */ 82 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/README.md: 
-------------------------------------------------------------------------------- 1 | ## Contact 2 | For the latest updates on this example, please visit https://github.com/SalmanEstyak/Metastability. Please reach out to salman.estyak@psu.edu with any issues. 3 | 4 | ## Summary 5 | 6 | This directory contains the programs needed to reproduce a metastable failure in the widely used lookaside-cache architecture. For more details on how the metastable failure is triggered, please read Section 5.3 of our paper. 7 | 8 | ## System Setup 9 | 10 | We need 4 VMs / servers to run this experiment, one for each of the following parts: 11 | 12 | 1. Load Generator 13 | 2. Web Server 14 | 3. MySQL Server 15 | 4. Memcached Server 16 | 17 | 18 | Please run the following commands (with appropriate parameters) on each of the VMs to configure them and install the necessary packages; a concrete example invocation is given after the four commands below. 19 | 20 | Important Parameters: 21 | 1. WebServer Params: 22 | SQL_IP, CACHE_IP, DATABASE_QUERY_WEIGHT 23 | 2. SqlServer Params: 24 | WEB_IP, DB_ENTRIES 25 | 26 | 3. LoadGenerator Params: 27 | WEB_IP, CACHE_IP, DB_ENTRIES, MAIN_VM_IP 28 | 29 | 4. Memcached Params: 30 | CACHE_MEM_SIZE, CACHE_WARMUP_SIZE 31 | 32 | Web Server VM: 33 |
 sudo apt-get update && git clone https://github.com/SalmanEstyak/Metastability && cd Metastability && cd setup_scripts && sudo chmod +x setup_server.sh && ./setup_server.sh {SQL_IP} {CACHE_IP} {DATABASE_QUERY_WEIGHT} 
34 | 35 | SQL Server VM: 36 |
 sudo apt-get -y install git && sudo git clone https://github.com/SalmanEstyak/Metastability.git && cd Metastability && cd setup_scripts && sudo chmod +x setup_mysql.sh && ./setup_mysql.sh {WEB_IP} {DB_ENTRIES}
37 | 38 | Memcached Server VM: 39 |
 sudo apt-get update && sudo apt-get -y install git && sudo git clone https://github.com/SalmanEstyak/Metastability.git && cd Metastability && cd setup_scripts && sudo chmod +x setup_memcached.sh && ./setup_memcached.sh {CACHE_MEM_SIZE} {CACHE_WARMUP_SIZE}
40 | 41 | Load Generator VM: 42 |
 sudo apt-get update && git clone https://github.com/SalmanEstyak/Metastability.git && cd Metastability && cd setup_scripts && sudo chmod +x setup_client.sh && ./setup_client.sh {WEB_IP} {CACHE_IP} {DB_ENTRIES} {MAIN_VM_IP}
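For concreteness, here is a hypothetical Load Generator invocation. The IP addresses below are made-up placeholders (substitute your own), and 2827946 is a DB_ENTRIES value matching the default row_nums_in_db in LoadGenerator/TraceFileGenerator.py:

```bash
# Hypothetical addresses, for illustration only:
#   WEB_IP=10.0.0.2  CACHE_IP=10.0.0.3  MAIN_VM_IP=10.0.0.1
./setup_client.sh 10.0.0.2 10.0.0.3 2827946 10.0.0.1
```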
43 |
44 |
45 | Once the VMs are set up and every VM has the proper IPs to communicate with (the current implementation assumes that all the VMs share a single SSH key), we can begin running experiments.
46 |
47 |
48 | ## Running Experiments
49 |
50 | In the Load Generator VM, run the following command:
51 |
52 |
sudo python3 run_experiment.py load trigger duration_of_test zipf_parameter num_threads sleep_period_before_trigger closed_loop_test timeout test_type
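For example, a hypothetical invocation (the values below are illustrative only, not tuned recommendations; run_experiment.py requires all nine positional arguments, in the order explained next):

import subprocess

# Illustrative run: 300 req/s for 300 s, a full cache flush 60 s in,
# open-loop replay with 128 TraceReplay threads and a 1 s server timeout.
subprocess.run([
    "sudo", "python3", "run_experiment.py",
    "300",      # load: requests per second
    "-1",       # trigger: -1 drops all cache entries
    "300",      # duration_of_test, in seconds
    "1.00001",  # zipf_parameter
    "128",      # num_threads for TraceReplay
    "60",       # sleep_period_before_trigger, in seconds
    "False",    # closed_loop_test: False = open-loop replay
    "1",        # timeout, in seconds
    "demo",     # test_type: free-form label for the results directory
], check=True)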
53 |
54 | Explanation of each parameter:
55 |
56 | 1. load: requests per second.
57 | 2. trigger: forces a certain drop in the cache hit rate. With -1 as the trigger, all cache entries are dropped.
58 | 3. duration_of_test: experiment run time, in seconds.
59 | 4. zipf_parameter: controls the job popularity distribution.
60 | 5. num_threads: number of threads to be used in TraceReplay (e.g. 64 or 128).
61 | 6. sleep_period_before_trigger: dictates the point in time at which the trigger is applied, in seconds.
62 | 7. closed_loop_test: True runs TraceReplay closed loop (its -c flag); False replays the trace open loop.
63 | 8. timeout: the maximum time a request can run before it gets killed by the server, in seconds.
64 | 9. test_type: a free-form name for the run; result files are copied into a directory with this name.
65 |
--------------------------------------------------------------------------------
/LookasideCache_Metastability/NGINX Web Server/www/html/bkup/_index.php:
--------------------------------------------------------------------------------
1 | setStrategy(function($attempt) {
13 |         echo "attempt: ". $attempt . "\r\n";
14 |         return 10 * $attempt;
15 |     });
16 |     break;
17 |     case "1":
18 |     break;
19 |     default:
20 |     break;
21 |     }
22 |     return null;
23 | }
24 |
25 | //retry_policy(0);
26 |
27 |
28 |
29 | $mem_var = new Memcached();
30 | $mem_var->addServer("10.158.50.15", 11211);
31 | $servername = "10.158.61.204";
32 | $username = "webserver";
33 | $password = "DARK@mark37";
34 |
35 | $request_type = $_GET['request_type'];
36 | $request_size = $_GET['request_size'];
37 | $arrival_time = $_GET['arrival_time'];
38 | $retry_policy = $_GET['retry_policy'];
39 | $request_index = $_GET['request_index'];
40 |
41 | switch ($request_type) {
42 |     case "0":
43 |     $memcached_response = $mem_var->get($request_index);
44 |
45 |     if ($memcached_response) {
46 |         //echo "Data was found in memcached\r\n";
47 |         //echo $memcached_response;
48 |         echo 1;
49 |     } else {
50 |
51 |         // echo "Key not found in memcached. Searching in DB\r\n";
52 |         $dbh = mysqli_connect($servername, $username, $password); // $backoff->run(function() {
53 |         //     return mysqli_connect($servername, $username, $password);
54 |         // });
55 |         if (!$dbh){
56 |             // die("Unable to connect to MySQL: " . mysqli_error($dbh));
57 |             echo -1;
58 |             exit();
59 |         }
60 |         if (!mysqli_select_db($dbh,'metastable_test_db')) {
61 |             echo -2;
62 |             exit();
63 |             //die("Unable to select database: " . mysqli_error($dbh));
64 |         }
65 |
66 |         $sql_stmt = "SELECT * FROM large_test_table where id=" . $request_index;
67 |         //execute sql statement
68 |         $result = mysqli_query($dbh,$sql_stmt);
69 |
70 |         if (!$result){
71 |             echo -3;
72 |             exit();
73 |             //echo "SQL QUERY STRING: ". $sql_stmt. "\r\n";
74 |             //die("Database access failed: " . mysqli_error($dbh));
75 |         }
76 |
77 |         // check if any item is present in DB
78 |         $rows = mysqli_num_rows($result);
79 |         if(!$rows){
80 |             echo -4;
81 |             exit();
82 |             //die("No entry found for Key = " . $request_index);
83 |         }
84 |         else{
85 |             //echo "Data found in db" . "\r\n";
86 |             echo 2;
87 |         }
88 |         $data_array = mysqli_fetch_array($result);
89 |         $stringified_data = implode("+",$data_array);
90 |
91 |         //insert data to memcached
92 |         $mem_set_result = $mem_var->set($request_index, $stringified_data);
93 |         if(!$mem_set_result){
94 |             echo -5;
95 |             exit();
96 |             //die(" Key could not be created, key = ". $request_index. " value= ". $stringified_data.
"\r\n"); 97 | } 98 | else{ 99 | echo 3; 100 | } 101 | 102 | $result -> free_result(); 103 | $dbh -> close(); 104 | } 105 | break; 106 | case "1": 107 | echo "Request type 1!\r\n"; 108 | break; 109 | case "2": 110 | echo "Request type 2!\r\n"; 111 | break; 112 | default: 113 | echo "Unknown Request type!\r\n"; 114 | break; 115 | } 116 | 117 | ?> -------------------------------------------------------------------------------- /Retry_Metastability/client/experiments.sh: -------------------------------------------------------------------------------- 1 | source config.sh 2 | 3 | 4 | #starts up the mongo replica environment 5 | start_servers() { 6 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${primary} 'sudo docker run -p 27017:27017 -d --name primary mongo:4.4.9 --replSet rsmongo' 7 | sleep 5 8 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${primary} 'sudo docker cp ./primary/config.js primary:/config.js' 9 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${secondary1} 'sudo docker run -p 27017:27017 -d --name secondary1 mongo:4.4.9 --replSet rsmongo' 10 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${secondary2} 'sudo docker run -p 27017:27017 -d --name secondary2 mongo:4.4.9 --replSet rsmongo' 11 | sleep 5 12 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${primary} 'sudo docker exec primary mongo localhost:27017 /config.js' 13 | sleep 15 14 | } 15 | 16 | #removes mongo container instances 17 | stop_servers() { 18 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${primary} 'sudo docker stop primary && sudo docker rm primary' 19 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${primary} 'sudo docker volume rm $(sudo docker volume ls -qf dangling=true)' 20 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${secondary1} 'sudo docker stop secondary1 && sudo docker rm secondary1' 21 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${secondary1} 'sudo docker volume rm $(sudo docker volume ls -qf dangling=true)' 22 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${secondary2} 'sudo docker stop secondary2 && sudo docker rm secondary2' 23 | ssh -i $pem_file -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null ubuntu@${secondary2} 'sudo docker volume rm $(sudo docker volume ls -qf dangling=true)' 24 | } 25 | 26 | pause() { 27 | #pauses until log file stops being updated 28 | while : 29 | 30 | do 31 | echo 'waiting to end' 32 | sleep 3 33 | newRead=$(ls -l $d | grep $fn) 34 | echo $newRead 35 | if [ "$read" = "$newRead" ]; then 36 | break 37 | fi 38 | read=$newRead 39 | 40 | done 41 | sleep 5 42 | 43 | } 44 | 45 | exec_experiment() { 46 | initialWait=$1 47 | triggerLength=$2 48 | triggerCpu=$3 49 | interval=$4 50 | n=$5 51 | retry=$6 52 | 53 | fn=$(echo "mongo_${initialWait}_${triggerLength}_${triggerCpu}_${interval}_${retry}.log") 54 | 55 | start_servers 56 | bash experiment.sh -i $interval -s $initialWait -d $triggerLength -t $triggerCpu -r $retry -f $fn -n $n -o 3 -c t1 57 | pause 58 | stop_servers 59 | 60 | } 61 | 62 | stop_servers 63 | 64 | #baseline 65 | echo baseline 66 | 67 | initialWait=60 68 | triggerLength=10 69 | triggerCpu=0.45 70 | interval=75000 71 | n=1000000 72 | retry=4 73 | 74 | exec_experiment $initialWait $triggerLength 
$triggerCpu $interval $n $retry 75 | 76 | 77 | #metastable 78 | echo metastable 79 | initialWait=60 80 | triggerLength=10 81 | triggerCpu=0.40 82 | interval=75000 83 | n=1000000 84 | retry=4 85 | 86 | exec_experiment $initialWait $triggerLength $triggerCpu $interval $n $retry 87 | 88 | #not metastable 9 sec 89 | 90 | 91 | echo not metastable 9 sec 92 | initialWait=60 93 | triggerLength=9 94 | triggerCpu=0.40 95 | interval=75000 96 | n=1000000 97 | retry=4 98 | 99 | exec_experiment $initialWait $triggerLength $triggerCpu $interval $n $retry 100 | 101 | #not metastable reduced rate 102 | 103 | echo not metastable reduced rate 104 | initialWait=60 105 | triggerLength=10 106 | triggerCpu=0.40 107 | interval=100000 108 | n=1000000 109 | retry=4 110 | 111 | exec_experiment $initialWait $triggerLength $triggerCpu $interval $n $retry 112 | 113 | echo done 114 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/TraceReader.hpp: -------------------------------------------------------------------------------- 1 | // // TraceReader.hpp - Class definitions for reading trace files. 2 | // // 3 | // // Copyright (c) 2018 Timothy Zhu. 4 | // // Licensed under the MIT License. See LICENSE file for details. 5 | // // 6 | 7 | // #ifndef _TRACE_READER_HPP 8 | // #define _TRACE_READER_HPP 9 | 10 | // #include 11 | // #include 12 | // #include 13 | // #include 14 | 15 | // using namespace std; 16 | 17 | // struct TraceEntry { 18 | // uint64_t arrivalTime; // in nanoseconds 19 | // string requestType; // request type 20 | // uint32_t requestSize; // in bytes 21 | // }; 22 | 23 | // class BaseTraceReader 24 | // { 25 | // public: 26 | // BaseTraceReader() { } 27 | // virtual ~BaseTraceReader() { } 28 | 29 | // // Fills entry with the next request from the trace. Returns false if end of trace. 30 | // virtual bool nextEntry(TraceEntry& entry) = 0; 31 | // // Resets trace reader back to beginning of trace. 32 | // virtual void reset() = 0; 33 | 34 | // virtual void increment_request_counter() = 0; 35 | // virtual void reset_request_counter() = 0; 36 | // virtual int get_request_count() = 0; 37 | // }; 38 | 39 | // // Reads and stores requests from trace file on construction. 40 | // // Trace file must be in CSV format with one request per line. Each line contains 5 columns: 41 | // // 1) (decimal) arrival time of request in microseconds 42 | // // 2) (string) request type 43 | // // 3) (hex) number of bytes in request 44 | // class TraceReader : public BaseTraceReader 45 | // { 46 | // private: 47 | // pthread_mutex_t _mutex; 48 | // vector _trace; 49 | // unsigned int _curIndex; 50 | // uint64_t _experiment_end_time; 51 | // uint64_t _experiment_start_time; 52 | // uint64_t _request_counter; 53 | 54 | // public: 55 | // TraceReader(uint64_t tracer_start_time); 56 | // virtual ~TraceReader(); 57 | 58 | // virtual bool nextEntry(TraceEntry& entry); 59 | // virtual void reset(); 60 | // virtual void increment_request_counter(); 61 | // virtual void reset_request_counter(); 62 | // virtual int get_request_count(); 63 | // }; 64 | 65 | // #endif // _TRACE_READER_HPP 66 | 67 | // TraceReader.hpp - Class definitions for reading trace files. 68 | // 69 | // Copyright (c) 2018 Timothy Zhu. 70 | // Licensed under the MIT License. See LICENSE file for details. 
71 | //
72 |
73 | #ifndef _TRACE_READER_HPP
74 | #define _TRACE_READER_HPP
75 |
76 | #include <stdint.h>
77 | #include <pthread.h>
78 | #include <string>
79 | #include <vector>
80 |
81 | using namespace std;
82 |
83 | struct TraceEntry {
84 |     uint64_t arrivalTime; // in nanoseconds
85 |     string requestType; // request type
86 |     uint32_t requestSize; // in bytes
87 |     uint64_t index;
88 | };
89 |
90 | class BaseTraceReader
91 | {
92 | public:
93 |     BaseTraceReader() { }
94 |     virtual ~BaseTraceReader() { }
95 |
96 |     // Fills entry with the next request from the trace. Returns false if end of trace.
97 |     virtual bool nextEntry(TraceEntry& entry) = 0;
98 |     // Resets trace reader back to beginning of trace.
99 |     virtual void reset() = 0;
100 | };
101 |
102 | // Reads and stores requests from trace file on construction.
103 | // Trace file must be in CSV format with one request per line. Each line contains 5 columns:
104 | // 1) (decimal) arrival time of request in microseconds
105 | // 2) (string) request type
106 | // 3) (hex) number of bytes in request
107 | class TraceReader : public BaseTraceReader
108 | {
109 | private:
110 |     pthread_mutex_t _mutex;
111 |     vector<TraceEntry> _trace;
112 |     unsigned int _curIndex;
113 |
114 | public:
115 |     TraceReader(string filename);
116 |     virtual ~TraceReader();
117 |
118 |     virtual bool nextEntry(TraceEntry& entry);
119 |     virtual void reset();
120 | };
121 |
122 | #endif // _TRACE_READER_HPP
--------------------------------------------------------------------------------
/LookasideCache_Metastability/LoadGenerator/curl/easy.h:
--------------------------------------------------------------------------------
1 | #ifndef __CURL_EASY_H
2 | #define __CURL_EASY_H
3 | /***************************************************************************
4 | *                                  _   _ ____  _
5 | *  Project                     ___| | | |  _ \| |
6 | *                             / __| | | | |_) | |
7 | *                            | (__| |_| |  _ <| |___
8 | *                             \___|\___/|_| \_\_____|
9 | *
10 | * Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
11 | *
12 | * This software is licensed as described in the file COPYING, which
13 | * you should have received as part of this distribution. The terms
14 | * are also available at http://curl.haxx.se/docs/copyright.html.
15 | *
16 | * You may opt to use, copy, modify, merge, publish, distribute and/or sell
17 | * copies of the Software, and permit persons to whom the Software is
18 | * furnished to do so, under the terms of the COPYING file.
19 | *
20 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
21 | * KIND, either express or implied.
22 | *
23 | ***************************************************************************/
24 | #ifdef __cplusplus
25 | extern "C" {
26 | #endif
27 |
28 | CURL_EXTERN CURL *curl_easy_init(void);
29 | CURL_EXTERN CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
30 | CURL_EXTERN CURLcode curl_easy_perform(CURL *curl);
31 | CURL_EXTERN void curl_easy_cleanup(CURL *curl);
32 |
33 | /*
34 | * NAME curl_easy_getinfo()
35 | *
36 | * DESCRIPTION
37 | *
38 | * Request internal information from the curl session with this function.  The
39 | * third argument MUST be a pointer to a long, a pointer to a char * or a
40 | * pointer to a double (as the documentation describes elsewhere).  The data
41 | * pointed to will be filled in accordingly and can be relied upon only if the
42 | * function returns CURLE_OK.  This function is intended to get used *AFTER* a
43 | * performed transfer, all results from this function are undefined until the
44 | * transfer is completed.
45 | */ 46 | CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...); 47 | 48 | 49 | /* 50 | * NAME curl_easy_duphandle() 51 | * 52 | * DESCRIPTION 53 | * 54 | * Creates a new curl session handle with the same options set for the handle 55 | * passed in. Duplicating a handle could only be a matter of cloning data and 56 | * options, internal state info and things like persistent connections cannot 57 | * be transferred. It is useful in multithreaded applications when you can run 58 | * curl_easy_duphandle() for each new thread to avoid a series of identical 59 | * curl_easy_setopt() invokes in every thread. 60 | */ 61 | CURL_EXTERN CURL* curl_easy_duphandle(CURL *curl); 62 | 63 | /* 64 | * NAME curl_easy_reset() 65 | * 66 | * DESCRIPTION 67 | * 68 | * Re-initializes a CURL handle to the default values. This puts back the 69 | * handle to the same state as it was in when it was just created. 70 | * 71 | * It does keep: live connections, the Session ID cache, the DNS cache and the 72 | * cookies. 73 | */ 74 | CURL_EXTERN void curl_easy_reset(CURL *curl); 75 | 76 | /* 77 | * NAME curl_easy_recv() 78 | * 79 | * DESCRIPTION 80 | * 81 | * Receives data from the connected socket. Use after successful 82 | * curl_easy_perform() with CURLOPT_CONNECT_ONLY option. 83 | */ 84 | CURL_EXTERN CURLcode curl_easy_recv(CURL *curl, void *buffer, size_t buflen, 85 | size_t *n); 86 | 87 | /* 88 | * NAME curl_easy_send() 89 | * 90 | * DESCRIPTION 91 | * 92 | * Sends data over the connected socket. Use after successful 93 | * curl_easy_perform() with CURLOPT_CONNECT_ONLY option. 94 | */ 95 | CURL_EXTERN CURLcode curl_easy_send(CURL *curl, const void *buffer, 96 | size_t buflen, size_t *n); 97 | 98 | #ifdef __cplusplus 99 | } 100 | #endif 101 | 102 | #endif 103 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/setup_scripts/setup_mysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # echo "params: $1 $2" 3 | # sudo chmod 777 /var/cache/debconf/config.dat 4 | # sudo cat ../config_files/deb_conf.dat >> /var/cache/debconf/config.dat 5 | # sudo wget http://repo.mysql.com/mysql-apt-config_0.8.10-1_all.deb 6 | # sudo DEBIAN_FRONTEND=noninteractive dpkg -i mysql-apt-config_0.8.10-1_all.deb 7 | # sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 8 | # sudo apt-get update 9 | # sudo apt update 10 | # sudo apt install -y mysql-client=5.7.*-1ubuntu18.04 11 | # sudo debconf-set-selections <<< 'mysql-community-server mysql-community-server/root-pass password hello@123' 12 | # sudo debconf-set-selections <<< 'mysql-community-server mysql-community-server/re-root-pass password hello@123' 13 | # sudo DEBIAN_FRONTEND=noninteractive apt install -y mysql-community-server=5.7.*-1ubuntu18.04 14 | # sudo DEBIAN_FRONTEND=noninteractive apt install -y mysql-server=5.7.*-1ubuntu18.04 15 | # sudo mysql -u root -phello@123 < init_database.sql 16 | 17 | # sudo sed -i "s/.*bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/mysql.conf.d/mysqld.cnf 18 | # sudo /etc/init.d/mysql stop 19 | # sudo /etc/init.d/mysql start 20 | 21 | # sudo wget https://github.com/Percona-Lab/mysql_random_data_load/releases/download/v0.1.12/mysql_random_data_load_0.1.12_Linux_x86_64.tar.gz 22 | # sudo tar -xvf mysql_random_data_load_* 23 | # ./mysql_random_data_load metastable_test_db large_test_table $2 --user=root --password=hello@123 24 | # wget random data generator , run it with params 25 | # 
construct a mini sql file from here 26 | # for adding server IP, user to mysql 27 | 28 | # adding webserver IP 29 | # sudo sed -i "/SET @a:=/c\SET @a:= $2;" linearize_column_data.sql 30 | # sudo sed -i "s/remote_server_ip/$1/" add_user.sql 31 | # sudo ufw allow 3306 32 | # sudo mysql -u root -phello@123 < add_user.sql 33 | # sudo mysql -u root -phello@123 < linearize_column_data.sql 34 | 35 | # echo "params: $1 $2" 36 | #sudo chmod 777 /var/cache/debconf/config.dat 37 | #sudo cat ../config_files/deb_conf.dat >> /var/cache/debconf/config.dat 38 | #sudo wget http://repo.mysql.com/mysql-apt-config_0.8.10-1_all.deb 39 | #sudo DEBIAN_FRONTEND=noninteractive dpkg -i mysql-apt-config_0.8.10-1_all.deb 40 | #sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 41 | #sudo apt-get update 42 | #sudo apt update 43 | #sudo apt install -y mysql-client=5.7.*-1ubuntu18.04 44 | #sudo debconf-set-selections <<< 'mysql-community-server mysql-community-server/root-pass password hello@123' 45 | #sudo debconf-set-selections <<< 'mysql-community-server mysql-community-server/re-root-pass password hello@123' 46 | #sudo DEBIAN_FRONTEND=noninteractive apt install -y mysql-community-server=5.7.*-1ubuntu18.04 47 | #sudo DEBIAN_FRONTEND=noninteractive apt install -y mysql-server=5.7.*-1ubuntu18.04 48 | #sudo mysql -u root -phello@123 < init_database.sql 49 | 50 | #sudo sed -i "s/.*bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/mysql.conf.d/mysqld.cnf 51 | #sudo /etc/init.d/mysql stop 52 | #sudo /etc/init.d/mysql start 53 | 54 | #sudo wget https://github.com/Percona-Lab/mysql_random_data_load/releases/download/v0.1.12/mysql_random_data_load_0.1.12_Linux_x86_64.tar.gz 55 | #sudo tar -xvf mysql_random_data_load_* 56 | #./mysql_random_data_load metastable_test_db large_test_table $2 --user=root --password=hello@123 57 | # wget random data generator , run it with params 58 | # construct a mini sql file from here 59 | # for adding server IP, user to mysql 60 | 61 | # adding webserver IP 62 | #sudo sed -i "/SET @a:=/c\SET @a:= $2;" linearize_column_data.sql 63 | 64 | #sudo mysql -u root -phello@123 < linearize_column_data.sql 65 | 66 | sudo touch new_user.sql 67 | sudo chmod 777 new_user.sql 68 | sudo echo "CREATE USER 'metastable'@'$1' IDENTIFIED BY 'hello@123';" > new_user.sql 69 | sudo echo "GRANT CREATE, ALTER, DROP, INSERT, UPDATE, DELETE, SELECT, REFERENCES, RELOAD on *.* TO 'metastable'@'$1' WITH GRANT OPTION;" >> new_user.sql 70 | sudo echo "FLUSH PRIVILEGES;" >> new_user.sql 71 | 72 | #sudo ufw allow 3306 73 | sudo mysql -u root -phello@123 < new_user.sql -------------------------------------------------------------------------------- /LookasideCache_Metastability/NGINX Web Server/www/html/util/server_throughput_test.php: -------------------------------------------------------------------------------- 1 | addServer("10.158.50.15", 11211); 7 | $servername = "10.158.61.204"; 8 | $username = "webserver"; 9 | $password = "DARK@mark37"; 10 | 11 | $request_type = "0"; //$_GET['request_type'] 12 | 13 | echo "server throughput test code\r\n"; 14 | $iter = 1; 15 | $i = 0; 16 | $total_completed_jobs = 0; 17 | $experiment_minutes = 2; 18 | $experiment_time_period = 60 * $experiment_minutes; 19 | 20 | $total_hit_in_memcache = 0; 21 | $total_miss = 0; 22 | 23 | function zipf($input) 24 | { 25 | $flag = 0; 26 | $alpha = 1.2; 27 | 28 | while ($flag < 1) 29 | { 30 | $u1 = (rand(1, 1000) / 1000); 31 | $u2 = (rand(1, 1000) / 1000); 32 | $x = floor(pow($u1, (-1 / ($alpha - 1)))); 33 | $t = pow(1 + (1 / $x) 
, $alpha - 1);
34 |         $z = ($t / ($t - 1)) * ((pow(2, $alpha - 1) - 1) / ($u2 * pow(2, $alpha - 1)));
35 |         if ($x <= $z && $x < $input)
36 |         {
37 |             $flag = 1;
38 |         }
39 |     }
40 |     return $x;
41 | }
42 |
43 | while($i < $iter){
44 |
45 |     $x = 0;
46 |     $y = 0;
47 |
48 |     $start = microtime(true);
49 |     while(1) {
50 |         //$idx = rand(1, $total_num_db_entries);
51 |         $idx = zipf($total_num_db_entries);
52 |         switch ($request_type) {
53 |             case "0":
54 |             $memcached_response = $mem_var->get($idx);
55 |             //echo "idx = " . $idx . " memcache_response = " . $memcached_response. "\r\n";
56 |             if ($memcached_response) {
57 |                 $x++;
58 |                 $y++;
59 |             }
60 |             else {
61 |                 //echo "Key not found in memcached. Searching in DB\r\n";
62 |                 $dbh = mysqli_connect($servername, $username, $password); // $backoff->run(function() {
63 |                 //     return mysqli_connect($servername, $username, $password);
64 |                 // });
65 |                 if (!$dbh)
66 |                     die("Unable to connect to MySQL: " . mysqli_error($dbh));
67 |
68 |                 if (!mysqli_select_db($dbh,'metastable_test_db'))
69 |                     die("Unable to select database: " . mysqli_error($dbh));
70 |
71 |                 $sql_stmt = "SELECT * FROM large_test_table where id=" . $idx;
72 |                 //execute sql statement
73 |                 $result = mysqli_query($dbh,$sql_stmt);
74 |
75 |                 if (!$result){
76 |                     echo "SQL QUERY STRING: ". $sql_stmt. "\r\n";
77 |                     die("Database access failed: " . mysqli_error($dbh));
78 |                 }
79 |
80 |                 // check if any item is present in DB
81 |                 $rows = mysqli_num_rows($result);
82 |                 if(!$rows)
83 |                     die("No entry found for Key = " . $idx);
84 |                 else{
85 |                     // echo "Data found in db" . "\r\n";
86 |
87 |                 }
88 |                 $data_array = mysqli_fetch_array($result);
89 |                 $stringified_data = implode("+",$data_array);
90 |
91 |                 //insert data to memcached
92 |                 $mem_set_result = $mem_var->set($idx, $stringified_data);
93 |                 $result -> free_result();
94 |                 $dbh -> close();
95 |                 $x++;
96 |             }
97 |             break;
98 |             case "1":
99 |             echo "Request type 1!\r\n";
100 |             break;
101 |             case "2":
102 |             echo "Request type 2!\r\n";
103 |             break;
104 |             default:
105 |             echo "Unknown Request type!\r\n";
106 |         }
107 |
108 |         $end = microtime(true);
109 |         $elapsed_time = ($end - $start); //1000000000; // time in seconds
110 |
111 |         if($elapsed_time >= $experiment_time_period){
112 |             break;
113 |         }
114 |     }
115 |     echo "iteration ". $i . " finished.\r\n";
116 |     $total_completed_jobs+=$x;
117 |     $total_hit_in_memcache+= $y;
118 |     $i++;
119 | }
120 |
121 | echo "total job completed: ". $total_completed_jobs . " total hit in memcache: " . $total_hit_in_memcache . "\r\n";
122 | echo "hit rate at memcached: " . ($total_hit_in_memcache/$total_completed_jobs) . "\r\n";
123 | echo "throughput [server]: " . (($total_completed_jobs/$iter)/$experiment_time_period) . "\r\n";
124 |
125 | ?>
--------------------------------------------------------------------------------
/Retry_Metastability/README.md:
--------------------------------------------------------------------------------
1 | # Set-up
2 |
3 | For the Mongo experiment, we create 4 virtual machines in AWS: three replica servers and one client machine.
4 | * **Replica servers** AWS EC2 m5a.large, with 2 vCPUs and 8 GiB RAM, called:
5 |   * primary
6 |   * secondary1
7 |   * secondary2
8 |
9 | * **Client** AWS EC2 m5ad.2xlarge, with 8 vCPUs and 32 GiB RAM
10 |
11 | On all machines, the code expects the SSH username to be **ubuntu**.
12 |
13 |
14 | ## AWS Environment Set-up ##
15 | Steps for setting up the VM environment are provided below.
16 |
17 |
18 | ## Client machine
19 | 1.
Create a virtual machine in AWS in a region of your choice
20 | * Amazon Machine Image - Ubuntu Server 20.04 LTS (HVM), SSD Volume Type, 64-bit (x86)
21 | * Instance type - m5ad.2xlarge
22 | * For the security group:
23 |   * Add a custom TCP rule allowing inbound traffic on port 27017
24 |   * Allow SSH inbound
25 | * Install:
26 |   1. Upload the client folder to the client's home directory. It contains the files:
27 |      * mongoC3.go
28 |      * experiment.sh
29 |      * experiments.sh
30 |   2. Upload the private key required to access the primary replica to the client machine
31 |   3. Update experiment.sh with the replica servers' IP addresses (you will need to do this after creating the replica machines; it is recommended to use private IPv4 addresses) and the absolute path of the private key
32 |   4. Change permissions on the pem file
33 |
chmod 400 evaluator.pem
**Or modify for the filename of your uploaded private key file**
34 |   5. Install Go and compile the Go client
35 |
36 |
echo installing go
 37 | curl -OL https://golang.org/dl/go1.16.7.linux-amd64.tar.gz
 38 | sudo tar -C /usr/local -xvf go1.16.7.linux-amd64.tar.gz
 39 | printf '\nexport PATH=$PATH:/usr/local/go/bin\n' >> ~/.profile
 40 | source ~/.profile
 41 | go version
 42 | 
43 | 44 |
echo creating client
 45 | cd client
 46 | go mod init example.com
 47 | go mod tidy
 48 | go build mongoC3.go
 49 | 
50 | 51 | 2. Create replicas (3 total) in the same region as client 52 | * Amazon Machine Image - Ubuntu Server 20.04 LTS (HVM), SSD Volume Type 64-bit(x86) 53 | * Instance type - m5a.large 54 | * For security group: 55 | * Add custom TCP rule inbound allow port 27017 56 | * Allow SSH inbound 57 | Install: 1) Docker on each replica 58 | 59 |
 60 | echo installing docker
 61 | sudo apt-get update
 62 | sudo apt-get install \
 63 |     ca-certificates \
 64 |     curl \
 65 |     gnupg \
 66 |     lsb-release
 67 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
 68 | echo \
 69 |   "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
 70 |   $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
 71 | sudo apt-get update
 72 | yes | sudo apt-get install docker-ce docker-ce-cli containerd.io
 73 | 
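Step 3 below uploads primary/config.js, the mongo-shell script that initiates the replica set (its contents are not reproduced in this README; the replica set name rsmongo matches the --replSet flag the containers are started with). Purely for illustration, a rough Python equivalent of such an initiation, assuming a recent pymongo is installed and using placeholder IPs:

from pymongo import MongoClient

# Hypothetical pymongo counterpart of primary/config.js; the repo itself runs
# config.js through the mongo shell inside the primary container instead.
members = ["PRIMARY_IP:27017", "SECONDARY1_IP:27017", "SECONDARY2_IP:27017"]  # placeholders
config = {
    "_id": "rsmongo",  # must match mongod's --replSet name
    "members": [{"_id": i, "host": h} for i, h in enumerate(members)],
}
client = MongoClient("mongodb://" + members[0], directConnection=True)
client.admin.command("replSetInitiate", config)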
74 |
75 | 3. Customize Primary
76 |    1. Upload the primary folder to the home directory of the primary replica
77 |    2. Modify config.js with the IP addresses of each replica. It is recommended to use the private IPv4 addresses
78 |
79 |
80 | The environment is now set up.
81 |
82 |
83 |
84 | # Usage
85 |
86 | * Log into the client VM.
87 | * Execute
88 |
cd client
 89 | bash experiments.sh
90 |
91 | **NOTE: there is some variability in the outcome of the experiments due to the stochastic nature of replicating a metastable failure. You may need to run the experiments more than once to replicate the behavior observed in the paper.**
92 |
93 | **NOTE: if, after repeating the experiments, you find that the baseline case does not recover after the trigger, you may need to adjust the request frequency up. If the metastable case does recover after the trigger, you may need to adjust the request frequency down. We found that tuning the request frequency may be required to demonstrate the different behaviors of the system.**
94 |
95 |
96 |
97 | * Each experiment corresponds to one experiment performed in the paper.
98 |   1. **baseline** corresponds to a) Baseline with no metastable failure: trigger 10 sec, -78% CPU
99 |   1. **metastable** corresponds to b) Increased trigger magnitude causes metastable failure: trigger 10 sec, -80% CPU
100 |   1. **no_metastable1** corresponds to c) Decreased trigger duration averts metastable failure: trigger 9 sec, -80% CPU
101 |   1. **no_metastable2** corresponds to d) Reduced load averts metastable failure: trigger 10 sec, -80% CPU, -30% RPS
102 |
103 | The output of each experiment is written to a log file in a date-stamped directory inside the client directory. Each log line is tab-delimited with the following fields:
104 |   1. time stamp of the log entry
105 |   1. process id
106 |   1. retry attempts
107 |   1. succ or err (representing a successful or an error request)
108 |   1. latency (microseconds)
109 |
110 | A tool has been provided to plot the results of the experiment. It is available at https://colab.research.google.com/drive/1kt0pWFr98l9FzynWoTXtG9OvR725R7Mv?usp=sharing
111 |
--------------------------------------------------------------------------------
/LookasideCache_Metastability/LoadGenerator/run_experiment.py:
--------------------------------------------------------------------------------
1 | import os
2 | import math
3 | import sys
4 | import subprocess
5 | from threading import Thread
6 | import time
7 | import matplotlib
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | from datetime import datetime
11 | from math import sqrt
12 |
13 | # parse command-line arguments
14 | load= int(sys.argv[1:][0])
15 | trigger= int(sys.argv[1:][1])
16 | duration_of_test=int(sys.argv[1:][2])
17 | zipf_parameter=float(sys.argv[1:][3])
18 | num_threads=int(sys.argv[1:][4])
19 | sleep_period_before_trigger=int(sys.argv[1:][5])
20 | timeout=int(sys.argv[1:][7])
21 | test_type = sys.argv[1:][8]
22 |
23 | if(sys.argv[1:][6] == "False"):
24 |     closed_loop_test= False
25 | else:
26 |     closed_loop_test = True
27 |
28 | args_len = len(sys.argv[1:])
29 |
30 | if(args_len != 9): # note: with fewer than 9 arguments the indexing above fails before this check is reached
31 |     print("enter valid parameters: load, trigger, duration_of_test, zipf_parameter, num_threads, sleep_period_before_trigger, closed_loop_test, timeout, test_type")
32 |     exit()
33 |
34 | def run_trace_replay(_trace_file_name, _num_threads, _result_file_name):
35 |     os.system("make") # rebuild to make sure the latest changes are being used
36 |     run_TraceReplay_command = "./TraceReplay -t {} {} -n {} -r {}".format(_trace_file_name, closed_loop_flag, _num_threads, _result_file_name)
37 |     os.system(run_TraceReplay_command)
38 |
39 |
40 | def calculate_avg_hit_rate(start, end, stats_data):
41 |     cache_hits = 0
42 |     job_completions = 0
43 |
44 |     for t in range(start, end):
45 |         cache_hits+= stats_data[t][1]
46 |         job_completions+= stats_data[t][0]
47 |
48 |     avg_cache_hit_rate = cache_hits/job_completions
49 |     return avg_cache_hit_rate
50 |
51 |
52 | #open / closed loop test config
53 
| if(closed_loop_test): 54 | closed_loop_flag = " -c" 55 | else: 56 | closed_loop_flag = "" 57 | # config for connecting to memcached server VM 58 | user = "ubuntu" 59 | memcached_host = "172.31.1.84" # memcached server 60 | master_vm = "172.31.27.60" 61 | memcached_VM_command = "python3 warm_up_cache.py" 62 | path_to_rsa_key = "cache_workers.pem" 63 | """ 64 | trigger size == -1 denotes a flush_all 65 | and 66 | trigger size == 0 means no trigger, we would expect stable behaviour 67 | """ 68 | # configs for experiment 69 | #zipf_parameter = 1.00001 70 | metastable_stats_array = [] 71 | 72 | # trace replay config 73 | result_file_name = "results_warm_cache/result_300.0.1.00001.txt" 74 | trace_file_name = "traces/trace_file_300.0_1.00001.txt" 75 | 76 | 77 | #plotting config 78 | colors = ['green', 'red'] # red == metastable, green = not metastable 79 | 80 | 81 | _lambda = load 82 | trigger_size = trigger 83 | 84 | print("\n\n--------------------------------------\n\n") 85 | print("testing with lambda = " + str(_lambda)) 86 | 87 | # step 1: warm up cache 88 | try: 89 | cmd = f"ssh -o StrictHostKeyChecking=no -i {path_to_rsa_key} -p 22 {user}@{memcached_host} \"{memcached_VM_command}\"" 90 | out = subprocess.check_output(cmd , shell= True) 91 | print(out) 92 | except subprocess.CalledProcessError as e: 93 | print("error when warming up cache") 94 | pass 95 | 96 | # step 2: generate a new trace, we want new traces even for same config (to reduce bias) 97 | generate_trace_command = 'python3 TraceFileGenerator.py ' + str(_lambda) + ' ' + str(zipf_parameter) + ' ' + str(duration_of_test) 98 | os.system(generate_trace_command) 99 | 100 | # step 3: run the TraceReplay (using & making the process non blocking) 101 | trace_file_name = "traces/trace_file_" + str(float(_lambda)) + "_" + str(float(zipf_parameter)) +".txt" 102 | results_directory = "results_warm_cache/" 103 | result_file_name = "result_" + str(float(_lambda)) + "_" + str(float(zipf_parameter)) + "_DUR_" + str(duration_of_test) + "_TRSZ_" + str(trigger_size) + "_TMOUT_"+ str(timeout) +".txt" 104 | result_file_path = results_directory + result_file_name 105 | trace_replay_thread = Thread(target= run_trace_replay, args=(trace_file_name, num_threads, result_file_path,)) 106 | trace_replay_thread.start() 107 | 108 | # step 4: sleep for 10 s, then run trigger 109 | time.sleep(sleep_period_before_trigger) 110 | 111 | # step 5: run trigger 112 | trigger_command = "python3 trigger_size_k.py " + str(trigger_size) 113 | 114 | for _t in range(0,5): 115 | os.system(trigger_command) 116 | 117 | 118 | # step 6: wait for TraceReplay to finish 119 | trace_replay_thread.join() 120 | 121 | # step 7: process result file to generate file with cache_hit_rate 122 | # call stats only after experiment has finished 123 | process_stats_command = "python3 collect_stats_over_time.py {} {} {} {} {}".format(result_file_path, _lambda, zipf_parameter, trigger_size,duration_of_test) 124 | os.system(process_stats_command) 125 | 126 | os_command = f"ssh-keyscan {memcached_host} >> $HOME/.ssh/known_hosts" 127 | #os_command = f"ssh-keyscan {master_vm} >> $HOME/.ssh/known_hosts" 128 | 129 | try: 130 | # Set scp and ssh data. 131 | connUser = 'ubuntu' 132 | connHost = master_vm 133 | connPath = '/home/ubuntu/Cache_Experiments/Main/' + test_type + "/" + result_file_name 134 | #os_command = f"ssh-keyscan {master_vm} >> $HOME/.ssh/known_hosts" 135 | #os.system(os_command) 136 | # Use scp to send file from local to host. 
137 |     scp_command = "sudo sshpass -p metastability scp -o StrictHostKeyChecking=no " + result_file_path + " {}@{}:{}".format(connUser, connHost, connPath)
138 |     #scp = subprocess.Popen(['scp', '-i', connPrivateKey, result_file_path, '{}@{}:{}'.format(connUser, connHost, connPath)])
139 |     os.system(scp_command)
140 | except subprocess.CalledProcessError:
141 |     print('ERROR: Connection to host failed!')
142 |
--------------------------------------------------------------------------------
/LookasideCache_Metastability/LoadGenerator/collect_stats_over_time.py:
--------------------------------------------------------------------------------
1 | import os
2 | from os import listdir
3 | from os.path import isfile, join
4 | import math
5 | import matplotlib.pyplot as plt
6 | import numpy as np
7 | import sys
8 | from datetime import datetime
9 | import time
10 |
11 | kill_timeout_for_php = 1
12 | # helper functions
13 |
14 | def plot_data( x_points, y_points, file_with_image_extension):
15 |     f, ax = plt.subplots(1)
16 |     plt.plot(x_points, y_points)
17 |     # plt.plot(time_points, latency_points)
18 |     ax.set_ylim(ymin=0)
19 |
20 |
21 |     image_directory = "result_plots"
22 |     current_directory = os.getcwd()
23 |     final_image_directory = os.path.join(current_directory, r'result_plots')
24 |
25 |     if not os.path.exists(final_image_directory):
26 |         os.makedirs(final_image_directory)
27 |
28 |     image_file_path = join(image_directory, file_with_image_extension) # combining file name with directory
29 |     plt.savefig( image_file_path, bbox_inches='tight')
30 |
31 | def make_patch_spines_invisible(ax):
32 |     ax.set_frame_on(True)
33 |     ax.patch.set_visible(False)
34 |     for sp in ax.spines.values():
35 |         sp.set_visible(False)
36 |
37 |
38 | def plot_multiple_data( x_points, y_points1, y_points2, y_points3, file_with_image_extension):
39 |     # f, ax = plt.subplots()
40 |     # plt.plot(x_points, y_points)
41 |     # plt.plot(time_points, latency_points)
42 |     #ax.set_ylim(ymin=0)
43 |     fig, ax1 = plt.subplots()
44 |
45 |     color = 'tab:red'
46 |     ax1.set_xlabel('time (s)')
47 |     ax1.set_ylabel('latency(ns)', color='black')
48 |     ax1.plot(x_points, y_points1, color=color, linewidth = 2 , alpha= 0.6, label = "latency")
49 |     ax1.tick_params(axis='y', labelcolor='black')
50 |
51 |     ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
52 |
53 |     color = 'tab:blue'
54 |     ax2.set_ylabel('hit rate/error rate', color='black')  # we already handled the x-label with ax1
55 |     ax2.plot(x_points, y_points2, color=color, linewidth = 2, alpha= 0.6, label = "cache hit rate")
56 |     color = 'tab:orange'
57 |     ax2.plot(x_points, y_points3, '--', color=color, linewidth = 2, alpha= 0.6, label = "error rate")
58 |     ax2.set_ylim(ymin = 0, ymax = 1)
59 |     # plt.arrow(x=10, y=0, dx=0, dy=5, width=.08, facecolor='red')
60 |     ax2.tick_params(axis='y', labelcolor='black')
61 |     fig.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
62 |         mode="expand", borderaxespad=0, ncol=3)
63 |     fig.tight_layout()  # otherwise the right y-label is slightly clipped
64 |
65 |     image_directory = "result_plots"
66 |     current_directory = os.getcwd()
67 |     final_image_directory = os.path.join(current_directory, r'result_plots')
68 |
69 |     if not os.path.exists(final_image_directory):
70 |         os.makedirs(final_image_directory)
71 |
72 |     image_file_path = join(image_directory, file_with_image_extension) # combining file name with directory
73 |     plt.savefig( image_file_path, bbox_inches='tight')
74 |
75 |
76 | ## helper functions end
77 |
78 |
79 |
80 |
81 |
82 | args_len = len(sys.argv[1:])
83 |
84 | if(args_len != 5):
85 |     print("enter valid parameters: result file path from TraceReplay, arrival_rate, alpha (zipf parameter), trigger_size, test_duration")
86 |     exit()
87 |
88 | file_name = sys.argv[1:][0]
89 | arrival_rate = sys.argv[1:][1]
90 | alpha = sys.argv[1:][2]
91 | trigger_size = sys.argv[1:][3]
92 | test_duration = sys.argv[1:][4]
93 |
94 | ns_in_a_sec = 1000000000
95 | num_seconds = -1
96 | hit_rates = [0] * 100000 # upper bound, assuming the experiment runs for at most 100000 seconds
97 | error_rates = [0] * 100000
98 | job_completions = [0] * 100000
99 | latency_per_second = [0] * 100000
100 |
101 | with open(file_name) as file:
102 |     first_line = file.readline()
103 |     first_line = first_line.strip()
104 |     experiment_start_time = int(first_line)
105 |     for line in file:
106 |         split_line = line.split(" ")
107 |         stripped = [s.strip() for s in split_line]
108 |         start_time = int(stripped[0])
109 |         duration = int(stripped[1])
110 |         end_time = (start_time + duration) - experiment_start_time
111 |         cache_hits = int(stripped[2])
112 |         errors = int(stripped[3])
113 |
114 |         t_th_second = math.ceil(end_time/ns_in_a_sec)
115 |         num_seconds = max( num_seconds , t_th_second)
116 |         hit_rates[ t_th_second ] += cache_hits # cache_hits will be either 0 or 1
117 |         error_rates[ t_th_second ] += errors # error will be either 0 or 1
118 |         job_completions[ t_th_second ]+= 1 # as each entry corresponds to a job completion
119 |         latency_per_second[t_th_second]+=duration
120 |
121 | hit_rates = hit_rates[0: num_seconds+1]
122 | error_rates = error_rates[0: num_seconds+1]
123 | latency_per_second = latency_per_second[0: num_seconds +1]
124 | time_points = [0] * (num_seconds + 1)
125 |
126 |
127 |
128 | current_directory = os.getcwd()
129 | stats_directory = "result_stats"
130 | final_stats_directory = os.path.join(current_directory, r'result_stats')
131 |
132 | if not os.path.exists(final_stats_directory):
133 |     os.makedirs(final_stats_directory)
134 | stats_file_name = "STATS_ARV_RATE_{}_ALPHA_{}_TSZ_{}_DUR_{}.txt".format(arrival_rate, alpha, trigger_size, test_duration)
135 | stats_file_path = os.path.join( final_stats_directory, stats_file_name)
136 |
137 | stats_file = open(stats_file_path, "w")
138 | for k in range(0, num_seconds + 1):
139 |     stats_file.write( str(job_completions[k]) + " " + str(hit_rates[k]) + " " + str(error_rates[k]) + "\n")
140 |
141 |
142 | for j in range (0, num_seconds+1):
143 |     if(job_completions[j]!= 0):
144 |         hit_rates[j]/= job_completions[j]
145 |         error_rates[j]/= job_completions[j]
146 |         latency_per_second[j]/= job_completions[j]
147 |     time_points[j] = j
148 |
149 | # print("max cache hit rate: " + str(max( hit_rates)))
150 | # print("max cache hit rate index : " + str(hit_rates.index(max(hit_rates))))
151 | # print("job completions : " + str(job_completions[1]))
152 |
153 | hit_rate_points = np.array( hit_rates)
154 | error_rate_points = np.array( error_rates)
155 | time_points = np.array(time_points)
156 | latency_points = np.array(latency_per_second)
157 | today_date_time = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
158 | H_file_with_image_extension = f"H_IMG_ARV_RATE_{arrival_rate}_ALPHA_{alpha}_TSZ_{trigger_size}_DUR_{test_duration}_{today_date_time}_TO_{kill_timeout_for_php}.png"
159 | L_file_with_image_extension = f"L_IMG_ARV_RATE_{arrival_rate}_ALPHA_{alpha}_TSZ_{trigger_size}_DUR_{test_duration}_{today_date_time}_TO_{kill_timeout_for_php}.png"
160 | E_file_with_image_extension =
f"E_IMG_ARV_RATE_{arrival_rate}_ALPHA_{alpha}_TSZ_{trigger_size}_DUR_{test_duration}_{today_date_time}_TO_{kill_timeout_for_php}.png" 161 | C_file_with_image_extension = f"C_IMG_ARV_RATE_{arrival_rate}_ALPHA_{alpha}_TSZ_{trigger_size}_DUR_{test_duration}_{today_date_time}_TO_{kill_timeout_for_php}.png" 162 | plot_data(time_points, hit_rate_points, H_file_with_image_extension) 163 | plot_data(time_points, error_rate_points, E_file_with_image_extension) 164 | plot_data(time_points, latency_points, L_file_with_image_extension) 165 | plot_multiple_data(time_points, latency_points, hit_rate_points, error_rate_points, C_file_with_image_extension) 166 | # store completions cache_hit_rate error_rate 167 | 168 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/curl/curlbuild.h: -------------------------------------------------------------------------------- 1 | /* include/curl/curlbuild.h. Generated from curlbuild.h.in by configure. */ 2 | #ifndef __CURL_CURLBUILD_H 3 | #define __CURL_CURLBUILD_H 4 | /*************************************************************************** 5 | * _ _ ____ _ 6 | * Project ___| | | | _ \| | 7 | * / __| | | | |_) | | 8 | * | (__| |_| | _ <| |___ 9 | * \___|\___/|_| \_\_____| 10 | * 11 | * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. 12 | * 13 | * This software is licensed as described in the file COPYING, which 14 | * you should have received as part of this distribution. The terms 15 | * are also available at http://curl.haxx.se/docs/copyright.html. 16 | * 17 | * You may opt to use, copy, modify, merge, publish, distribute and/or sell 18 | * copies of the Software, and permit persons to whom the Software is 19 | * furnished to do so, under the terms of the COPYING file. 20 | * 21 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 22 | * KIND, either express or implied. 23 | * 24 | ***************************************************************************/ 25 | 26 | /* ================================================================ */ 27 | /* NOTES FOR CONFIGURE CAPABLE SYSTEMS */ 28 | /* ================================================================ */ 29 | 30 | /* 31 | * NOTE 1: 32 | * ------- 33 | * 34 | * Nothing in this file is intended to be modified or adjusted by the 35 | * curl library user nor by the curl library builder. 36 | * 37 | * If you think that something actually needs to be changed, adjusted 38 | * or fixed in this file, then, report it on the libcurl development 39 | * mailing list: http://cool.haxx.se/mailman/listinfo/curl-library/ 40 | * 41 | * This header file shall only export symbols which are 'curl' or 'CURL' 42 | * prefixed, otherwise public name space would be polluted. 43 | * 44 | * NOTE 2: 45 | * ------- 46 | * 47 | * Right now you might be staring at file include/curl/curlbuild.h.in or 48 | * at file include/curl/curlbuild.h, this is due to the following reason: 49 | * 50 | * On systems capable of running the configure script, the configure process 51 | * will overwrite the distributed include/curl/curlbuild.h file with one that 52 | * is suitable and specific to the library being configured and built, which 53 | * is generated from the include/curl/curlbuild.h.in template file. 
54 | *
55 | */
56 |
57 | /* ================================================================ */
58 | /*    DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE */
59 | /* ================================================================ */
60 |
61 | #ifdef CURL_SIZEOF_LONG
62 | #error "CURL_SIZEOF_LONG shall not be defined except in curlbuild.h"
63 | Error Compilation_aborted_CURL_SIZEOF_LONG_already_defined
64 | #endif
65 |
66 | #ifdef CURL_TYPEOF_CURL_SOCKLEN_T
67 | #error "CURL_TYPEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
68 | Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_already_defined
69 | #endif
70 |
71 | #ifdef CURL_SIZEOF_CURL_SOCKLEN_T
72 | #error "CURL_SIZEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
73 | Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_already_defined
74 | #endif
75 |
76 | #ifdef CURL_TYPEOF_CURL_OFF_T
77 | #error "CURL_TYPEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
78 | Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_already_defined
79 | #endif
80 |
81 | #ifdef CURL_FORMAT_CURL_OFF_T
82 | #error "CURL_FORMAT_CURL_OFF_T shall not be defined except in curlbuild.h"
83 | Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_already_defined
84 | #endif
85 |
86 | #ifdef CURL_FORMAT_CURL_OFF_TU
87 | #error "CURL_FORMAT_CURL_OFF_TU shall not be defined except in curlbuild.h"
88 | Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_already_defined
89 | #endif
90 |
91 | #ifdef CURL_FORMAT_OFF_T
92 | #error "CURL_FORMAT_OFF_T shall not be defined except in curlbuild.h"
93 | Error Compilation_aborted_CURL_FORMAT_OFF_T_already_defined
94 | #endif
95 |
96 | #ifdef CURL_SIZEOF_CURL_OFF_T
97 | #error "CURL_SIZEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
98 | Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_already_defined
99 | #endif
100 |
101 | #ifdef CURL_SUFFIX_CURL_OFF_T
102 | #error "CURL_SUFFIX_CURL_OFF_T shall not be defined except in curlbuild.h"
103 | Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_already_defined
104 | #endif
105 |
106 | #ifdef CURL_SUFFIX_CURL_OFF_TU
107 | #error "CURL_SUFFIX_CURL_OFF_TU shall not be defined except in curlbuild.h"
108 | Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_already_defined
109 | #endif
110 |
111 | /* ================================================================ */
112 | /*  EXTERNAL INTERFACE SETTINGS FOR CONFIGURE CAPABLE SYSTEMS ONLY  */
113 | /* ================================================================ */
114 |
115 | /* Configure process defines this to 1 when it finds out that system */
116 | /* header file ws2tcpip.h must be included by the external interface. */
117 | /* #undef CURL_PULL_WS2TCPIP_H */
118 | #ifdef CURL_PULL_WS2TCPIP_H
119 | #  ifndef WIN32_LEAN_AND_MEAN
120 | #    define WIN32_LEAN_AND_MEAN
121 | #  endif
122 | #  include <windows.h>
123 | #  include <winsock2.h>
124 | #  include <ws2tcpip.h>
125 | #endif
126 |
127 | /* Configure process defines this to 1 when it finds out that system */
128 | /* header file sys/types.h must be included by the external interface. */
129 | #define CURL_PULL_SYS_TYPES_H 1
130 | #ifdef CURL_PULL_SYS_TYPES_H
131 | #  include <sys/types.h>
132 | #endif
133 |
134 | /* Configure process defines this to 1 when it finds out that system */
135 | /* header file stdint.h must be included by the external interface.
*/
136 | /* #undef CURL_PULL_STDINT_H */
137 | #ifdef CURL_PULL_STDINT_H
138 | #  include <stdint.h>
139 | #endif
140 |
141 | /* Configure process defines this to 1 when it finds out that system */
142 | /* header file inttypes.h must be included by the external interface. */
143 | /* #undef CURL_PULL_INTTYPES_H */
144 | #ifdef CURL_PULL_INTTYPES_H
145 | #  include <inttypes.h>
146 | #endif
147 |
148 | /* Configure process defines this to 1 when it finds out that system */
149 | /* header file sys/socket.h must be included by the external interface. */
150 | #define CURL_PULL_SYS_SOCKET_H 1
151 | #ifdef CURL_PULL_SYS_SOCKET_H
152 | #  include <sys/socket.h>
153 | #endif
154 |
155 | /* Configure process defines this to 1 when it finds out that system */
156 | /* header file sys/poll.h must be included by the external interface. */
157 | /* #undef CURL_PULL_SYS_POLL_H */
158 | #ifdef CURL_PULL_SYS_POLL_H
159 | #  include <sys/poll.h>
160 | #endif
161 |
162 | /* The size of `long', as computed by sizeof. */
163 | #define CURL_SIZEOF_LONG 8
164 |
165 | /* Integral data type used for curl_socklen_t. */
166 | #define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
167 |
168 | /* The size of `curl_socklen_t', as computed by sizeof. */
169 | #define CURL_SIZEOF_CURL_SOCKLEN_T 4
170 |
171 | /* Data type definition of curl_socklen_t. */
172 | typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t;
173 |
174 | /* Signed integral data type used for curl_off_t. */
175 | #define CURL_TYPEOF_CURL_OFF_T long
176 |
177 | /* Data type definition of curl_off_t. */
178 | typedef CURL_TYPEOF_CURL_OFF_T curl_off_t;
179 |
180 | /* curl_off_t formatting string directive without "%" conversion specifier. */
181 | #define CURL_FORMAT_CURL_OFF_T "ld"
182 |
183 | /* unsigned curl_off_t formatting string without "%" conversion specifier. */
184 | #define CURL_FORMAT_CURL_OFF_TU "lu"
185 |
186 | /* curl_off_t formatting string directive with "%" conversion specifier. */
187 | #define CURL_FORMAT_OFF_T "%ld"
188 |
189 | /* The size of `curl_off_t', as computed by sizeof. */
190 | #define CURL_SIZEOF_CURL_OFF_T 8
191 |
192 | /* curl_off_t constant suffix. */
193 | #define CURL_SUFFIX_CURL_OFF_T L
194 |
195 | /* unsigned curl_off_t constant suffix.
*/ 196 | #define CURL_SUFFIX_CURL_OFF_TU UL 197 | 198 | #endif /* __CURL_CURLBUILD_H */ 199 | -------------------------------------------------------------------------------- /GC_Metastability/analyze.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import pandas as pd 3 | import sys 4 | import pdb 5 | import math 6 | from enum import Enum 7 | 8 | TIME_WINDOW = 1000 #in ms 9 | 10 | #event types 11 | class E(Enum): 12 | arrival = 1 13 | start = 2 14 | completion = 3 15 | 16 | def analyze_job_table(job_tbl_path: str)->pd.DataFrame: 17 | job_df = pd.read_csv(job_tbl_path) 18 | 19 | job_stats = {'timestamp':[], 'qlen':[], 'latency':[]} 20 | 21 | #event driven programming: calculating qlen at each timestamp 22 | arrival_df = job_df[['arrival_uptime']].copy() 23 | arrival_df['event_type'] = E.arrival.value 24 | arrival_df.rename(columns={'arrival_uptime': 't'}, inplace = True) 25 | 26 | completion_df = job_df[['completion_uptime']].copy() 27 | completion_df['event_type'] = E.completion.value 28 | completion_df.rename(columns={'completion_uptime': 't'}, inplace = True) 29 | 30 | events_df = pd.concat([arrival_df, completion_df]) 31 | events_df.sort_values(by=['t'], inplace = True) 32 | events_df = events_df[events_df['t'] > 0] 33 | 34 | rps_stats = {'timestamp':[], 'rps':[]} 35 | #calculate arrival_rate 36 | arrival_uptime_df = job_df[['arrival_uptime']].copy() 37 | arrival_uptime_sorted = job_df.sort_values(by=['arrival_uptime']) 38 | 39 | curr_ts = 0 40 | accu_num_reqs = 0 41 | for index, row in arrival_uptime_df.iterrows(): 42 | if curr_ts < math.floor(row['arrival_uptime']/TIME_WINDOW): 43 | rps_stats['timestamp'].append(curr_ts) 44 | rps_stats['rps'].append(accu_num_reqs) 45 | accu_num_reqs = 0 46 | curr_ts = math.floor(row['arrival_uptime']/TIME_WINDOW) 47 | accu_num_reqs += 1 48 | 49 | #calculate qlen using arrival_uptime and completion_uptime 50 | curr_ts = 0 51 | curr_qlen = 0 52 | for index, row in events_df.iterrows(): 53 | curr_ts = math.floor(row['t']/TIME_WINDOW) 54 | if curr_ts not in job_stats['timestamp']: 55 | job_stats['timestamp'].append(curr_ts) 56 | job_stats['qlen'].append(curr_qlen) 57 | if row['event_type'] == E.arrival.value: 58 | curr_qlen += 1 59 | elif row['event_type'] == E.completion.value: 60 | curr_qlen -= 1 61 | else: 62 | print(f"Error! Unknown event type: {row['event_type']}") 63 | 64 | #calculate latency 65 | job_df_sorted_by_completion = job_df.sort_values(by=['completion_uptime']) 66 | curr_ts = 0 67 | completed_job_counts = 0 68 | accumulated_latency = 0 69 | for index, row in job_df_sorted_by_completion.iterrows(): 70 | if curr_ts < math.floor(row['completion_uptime']/TIME_WINDOW): 71 | job_stats['latency'].append(accumulated_latency/completed_job_counts if completed_job_counts>0 else 0) 72 | accumulated_latency = 0 73 | completed_job_counts = 0 74 | curr_ts = math.floor(row['completion_uptime']/TIME_WINDOW) 75 | assert curr_ts in job_stats['timestamp'], f'Error! 
curr_ts: {curr_ts} not in job_stats' 76 | if row['completion_t'] > 0: 77 | completed_job_counts += 1 78 | accumulated_latency += (row['completion_t'] - row['arrival_t']) 79 | 80 | min_len = min(len(job_stats['timestamp']), len(job_stats['qlen']), len(job_stats['latency'])) 81 | job_stats['timestamp'] = job_stats['timestamp'][:min_len] 82 | job_stats['qlen'] = job_stats['qlen'][:min_len] 83 | job_stats['latency'] = job_stats['latency'][:min_len] 84 | 85 | job_stats_df = pd.DataFrame(job_stats) 86 | job_stats_df['latency'] = (job_stats_df['latency']/1e6).round(3) #in ms 87 | job_stats_df.rename(columns={'latency': 'latency_ms'}, inplace = True) 88 | 89 | rps_stats_df = pd.DataFrame(rps_stats) 90 | merged_df = job_stats_df.merge(rps_stats_df, on='timestamp', how='outer') 91 | return merged_df 92 | 93 | def analyze_gcutil_table(gcutil_tbl_path: str)->pd.DataFrame: 94 | df = pd.read_csv(gcutil_tbl_path, delim_whitespace=True) 95 | gc_stats ={'timestamp':[], 'S0':[], 'S1':[], 'E':[], 'O':[], 'M':[], 'YGC':[], 'YGCT':[], 'FGC':[], 'FGCT':[], 'GCT':[]} 96 | last_YGC = 0 97 | last_YGCT = 0 98 | last_FGC = 0 99 | last_FGCT = 0 100 | last_GCT = 0 101 | for index, row in df.iterrows(): 102 | if row['Timestamp'] == int(row['Timestamp']): #TODO: deal with possible cases of missing data 103 | gc_stats['timestamp'].append(row['Timestamp']) 104 | gc_stats['S0'].append(row['S0']) 105 | gc_stats['S1'].append(row['S1']) 106 | gc_stats['E'].append(row['E']) 107 | gc_stats['O'].append(row['O']) 108 | gc_stats['M'].append(row['M']) 109 | gc_stats['YGC'].append(row['YGC'] - last_YGC) 110 | last_YGC = row['YGC'] 111 | gc_stats['YGCT'].append(row['YGCT'] - last_YGCT) 112 | last_YGCT = row['YGCT'] 113 | gc_stats['FGC'].append(row['FGC'] - last_FGC) 114 | last_FGC = row['FGC'] 115 | gc_stats['FGCT'].append(row['FGCT'] - last_FGCT) 116 | last_FGCT = row['FGCT'] 117 | gc_stats['GCT'].append(row['GCT'] - last_GCT) 118 | last_GCT = row['GCT'] 119 | 120 | gc_stats_df = pd.DataFrame(gc_stats) 121 | gc_stats_df['timestamp'] = gc_stats_df['timestamp'].astype('int64') 122 | gc_stats_df['YGCT'] = (1000*gc_stats_df['YGCT']).round(1) #in ms 123 | gc_stats_df['FGCT'] = (1000*gc_stats_df['FGCT']).round(1) #in ms 124 | gc_stats_df['GCT'] = (1000*gc_stats_df['GCT']).round(1) #in ms 125 | 126 | return gc_stats_df 127 | 128 | def main(): 129 | job_tbl_path = '' 130 | gc_tbl_path = '' 131 | 132 | if len(sys.argv) != 3: 133 | print(f"Please give a job table and a GC table:") 134 | sys.exit(0) 135 | else: 136 | job_tbl_path = sys.argv[1] 137 | gc_tbl_path = sys.argv[2] 138 | 139 | job_df = analyze_job_table(job_tbl_path) 140 | gc_df = analyze_gcutil_table(gc_tbl_path) 141 | measurement_df = pd.merge(job_df, gc_df, how='right') 142 | 143 | measurement_df.to_csv('measurement.csv', index=False) 144 | 145 | if __name__=="__main__": 146 | main() 147 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/curl/curlrules.h: -------------------------------------------------------------------------------- 1 | #ifndef __CURL_CURLRULES_H 2 | #define __CURL_CURLRULES_H 3 | /*************************************************************************** 4 | * _ _ ____ _ 5 | * Project ___| | | | _ \| | 6 | * / __| | | | |_) | | 7 | * | (__| |_| | _ <| |___ 8 | * \___|\___/|_| \_\_____| 9 | * 10 | * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. 
11 | * 12 | * This software is licensed as described in the file COPYING, which 13 | * you should have received as part of this distribution. The terms 14 | * are also available at http://curl.haxx.se/docs/copyright.html. 15 | * 16 | * You may opt to use, copy, modify, merge, publish, distribute and/or sell 17 | * copies of the Software, and permit persons to whom the Software is 18 | * furnished to do so, under the terms of the COPYING file. 19 | * 20 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 21 | * KIND, either express or implied. 22 | * 23 | ***************************************************************************/ 24 | 25 | /* ================================================================ */ 26 | /* COMPILE TIME SANITY CHECKS */ 27 | /* ================================================================ */ 28 | 29 | /* 30 | * NOTE 1: 31 | * ------- 32 | * 33 | * All checks done in this file are intentionally placed in a public 34 | * header file which is pulled by curl/curl.h when an application is 35 | * being built using an already built libcurl library. Additionally 36 | * this file is also included and used when building the library. 37 | * 38 | * If compilation fails on this file it is certainly sure that the 39 | * problem is elsewhere. It could be a problem in the curlbuild.h 40 | * header file, or simply that you are using different compilation 41 | * settings than those used to build the library. 42 | * 43 | * Nothing in this file is intended to be modified or adjusted by the 44 | * curl library user nor by the curl library builder. 45 | * 46 | * Do not deactivate any check, these are done to make sure that the 47 | * library is properly built and used. 48 | * 49 | * You can find further help on the libcurl development mailing list: 50 | * http://cool.haxx.se/mailman/listinfo/curl-library/ 51 | * 52 | * NOTE 2 53 | * ------ 54 | * 55 | * Some of the following compile time checks are based on the fact 56 | * that the dimension of a constant array can not be a negative one. 57 | * In this way if the compile time verification fails, the compilation 58 | * will fail issuing an error. The error description wording is compiler 59 | * dependent but it will be quite similar to one of the following: 60 | * 61 | * "negative subscript or subscript is too large" 62 | * "array must have at least one element" 63 | * "-1 is an illegal array size" 64 | * "size of array is negative" 65 | * 66 | * If you are building an application which tries to use an already 67 | * built libcurl library and you are getting this kind of errors on 68 | * this file, it is a clear indication that there is a mismatch between 69 | * how the library was built and how you are trying to use it for your 70 | * application. Your already compiled or binary library provider is the 71 | * only one who can give you the details you need to properly use it. 72 | */ 73 | 74 | /* 75 | * Verify that some macros are actually defined. 76 | */ 77 | 78 | #ifndef CURL_SIZEOF_LONG 79 | # error "CURL_SIZEOF_LONG definition is missing!" 80 | Error Compilation_aborted_CURL_SIZEOF_LONG_is_missing 81 | #endif 82 | 83 | #ifndef CURL_TYPEOF_CURL_SOCKLEN_T 84 | # error "CURL_TYPEOF_CURL_SOCKLEN_T definition is missing!" 85 | Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_is_missing 86 | #endif 87 | 88 | #ifndef CURL_SIZEOF_CURL_SOCKLEN_T 89 | # error "CURL_SIZEOF_CURL_SOCKLEN_T definition is missing!" 
90 | Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_is_missing 91 | #endif 92 | 93 | #ifndef CURL_TYPEOF_CURL_OFF_T 94 | # error "CURL_TYPEOF_CURL_OFF_T definition is missing!" 95 | Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_is_missing 96 | #endif 97 | 98 | #ifndef CURL_FORMAT_CURL_OFF_T 99 | # error "CURL_FORMAT_CURL_OFF_T definition is missing!" 100 | Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_is_missing 101 | #endif 102 | 103 | #ifndef CURL_FORMAT_CURL_OFF_TU 104 | # error "CURL_FORMAT_CURL_OFF_TU definition is missing!" 105 | Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_is_missing 106 | #endif 107 | 108 | #ifndef CURL_FORMAT_OFF_T 109 | # error "CURL_FORMAT_OFF_T definition is missing!" 110 | Error Compilation_aborted_CURL_FORMAT_OFF_T_is_missing 111 | #endif 112 | 113 | #ifndef CURL_SIZEOF_CURL_OFF_T 114 | # error "CURL_SIZEOF_CURL_OFF_T definition is missing!" 115 | Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_is_missing 116 | #endif 117 | 118 | #ifndef CURL_SUFFIX_CURL_OFF_T 119 | # error "CURL_SUFFIX_CURL_OFF_T definition is missing!" 120 | Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_is_missing 121 | #endif 122 | 123 | #ifndef CURL_SUFFIX_CURL_OFF_TU 124 | # error "CURL_SUFFIX_CURL_OFF_TU definition is missing!" 125 | Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_is_missing 126 | #endif 127 | 128 | /* 129 | * Macros private to this header file. 130 | */ 131 | 132 | #define CurlchkszEQ(t, s) sizeof(t) == s ? 1 : -1 133 | 134 | #define CurlchkszGE(t1, t2) sizeof(t1) >= sizeof(t2) ? 1 : -1 135 | 136 | /* 137 | * Verify that the size previously defined and expected for long 138 | * is the same as the one reported by sizeof() at compile time. 139 | */ 140 | 141 | typedef char 142 | __curl_rule_01__ 143 | [CurlchkszEQ(long, CURL_SIZEOF_LONG)]; 144 | 145 | /* 146 | * Verify that the size previously defined and expected for 147 | * curl_off_t is actually the the same as the one reported 148 | * by sizeof() at compile time. 149 | */ 150 | 151 | typedef char 152 | __curl_rule_02__ 153 | [CurlchkszEQ(curl_off_t, CURL_SIZEOF_CURL_OFF_T)]; 154 | 155 | /* 156 | * Verify at compile time that the size of curl_off_t as reported 157 | * by sizeof() is greater or equal than the one reported for long 158 | * for the current compilation. 159 | */ 160 | 161 | typedef char 162 | __curl_rule_03__ 163 | [CurlchkszGE(curl_off_t, long)]; 164 | 165 | /* 166 | * Verify that the size previously defined and expected for 167 | * curl_socklen_t is actually the the same as the one reported 168 | * by sizeof() at compile time. 169 | */ 170 | 171 | typedef char 172 | __curl_rule_04__ 173 | [CurlchkszEQ(curl_socklen_t, CURL_SIZEOF_CURL_SOCKLEN_T)]; 174 | 175 | /* 176 | * Verify at compile time that the size of curl_socklen_t as reported 177 | * by sizeof() is greater or equal than the one reported for int for 178 | * the current compilation. 179 | */ 180 | 181 | typedef char 182 | __curl_rule_05__ 183 | [CurlchkszGE(curl_socklen_t, int)]; 184 | 185 | /* ================================================================ */ 186 | /* EXTERNALLY AND INTERNALLY VISIBLE DEFINITIONS */ 187 | /* ================================================================ */ 188 | 189 | /* 190 | * CURL_ISOCPP and CURL_OFF_T_C definitions are done here in order to allow 191 | * these to be visible and exported by the external libcurl interface API, 192 | * while also making them visible to the library internals, simply including 193 | * curl_setup.h, without actually needing to include curl.h internally. 
194 | * If some day this section would grow big enough, all this should be moved 195 | * to its own header file. 196 | */ 197 | 198 | /* 199 | * Figure out if we can use the ## preprocessor operator, which is supported 200 | * by ISO/ANSI C and C++. Some compilers support it without setting __STDC__ 201 | * or __cplusplus so we need to carefully check for them too. 202 | */ 203 | 204 | #if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \ 205 | defined(__HP_aCC) || defined(__BORLANDC__) || defined(__LCC__) || \ 206 | defined(__POCC__) || defined(__SALFORDC__) || defined(__HIGHC__) || \ 207 | defined(__ILEC400__) 208 | /* This compiler is believed to have an ISO compatible preprocessor */ 209 | #define CURL_ISOCPP 210 | #else 211 | /* This compiler is believed NOT to have an ISO compatible preprocessor */ 212 | #undef CURL_ISOCPP 213 | #endif 214 | 215 | /* 216 | * Macros for minimum-width signed and unsigned curl_off_t integer constants. 217 | */ 218 | 219 | #if defined(__BORLANDC__) && (__BORLANDC__ == 0x0551) 220 | # define __CURL_OFF_T_C_HLPR2(x) x 221 | # define __CURL_OFF_T_C_HLPR1(x) __CURL_OFF_T_C_HLPR2(x) 222 | # define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \ 223 | __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_T) 224 | # define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \ 225 | __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_TU) 226 | #else 227 | # ifdef CURL_ISOCPP 228 | # define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val ## Suffix 229 | # else 230 | # define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val/**/Suffix 231 | # endif 232 | # define __CURL_OFF_T_C_HLPR1(Val,Suffix) __CURL_OFF_T_C_HLPR2(Val,Suffix) 233 | # define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_T) 234 | # define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_TU) 235 | #endif 236 | 237 | /* 238 | * Get rid of macros private to this header file. 239 | */ 240 | 241 | #undef CurlchkszEQ 242 | #undef CurlchkszGE 243 | 244 | /* 245 | * Get rid of macros not intended to exist beyond this point. 246 | */ 247 | 248 | #undef CURL_PULL_WS2TCPIP_H 249 | #undef CURL_PULL_SYS_TYPES_H 250 | #undef CURL_PULL_SYS_SOCKET_H 251 | #undef CURL_PULL_SYS_POLL_H 252 | #undef CURL_PULL_STDINT_H 253 | #undef CURL_PULL_INTTYPES_H 254 | 255 | #undef CURL_TYPEOF_CURL_SOCKLEN_T 256 | #undef CURL_TYPEOF_CURL_OFF_T 257 | 258 | #ifdef CURL_NO_OLDIES 259 | #undef CURL_FORMAT_OFF_T /* not required since 7.19.0 - obsoleted in 7.20.0 */ 260 | #endif 261 | 262 | #endif /* __CURL_CURLRULES_H */ 263 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2022] [Lexiang Huang, Matthew Magnusson, Salman Estyak] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at
194 | 
195 |     http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 | 
--------------------------------------------------------------------------------
/LookasideCache_Metastability/LoadGenerator/TraceReplay.cpp:
--------------------------------------------------------------------------------
1 | // TraceReplay.cpp - Trace replay client code.
2 | //
3 | // Copyright (c) 2018 Timothy Zhu.
4 | // Licensed under the MIT License. See LICENSE file for details.
5 | //
6 | 
7 | #include <iostream>
8 | #include <fstream>
9 | #include <sstream>
10 | #include <string>
11 | #include <vector>
12 | #include <errno.h>
13 | #include <stdlib.h>
14 | #include <unistd.h>
15 | #include "curl/curl.h"
16 | #include "TraceReader.hpp"
17 | #include "time.hpp"
18 | #include <pthread.h> // note: the system header names in this file were reconstructed; the originals were stripped along with their angle brackets in extraction
19 | 
20 | using namespace std;
21 | string nginx_server_ip = "172.31.14.78";
22 | string baseUrl = "http://" + nginx_server_ip + "/index.php";
23 | 
24 | BaseTraceReader* traceReader = NULL;
25 | bool openLoop = true;
26 | string resultsFilename = "results";
27 | int numThreads = 8;
28 | uint64_t traceStartTime;
29 | 
30 | int total_hits = 0;
31 | int num_error_responses = 0;
32 | int killed_at_trace_replay = 0;
33 | int dropped_requests = 0;
34 | int error_responses[1000];
35 | int total_database_hits = 0;
36 | 
37 | 
38 | pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
39 | 
40 | struct request_info{
41 |     uint64_t request_start_time;
42 |     uint64_t response_time;
43 |     uint64_t hit;
44 |     uint64_t error;
45 | };
46 | 
47 | vector<request_info> requestInfos;
48 | 
49 | size_t WriteFunction(void *ptr, size_t size, size_t nmemb, std::string *s)
50 | {
51 |     s->append(static_cast<char*>(ptr), size*nmemb);
52 |     return size*nmemb;
53 | }
54 | 
55 | class TraceReplay {
56 | public:
57 |     virtual ~TraceReplay() {}
58 |     virtual int Replay(const TraceEntry& entry) = 0;
59 | };
60 | 
61 | class WebTraceReplay : public TraceReplay {
62 | private:
63 |     CURL* curl;
64 | 
65 | public:
66 |     WebTraceReplay() {
67 |         curl = curl_easy_init();
68 |         curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); // Fix curl bug with signals
69 |         curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteFunction);
70 |         //curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
71 |     }
72 |     virtual ~WebTraceReplay() {
73 |         curl_easy_cleanup(curl);
74 |     }
75 | 
76 | 
77 |     void AddParam(string& url, const TraceEntry& entry){
78 |         url = url + "?";
79 |         url = url + "request_type=" + entry.requestType
80 |             + "&request_size=" + to_string(entry.requestSize)
81 |             + "&arrival_time=" + to_string(entry.arrivalTime)
82 |             + "&retry_policy=" + to_string(0)
83 |             + "&request_index=" + to_string(entry.index);
84 | 
85 |     }
86 | 
87 |     void process_return_code(int return_code){
88 | 
89 |         pthread_mutex_lock(&mutex);
90 | 
91 |         if(return_code == 1){
92 |             total_hits++; // served from memcached
93 |         }
94 |         else if(return_code < 0){
95 |             error_responses[-1 * return_code]++;
96 |             num_error_responses++;
97 |         }
98 |         else if(return_code == 2){
99 |             total_database_hits++; // cache miss served by MySQL
100 |         }
101 |         else{
102 |             error_responses[return_code]++;
103 |         }
104 | 
105 | 
106 |         pthread_mutex_unlock(&mutex);
107 | 
108 |         /*
109 |         switch (return_code)
110 |         {
111 |         case 1:
112 |             total_hits++;
113 |             break;
114 |         default:
115 |             break;
116 |         }
117 |         */
118 |     }
119 | 
120 |     virtual int Replay(const TraceEntry& entry) {
121 |         // Set URL
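122 |         // The trace entry is encoded as index.php query parameters by
123 |         // AddParam() above: request_type, request_size, arrival_time,
124 |         // retry_policy, and request_index. The reply body is parsed below
125 |         // as a small numeric status code: 1 = cache hit, 2 = database hit,
126 |         // negative values = errors (see process_return_code above).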
127 |         string response;
128 |         string url = baseUrl; //+ entry.requestType;
129 |         AddParam(url, entry); //to be refactored
130 |         //cout<<"request: "<< url<<endl;
131 | 
132 |         // [reconstructed: the original lines here were lost in extraction;
133 |         //  these are the minimal calls the surrounding code implies]
134 |         curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
135 |         curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response);
136 |         curl_easy_perform(curl);
137 | 
138 |         // parse the numeric status code out of the response body
139 |         int return_code = 0;
140 |         stringstream ss(response);
141 |         ss >> return_code;
142 | 
143 |         if(response.size() > 4){
144 |             return_code = -99; // response too large to be a plain status code
145 |         }
146 |         else if(response.size() == 0){
147 |             return_code = -98; // empty response
148 |         }
149 |         // else{
150 |         //     cout<<"response size: " << response.size()<<endl;
151 |         // }
152 | 
153 |         process_return_code(return_code); // [reconstructed]
154 | 
155 |         return return_code; // [reconstructed]
156 |     }
157 | };
158 | 
159 | 
160 | // [reconstructed: the worker-thread function header was lost in extraction]
161 | void* WorkerThread(void* arg)
162 | {
163 |     TraceReplay* replay = new WebTraceReplay();
164 |     TraceEntry entry;
165 | 
166 | 
167 |     while (traceReader->nextEntry(entry)) {
168 |         // Open loop sleeps until arrival time
169 |         // Closed loop just executes requests as soon as possible
170 |         uint64_t startTime;
171 | 
172 |         if (openLoop) {
173 |             uint64_t arrivalTime = entry.arrivalTime + traceStartTime;
174 | 
175 |             AbsoluteSleepUninterruptible(arrivalTime);
176 |             startTime = arrivalTime;
177 |             // get current time
178 |             uint64_t currentTime = GetTime();
179 | 
180 | 
181 |             int difference_in_second = (currentTime - startTime) / (NS_PER_SEC);
182 | 
183 |             // drop requests that are already a second or more behind schedule
184 |             if(difference_in_second >= 1){
185 | 
186 |                 pthread_mutex_lock(&mutex);
187 |                 request_info req_info;
188 |                 req_info.response_time = currentTime - startTime;
189 |                 req_info.request_start_time = startTime;
190 |                 req_info.hit = 0;
191 |                 req_info.error = 1;
192 |                 killed_at_trace_replay++;
193 |                 requestInfos.push_back(req_info);
194 |                 pthread_mutex_unlock(&mutex);
195 | 
196 |                 continue;
197 |             }
198 | 
199 | 
200 |         }
201 |         else {
202 |             startTime = GetTime();
203 |         }
204 | 
205 | 
206 |         // Replay request
207 |         int64_t return_code = replay->Replay(entry);
208 |         // Record response time
209 |         uint64_t endTime = GetTime();
210 |         uint64_t responseTime = endTime - startTime;
211 |         pthread_mutex_lock(&mutex);
212 |         // record per-request stats under the shared lock
213 |         request_info req_info;
214 |         req_info.response_time = responseTime;
215 |         req_info.request_start_time = startTime;
216 | 
217 |         req_info.hit = return_code == 1 ? 1 : 0;
218 |         req_info.error = return_code < 0 ?
1 : 0;
219 | 
220 |         requestInfos.push_back(req_info);
221 |         pthread_mutex_unlock(&mutex);
222 |     }
223 |     delete replay;
224 |     return NULL;
225 | }
226 | 
227 | void usage(char* argv0)
228 | {
229 |     cerr << "Usage: " << argv0 << " -t traceFile [-r resultsFilename] [-u baseUrl] [-n numThreads] [-o] [-c]" << endl;
230 |     exit(-1);
231 | }
232 | 
233 | int main(int argc, char** argv)
234 | {
235 |     // Process command line options
236 | 
237 |     /*
238 |     le: Loadshedding enabled l
239 |     dst: Drop Session Start Time Offset d (relative to TraceReplay start time)
240 |     tdp: Total Drop Period e
241 |     dwl: Drop Window Length f
242 |     dp: Drop Probability g
243 |     tlp: Total Loadshedding period (dictates how windows will be spread) h
244 |     */
245 |     int opt = 0;
246 |     string filename;
247 |     do {
248 |         opt = getopt(argc, argv, "t:r:u:n:oc");
249 |         switch (opt) {
250 |         case 't':
251 |             filename.assign(optarg);
252 |             break;
253 |         case 'r':
254 |             resultsFilename.assign(optarg);
255 |             break;
256 | 
257 |         case 'u':
258 |             baseUrl.assign(optarg);
259 |             break;
260 | 
261 |         case 'n':
262 |             numThreads = atoi(optarg);
263 |             break;
264 | 
265 |         case 'o':
266 |             openLoop = true;
267 |             break;
268 | 
269 |         case 'c':
270 |             openLoop = false;
271 |             break;
272 | 
273 |         case -1:
274 |             break;
275 | 
276 |         default:
277 |             usage(argv[0]);
278 |             break;
279 |         }
280 |     } while (opt != -1);
281 | 
282 |     // Create trace and check arguments
283 |     traceReader = new TraceReader(filename);
284 |     if ((traceReader == NULL) || (numThreads < 1)) {
285 |         usage(argv[0]);
286 |     }
287 | 
288 |     // Loadshedding related calculations
289 |     cout<<"Experiment: IsOpen = " << openLoop << endl;
290 | 
291 |     // Allow 5 seconds for warm up and initialization
292 |     traceStartTime = GetTime() + ConvertSecondsToTime(5);
293 | 
294 |     // Create worker threads
295 |     pthread_attr_t attr;
296 |     pthread_attr_init(&attr);
297 |     pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
298 |     pthread_t* threadArray = new pthread_t[numThreads];
299 |     if (threadArray == NULL) {
300 |         cerr << "Failed to create thread array" << endl;
301 |         exit(1);
302 |     }
303 |     for (int i = 0; i < numThreads; i++) {
304 |         int rc = pthread_create(&threadArray[i],
305 |                                 &attr,
306 |                                 WorkerThread,
307 |                                 NULL);
308 |         if (rc) {
309 |             cerr << "Error creating thread: " << rc << " errno: " << errno << endl;
310 |             exit(-1);
311 |         }
312 |     }
313 | 
314 |     // Join all threads
315 |     for (int i = 0; i < numThreads; i++) {
316 |         int rc = pthread_join(threadArray[i], NULL);
317 |         if (rc) {
318 |             cerr << "Error joining thread: " << rc << " errno: " << errno << endl;
319 |             exit(-1);
320 |         }
321 |     }
322 |     uint64_t end_time = GetTime();
323 |     uint64_t total_latency = 0;
324 |     double experiment_runtime = ((end_time - traceStartTime) * 1.0) / NS_PER_SEC;
325 |     // Output results
326 |     pthread_mutex_lock(&mutex);
327 |     ofstream resultsFile(resultsFilename.c_str());
328 |     // stores differential hits, error responses within each (end - start) times
329 |     int recalc_total_error = 0;
330 |     resultsFile << // [truncated: the remainder of the results-writing code and of main() was lost in extraction]
--------------------------------------------------------------------------------
/LookasideCache_Metastability/NGINX Web Server/www/html/bkup/index.nginx-debian.html:
--------------------------------------------------------------------------------
[stock Apache2 Ubuntu default welcome page ("Apache2 Ubuntu Default Page: It works"), kept as a backup copy; its inline CSS and the boilerplate Configuration Overview, Document Roots, and Reporting Problems sections are omitted]
--------------------------------------------------------------------------------
/GC_Metastability/GCMetastability.java:
--------------------------------------------------------------------------------
1 | import java.util.Random;
2 | import java.lang.Math;
3 | import java.lang.Thread;
4 | import java.util.concurrent.ExecutorService;
5 | import java.util.concurrent.Executors;
6 | import java.lang.management.ManagementFactory;
7 | import java.lang.management.RuntimeMXBean;
8 | import java.io.FileWriter;
9 | import java.io.IOException;
10 | import java.time.format.DateTimeFormatter;
11 | import java.time.LocalDateTime;
12 | import java.util.concurrent.TimeUnit;
13 | 
14 | class Result{
15 |     public long arrival_t;
16 |     public long start_t;
17 |     public long completion_t;
18 |     public long arrival_uptime;
19 |     public long completion_uptime;
20 | }
21 | 
22 | class Global{
23 |     public static int MAX_T = 100;
24 |     public static String record_filepath = "./exp_record.csv";
25 | 
26 |     public static int num_reqs;
27 |     public static Result[] results;
28 | 
29 |     public static boolean warmup = true;
30 |     public static int warmup_endtime_offset = 20; // in seconds
31 |     public static int warmup_sleep_dur = 10000; // in ms, happens after warmup
32 |     public static double warmup_interarrival_time = 0.01; // in s
33 |     public static int num_warmup_reqs = (int)((1/warmup_interarrival_time) * warmup_endtime_offset);
34 |     public static int curr_arrival_rate;
35 | 
36 |     // load-spike trigger specific configs
37 |     public static int stage_dur;
38 |     public static int original_arrival_rate;
39 |     public static int highest_arrival_rate;
40 |     public static int arr_after_first_load_shedding;
41 |     public static int arr_after_second_load_shedding;
42 |     public static int arr_after_thrid_load_shedding;
43 | 
44 |     // capacity degradation trigger specific configs
45 |     public static int trigger_dur;
46 |     public static int trigger_offset; // determines when to add a trigger
47 |     public static boolean apply_trigger = false;
48 | 
49 |     public static boolean auto_detect = false; // detect whether the current experiment shows a metastable failure or not
50 |     public static int num_data_before_trigger = 0;
51 |     public static long sum_latency_before_trigger = 0;
52 |     public static int num_reqs_back_to_normal_latency = 0;
53 |     public static int window_for_success = 30; // 30s of back-to-normal samples after the trigger counts as success
54 | }
55 | 
56 | class Task implements Runnable{
57 |     private RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
58 |     private int i; // index of this request in Global.results
59 | 
60 |     public Task(int index){
61 |         i = index;
62 |     }
63 | 
64 |     public void run()
65 |     {
66 |         try{
67 |             // each task allocates ~0.5 MB of int data (plus object headers)
68 |             Global.results[i].start_t = System.nanoTime();
69 |             int[][] intArray = new int[256*256][];
70 | 
71 |             for (int k = 0; k < 256*256; k++){
72 |                 intArray[k] = new int[2];
73 |                 for (int j = 0; j < 2; j++){
74 |                     intArray[k][j] = k;
75 |                 }
76 |             }
77 |             Thread.sleep(100); //sleep 0.1s
78 |             int local_elem = intArray[0][0]; // read one element back after the sleep so the allocation is used
79 |             Global.results[i].completion_t = System.nanoTime();
80 |             Global.results[i].completion_uptime = bean.getUptime();
81 | 
82 |             // recording latency info for detecting metastable failures
83 |             if (Global.apply_trigger && i > Global.num_warmup_reqs && i < Global.num_warmup_reqs + Global.curr_arrival_rate * Global.trigger_offset){
84 |                 Global.num_data_before_trigger += 1;
85 |                 Global.sum_latency_before_trigger += Global.results[i].completion_t - Global.results[i].arrival_t;
86 |             } else if (Global.apply_trigger && Global.auto_detect && 
i > Global.num_warmup_reqs + Global.curr_arrival_rate * (Global.trigger_offset + Global.trigger_dur/1e3 + 10)) {
87 |                 // start detecting after 10s to let the system stabilize
88 |                 double avg_latency_before_trigger = (double) Global.sum_latency_before_trigger / Global.num_data_before_trigger;
89 |                 long curr_latency = Global.results[i].completion_t - Global.results[i].arrival_t;
90 | 
91 |                 if ((double)curr_latency < 1.1*avg_latency_before_trigger){
92 |                     Global.num_reqs_back_to_normal_latency += 1;
93 |                 }
94 | 
95 |                 if(Global.num_reqs_back_to_normal_latency > Global.window_for_success * Global.curr_arrival_rate){
96 |                     System.out.println("System succeeds: latency (ms) " + curr_latency/1e6 + " is at or below the pre-trigger average " + avg_latency_before_trigger/1e6);
97 |                     FileWriter fw = new FileWriter(Global.record_filepath, true);
98 |                     fw.write(Global.curr_arrival_rate + "," + Global.trigger_dur + "," + Global.num_reqs/Global.curr_arrival_rate + "," + "Success\n");
99 |                     fw.close();
100 |                     System.exit(0);
101 |                 }
102 |             }
103 |         }
104 |         catch (Exception e){
105 |             System.out.println("Exception is caught");
106 |             e.printStackTrace(System.out);
107 |             System.exit(0);
108 |         }
109 |     }
110 | }
111 | 
112 | public class GCMetastability{
113 |     static Random rand = new Random();
114 | 
115 |     // given arrival rate lambda, generate an exponentially distributed inter-arrival time
116 |     public static double getExp(double lambda){
117 |         return Math.log(1-rand.nextDouble())/(-lambda);
118 |     }
119 | 
120 |     public static void main(String[] args){
121 |         if (args.length < 4){
122 |             System.out.println("Please enter: arrival rate, trigger duration (ms), experiment duration (s), and whether metastable failure detection is enabled");
123 |             System.exit(1);
124 |         }
125 | 
126 |         Global.curr_arrival_rate = Integer.parseInt(args[0]);
127 |         Global.trigger_dur = Integer.parseInt(args[1]);
128 | 
129 |         if (Integer.parseInt(args[3]) > 0) {
130 |             Global.auto_detect = true;
131 |         }
132 | 
133 |         if (Global.trigger_dur < 0) { // load-spike trigger
134 |             // setting up arrival rate pattern
135 |             int rps_level_interval = 40;
136 |             Global.original_arrival_rate = Global.curr_arrival_rate;
137 |             Global.highest_arrival_rate = Global.original_arrival_rate + rps_level_interval; // load-spike
138 |             Global.arr_after_first_load_shedding = Global.highest_arrival_rate - rps_level_interval;
139 |             Global.arr_after_second_load_shedding = Global.arr_after_first_load_shedding - rps_level_interval;
140 |             Global.arr_after_thrid_load_shedding = Global.arr_after_second_load_shedding - rps_level_interval;
141 | 
142 |             // setting up experiment durations
143 |             Global.stage_dur = (int)(Integer.parseInt(args[2]) / 5); // 5 stages of different load levels
144 |             Global.num_reqs = Global.num_warmup_reqs + (Global.original_arrival_rate * Global.stage_dur) + (Global.highest_arrival_rate * Global.stage_dur) + (Global.arr_after_first_load_shedding * Global.stage_dur) + (Global.arr_after_second_load_shedding * Global.stage_dur) + (Global.arr_after_thrid_load_shedding * Global.stage_dur);
145 |             System.out.println("Running experiment with load-spike trigger");
146 |         } else { // capacity degradation trigger
147 |             if (Global.trigger_dur >= 0) {
148 |                 Global.apply_trigger = true;
149 |             }
150 |             Global.num_reqs = Global.curr_arrival_rate * Integer.parseInt(args[2]);
151 |             Global.trigger_offset = Global.warmup_endtime_offset + (int)(Global.warmup_sleep_dur/1000); // apply trigger right after warmup
152 |             System.out.println("Running experiment with capacity degradation trigger 
[rps_triggerDur(ms)_expDur(s)]: " + Global.curr_arrival_rate + "_" + Global.trigger_dur + "_" + Global.num_reqs/Global.curr_arrival_rate);
153 |         }
154 | 
155 |         //add time padding for jvm initialization
156 |         try{
157 |             Thread.sleep(2000);
158 |         }catch (Exception e){
159 |             System.out.println(e);
160 |             System.exit(0);
161 |         }
162 | 
163 |         ExecutorService pool = Executors.newFixedThreadPool(Global.MAX_T);
164 |         RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean();
165 | 
166 |         String filepath = "job.csv";
167 | 
168 |         Global.results = new Result[Global.num_reqs];
169 |         // [reconstructed: the original lines here were lost in extraction
170 |         //  (the text between "i<" and the next ">" was stripped); this is a
171 |         //  minimal version consistent with the surrounding code]
172 |         for(int i = 0; i < Global.num_reqs; i++){
173 |             Global.results[i] = new Result();
174 |         }
175 | 
176 |         long measurement_start_time = System.nanoTime();
177 |         long arrival_time = System.nanoTime();
178 | 
179 |         for(int i = 0; i < Global.num_reqs; i++){
180 |             try{
181 |                 // advance the next arrival time by an exponential inter-arrival gap
182 |                 // ([assumed] warmup paces at the fixed warmup_interarrival_time)
183 |                 double gap = (Global.warmup && i <= Global.num_warmup_reqs)
184 |                     ? Global.warmup_interarrival_time
185 |                     : getExp(Global.curr_arrival_rate);
186 |                 arrival_time += (long)(gap * 1e9);
187 | 
188 |                 // sleep until the scheduled arrival time (open-loop pacing)
189 |                 long current_time = System.nanoTime();
190 | 
191 | 
192 | 
193 |                 if (arrival_time > current_time){
194 |                     Thread.sleep((long)((arrival_time - current_time)/1e6));
195 |                 }
196 | 
197 |                 Global.results[i].arrival_t = arrival_time;
198 |                 Global.results[i].arrival_uptime = bean.getUptime();
199 |                 Runnable r = new Task(i);
200 |                 pool.execute(r);
201 | 
202 |                 if (Global.warmup && (i == Global.num_warmup_reqs)){
203 |                     // For warmup
204 |                     System.out.println("Warm-up finished, sleeping...");
205 |                     Thread.sleep(Global.warmup_sleep_dur);
206 |                     arrival_time += Global.warmup_sleep_dur * 1e6;
207 |                     System.out.println("Start running experiment at RPS=" + Global.curr_arrival_rate);
208 |                     measurement_start_time = System.nanoTime();
209 |                 } else if (Global.apply_trigger && i == Global.num_warmup_reqs + (Global.curr_arrival_rate * Global.trigger_offset)) {
210 |                     // For applying capacity degradation trigger
211 |                     System.out.println("Applying capacity degradation trigger...");
212 |                     Thread.sleep(Global.trigger_dur);
213 |                     System.out.println("Finished applying capacity degradation trigger.");
214 |                 }
215 | 
216 |                 if (Global.trigger_dur < 0) { //load-spike trigger
217 |                     if (i == Global.num_warmup_reqs + (Global.original_arrival_rate * Global.stage_dur)){
218 |                         System.out.println("Going up to the highest RPS level...");
219 |                         Global.curr_arrival_rate = Global.highest_arrival_rate;
220 |                     }
221 |                     else if (i == Global.num_warmup_reqs + (Global.original_arrival_rate * Global.stage_dur) + (Global.highest_arrival_rate * Global.stage_dur)){
222 |                         System.out.println("1st load shedding...");
223 |                         Global.curr_arrival_rate = Global.arr_after_first_load_shedding;
224 |                     }
225 |                     else if (i == Global.num_warmup_reqs + (Global.original_arrival_rate * Global.stage_dur) + (Global.highest_arrival_rate * Global.stage_dur) + (Global.arr_after_first_load_shedding * Global.stage_dur)){
226 |                         System.out.println("2nd load shedding...");
227 |                         Global.curr_arrival_rate = Global.arr_after_second_load_shedding;
228 |                     }
229 |                     else if (i == Global.num_warmup_reqs + (Global.original_arrival_rate * Global.stage_dur) + (Global.highest_arrival_rate * Global.stage_dur) + (Global.arr_after_first_load_shedding * Global.stage_dur) + (Global.arr_after_second_load_shedding * Global.stage_dur)){
230 |                         System.out.println("3rd load shedding...");
231 |                         Global.curr_arrival_rate = Global.arr_after_thrid_load_shedding;
232 |                     }
233 |                 }
234 | 
235 |             } catch (Exception e){
236 |                 System.out.println("Exception is caught");
237 |                 e.printStackTrace(System.out);
238 |                 System.exit(0);
239 |             } catch (OutOfMemoryError E){
240 |                 System.exit(0);
241 |             }
242 |         }
243 |         pool.shutdown();
244 | 
245 |         try{
246 |             pool.awaitTermination(60, TimeUnit.SECONDS);
247 |         } catch (InterruptedException ex) {
248 |             pool.shutdownNow();
249 |             Thread.currentThread().interrupt();
250 |         }
251 | 
252 |         long completion_time = System.nanoTime();
253 |         double throughput = (double)(Global.num_reqs - Global.num_warmup_reqs)/((double)(completion_time-measurement_start_time)/1e9);
254 |         System.out.println("Throughput 
is: " + throughput); 255 | 256 | if(Global.apply_trigger){ 257 | double avg_latency_before_trigger = Global.sum_latency_before_trigger/Global.num_data_before_trigger; 258 | System.out.println("Average latency before trigger is: " + avg_latency_before_trigger/1e6 + "ms from " + Global.num_data_before_trigger + " samples."); 259 | } 260 | 261 | //write to csv: arrival_time, start_time, completion_time, completion_uptimeime (using bean) 262 | try{ 263 | FileWriter fw = new FileWriter(filepath, true); 264 | fw.write("arrival_t," + "start_t," + "completion_t," + "arrival_uptime," +"completion_uptime\n"); 265 | for(int i = 0; i < Global.num_reqs; i++){ 266 | fw.write(Global.results[i].arrival_t + "," + Global.results[i].start_t + "," + Global.results[i].completion_t + "," + Global.results[i].arrival_uptime + "," + Global.results[i].completion_uptime + "\n"); 267 | } 268 | fw.close(); 269 | 270 | if (Global.auto_detect){ 271 | FileWriter fw2 = new FileWriter(Global.record_filepath, true); 272 | fw2.write(Global.curr_arrival_rate + "," + Global.trigger_dur + "," + Global.num_reqs/Global.curr_arrival_rate + ",Failure\n"); 273 | fw2.close(); 274 | System.out.println("System showed metastable failure due to high latency persists till the end of the experiment."); 275 | } 276 | } catch (IOException ioe){ 277 | System.out.println(ioe); 278 | ioe.printStackTrace(System.out); 279 | System.exit(0); 280 | } 281 | } 282 | } 283 | 284 | -------------------------------------------------------------------------------- /LookasideCache_Metastability/LoadGenerator/curl/multi.h: -------------------------------------------------------------------------------- 1 | #ifndef __CURL_MULTI_H 2 | #define __CURL_MULTI_H 3 | /*************************************************************************** 4 | * _ _ ____ _ 5 | * Project ___| | | | _ \| | 6 | * / __| | | | |_) | | 7 | * | (__| |_| | _ <| |___ 8 | * \___|\___/|_| \_\_____| 9 | * 10 | * Copyright (C) 1998 - 2013, Daniel Stenberg, , et al. 11 | * 12 | * This software is licensed as described in the file COPYING, which 13 | * you should have received as part of this distribution. The terms 14 | * are also available at http://curl.haxx.se/docs/copyright.html. 15 | * 16 | * You may opt to use, copy, modify, merge, publish, distribute and/or sell 17 | * copies of the Software, and permit persons to whom the Software is 18 | * furnished to do so, under the terms of the COPYING file. 19 | * 20 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY 21 | * KIND, either express or implied. 22 | * 23 | ***************************************************************************/ 24 | /* 25 | This is an "external" header file. Don't give away any internals here! 26 | 27 | GOALS 28 | 29 | o Enable a "pull" interface. The application that uses libcurl decides where 30 | and when to ask libcurl to get/send data. 31 | 32 | o Enable multiple simultaneous transfers in the same thread without making it 33 | complicated for the application. 34 | 35 | o Enable the application to select() on its own file descriptors and curl's 36 | file descriptors simultaneous easily. 37 | 38 | */ 39 | 40 | /* 41 | * This header file should not really need to include "curl.h" since curl.h 42 | * itself includes this file and we expect user applications to do #include 43 | * without the need for especially including multi.h. 
44 | * 45 | * For some reason we added this include here at one point, and rather than to 46 | * break existing (wrongly written) libcurl applications, we leave it as-is 47 | * but with this warning attached. 48 | */ 49 | #include "curl.h" 50 | 51 | #ifdef __cplusplus 52 | extern "C" { 53 | #endif 54 | 55 | typedef void CURLM; 56 | 57 | typedef enum { 58 | CURLM_CALL_MULTI_PERFORM = -1, /* please call curl_multi_perform() or 59 | curl_multi_socket*() soon */ 60 | CURLM_OK, 61 | CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */ 62 | CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */ 63 | CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */ 64 | CURLM_INTERNAL_ERROR, /* this is a libcurl bug */ 65 | CURLM_BAD_SOCKET, /* the passed in socket argument did not match */ 66 | CURLM_UNKNOWN_OPTION, /* curl_multi_setopt() with unsupported option */ 67 | CURLM_ADDED_ALREADY, /* an easy handle already added to a multi handle was 68 | attempted to get added - again */ 69 | CURLM_LAST 70 | } CURLMcode; 71 | 72 | /* just to make code nicer when using curl_multi_socket() you can now check 73 | for CURLM_CALL_MULTI_SOCKET too in the same style it works for 74 | curl_multi_perform() and CURLM_CALL_MULTI_PERFORM */ 75 | #define CURLM_CALL_MULTI_SOCKET CURLM_CALL_MULTI_PERFORM 76 | 77 | typedef enum { 78 | CURLMSG_NONE, /* first, not used */ 79 | CURLMSG_DONE, /* This easy handle has completed. 'result' contains 80 | the CURLcode of the transfer */ 81 | CURLMSG_LAST /* last, not used */ 82 | } CURLMSG; 83 | 84 | struct CURLMsg { 85 | CURLMSG msg; /* what this message means */ 86 | CURL *easy_handle; /* the handle it concerns */ 87 | union { 88 | void *whatever; /* message-specific data */ 89 | CURLcode result; /* return code for transfer */ 90 | } data; 91 | }; 92 | typedef struct CURLMsg CURLMsg; 93 | 94 | /* Based on poll(2) structure and values. 95 | * We don't use pollfd and POLL* constants explicitly 96 | * to cover platforms without poll(). */ 97 | #define CURL_WAIT_POLLIN 0x0001 98 | #define CURL_WAIT_POLLPRI 0x0002 99 | #define CURL_WAIT_POLLOUT 0x0004 100 | 101 | struct curl_waitfd { 102 | curl_socket_t fd; 103 | short events; 104 | short revents; /* not supported yet */ 105 | }; 106 | 107 | /* 108 | * Name: curl_multi_init() 109 | * 110 | * Desc: inititalize multi-style curl usage 111 | * 112 | * Returns: a new CURLM handle to use in all 'curl_multi' functions. 113 | */ 114 | CURL_EXTERN CURLM *curl_multi_init(void); 115 | 116 | /* 117 | * Name: curl_multi_add_handle() 118 | * 119 | * Desc: add a standard curl handle to the multi stack 120 | * 121 | * Returns: CURLMcode type, general multi error code. 122 | */ 123 | CURL_EXTERN CURLMcode curl_multi_add_handle(CURLM *multi_handle, 124 | CURL *curl_handle); 125 | 126 | /* 127 | * Name: curl_multi_remove_handle() 128 | * 129 | * Desc: removes a curl handle from the multi stack again 130 | * 131 | * Returns: CURLMcode type, general multi error code. 132 | */ 133 | CURL_EXTERN CURLMcode curl_multi_remove_handle(CURLM *multi_handle, 134 | CURL *curl_handle); 135 | 136 | /* 137 | * Name: curl_multi_fdset() 138 | * 139 | * Desc: Ask curl for its fd_set sets. The app can use these to select() or 140 | * poll() on. We want curl_multi_perform() called as soon as one of 141 | * them are ready. 142 | * 143 | * Returns: CURLMcode type, general multi error code. 
144 | */ 145 | CURL_EXTERN CURLMcode curl_multi_fdset(CURLM *multi_handle, 146 | fd_set *read_fd_set, 147 | fd_set *write_fd_set, 148 | fd_set *exc_fd_set, 149 | int *max_fd); 150 | 151 | /* 152 | * Name: curl_multi_wait() 153 | * 154 | * Desc: Poll on all fds within a CURLM set as well as any 155 | * additional fds passed to the function. 156 | * 157 | * Returns: CURLMcode type, general multi error code. 158 | */ 159 | CURL_EXTERN CURLMcode curl_multi_wait(CURLM *multi_handle, 160 | struct curl_waitfd extra_fds[], 161 | unsigned int extra_nfds, 162 | int timeout_ms, 163 | int *ret); 164 | 165 | /* 166 | * Name: curl_multi_perform() 167 | * 168 | * Desc: When the app thinks there's data available for curl it calls this 169 | * function to read/write whatever there is right now. This returns 170 | * as soon as the reads and writes are done. This function does not 171 | * require that there actually is data available for reading or that 172 | * data can be written, it can be called just in case. It returns 173 | * the number of handles that still transfer data in the second 174 | * argument's integer-pointer. 175 | * 176 | * Returns: CURLMcode type, general multi error code. *NOTE* that this only 177 | * returns errors etc regarding the whole multi stack. There might 178 | * still have occurred problems on invidual transfers even when this 179 | * returns OK. 180 | */ 181 | CURL_EXTERN CURLMcode curl_multi_perform(CURLM *multi_handle, 182 | int *running_handles); 183 | 184 | /* 185 | * Name: curl_multi_cleanup() 186 | * 187 | * Desc: Cleans up and removes a whole multi stack. It does not free or 188 | * touch any individual easy handles in any way. We need to define 189 | * in what state those handles will be if this function is called 190 | * in the middle of a transfer. 191 | * 192 | * Returns: CURLMcode type, general multi error code. 193 | */ 194 | CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle); 195 | 196 | /* 197 | * Name: curl_multi_info_read() 198 | * 199 | * Desc: Ask the multi handle if there's any messages/informationals from 200 | * the individual transfers. Messages include informationals such as 201 | * error code from the transfer or just the fact that a transfer is 202 | * completed. More details on these should be written down as well. 203 | * 204 | * Repeated calls to this function will return a new struct each 205 | * time, until a special "end of msgs" struct is returned as a signal 206 | * that there is no more to get at this point. 207 | * 208 | * The data the returned pointer points to will not survive calling 209 | * curl_multi_cleanup(). 210 | * 211 | * The 'CURLMsg' struct is meant to be very simple and only contain 212 | * very basic informations. If more involved information is wanted, 213 | * we will provide the particular "transfer handle" in that struct 214 | * and that should/could/would be used in subsequent 215 | * curl_easy_getinfo() calls (or similar). The point being that we 216 | * must never expose complex structs to applications, as then we'll 217 | * undoubtably get backwards compatibility problems in the future. 218 | * 219 | * Returns: A pointer to a filled-in struct, or NULL if it failed or ran out 220 | * of structs. It also writes the number of messages left in the 221 | * queue (after this read) in the integer the second argument points 222 | * to. 
223 | */ 224 | CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle, 225 | int *msgs_in_queue); 226 | 227 | /* 228 | * Name: curl_multi_strerror() 229 | * 230 | * Desc: The curl_multi_strerror function may be used to turn a CURLMcode 231 | * value into the equivalent human readable error string. This is 232 | * useful for printing meaningful error messages. 233 | * 234 | * Returns: A pointer to a zero-terminated error message. 235 | */ 236 | CURL_EXTERN const char *curl_multi_strerror(CURLMcode); 237 | 238 | /* 239 | * Name: curl_multi_socket() and 240 | * curl_multi_socket_all() 241 | * 242 | * Desc: An alternative version of curl_multi_perform() that allows the 243 | * application to pass in one of the file descriptors that have been 244 | * detected to have "action" on them and let libcurl perform. 245 | * See man page for details. 246 | */ 247 | #define CURL_POLL_NONE 0 248 | #define CURL_POLL_IN 1 249 | #define CURL_POLL_OUT 2 250 | #define CURL_POLL_INOUT 3 251 | #define CURL_POLL_REMOVE 4 252 | 253 | #define CURL_SOCKET_TIMEOUT CURL_SOCKET_BAD 254 | 255 | #define CURL_CSELECT_IN 0x01 256 | #define CURL_CSELECT_OUT 0x02 257 | #define CURL_CSELECT_ERR 0x04 258 | 259 | typedef int (*curl_socket_callback)(CURL *easy, /* easy handle */ 260 | curl_socket_t s, /* socket */ 261 | int what, /* see above */ 262 | void *userp, /* private callback 263 | pointer */ 264 | void *socketp); /* private socket 265 | pointer */ 266 | /* 267 | * Name: curl_multi_timer_callback 268 | * 269 | * Desc: Called by libcurl whenever the library detects a change in the 270 | * maximum number of milliseconds the app is allowed to wait before 271 | * curl_multi_socket() or curl_multi_perform() must be called 272 | * (to allow libcurl's timed events to take place). 273 | * 274 | * Returns: The callback should return zero. 275 | */ 276 | typedef int (*curl_multi_timer_callback)(CURLM *multi, /* multi handle */ 277 | long timeout_ms, /* see above */ 278 | void *userp); /* private callback 279 | pointer */ 280 | 281 | CURL_EXTERN CURLMcode curl_multi_socket(CURLM *multi_handle, curl_socket_t s, 282 | int *running_handles); 283 | 284 | CURL_EXTERN CURLMcode curl_multi_socket_action(CURLM *multi_handle, 285 | curl_socket_t s, 286 | int ev_bitmask, 287 | int *running_handles); 288 | 289 | CURL_EXTERN CURLMcode curl_multi_socket_all(CURLM *multi_handle, 290 | int *running_handles); 291 | 292 | #ifndef CURL_ALLOW_OLD_MULTI_SOCKET 293 | /* This macro below was added in 7.16.3 to push users who recompile to use 294 | the new curl_multi_socket_action() instead of the old curl_multi_socket() 295 | */ 296 | #define curl_multi_socket(x,y,z) curl_multi_socket_action(x,y,0,z) 297 | #endif 298 | 299 | /* 300 | * Name: curl_multi_timeout() 301 | * 302 | * Desc: Returns the maximum number of milliseconds the app is allowed to 303 | * wait before curl_multi_socket() or curl_multi_perform() must be 304 | * called (to allow libcurl's timed events to take place). 305 | * 306 | * Returns: CURLM error code. 307 | */ 308 | CURL_EXTERN CURLMcode curl_multi_timeout(CURLM *multi_handle, 309 | long *milliseconds); 310 | 311 | #undef CINIT /* re-using the same name as in curl.h */ 312 | 313 | #ifdef CURL_ISOCPP 314 | #define CINIT(name,type,num) CURLMOPT_ ## name = CURLOPTTYPE_ ## type + num 315 | #else 316 | /* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. 
*/ 317 | #define LONG CURLOPTTYPE_LONG 318 | #define OBJECTPOINT CURLOPTTYPE_OBJECTPOINT 319 | #define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT 320 | #define OFF_T CURLOPTTYPE_OFF_T 321 | #define CINIT(name,type,number) CURLMOPT_/**/name = type + number 322 | #endif 323 | 324 | typedef enum { 325 | /* This is the socket callback function pointer */ 326 | CINIT(SOCKETFUNCTION, FUNCTIONPOINT, 1), 327 | 328 | /* This is the argument passed to the socket callback */ 329 | CINIT(SOCKETDATA, OBJECTPOINT, 2), 330 | 331 | /* set to 1 to enable pipelining for this multi handle */ 332 | CINIT(PIPELINING, LONG, 3), 333 | 334 | /* This is the timer callback function pointer */ 335 | CINIT(TIMERFUNCTION, FUNCTIONPOINT, 4), 336 | 337 | /* This is the argument passed to the timer callback */ 338 | CINIT(TIMERDATA, OBJECTPOINT, 5), 339 | 340 | /* maximum number of entries in the connection cache */ 341 | CINIT(MAXCONNECTS, LONG, 6), 342 | 343 | /* maximum number of (pipelining) connections to one host */ 344 | CINIT(MAX_HOST_CONNECTIONS, LONG, 7), 345 | 346 | /* maximum number of requests in a pipeline */ 347 | CINIT(MAX_PIPELINE_LENGTH, LONG, 8), 348 | 349 | /* a connection with a content-length longer than this 350 | will not be considered for pipelining */ 351 | CINIT(CONTENT_LENGTH_PENALTY_SIZE, OFF_T, 9), 352 | 353 | /* a connection with a chunk length longer than this 354 | will not be considered for pipelining */ 355 | CINIT(CHUNK_LENGTH_PENALTY_SIZE, OFF_T, 10), 356 | 357 | /* a list of site names(+port) that are blacklisted from 358 | pipelining */ 359 | CINIT(PIPELINING_SITE_BL, OBJECTPOINT, 11), 360 | 361 | /* a list of server types that are blacklisted from 362 | pipelining */ 363 | CINIT(PIPELINING_SERVER_BL, OBJECTPOINT, 12), 364 | 365 | /* maximum number of open connections in total */ 366 | CINIT(MAX_TOTAL_CONNECTIONS, LONG, 13), 367 | 368 | CURLMOPT_LASTENTRY /* the last unused */ 369 | } CURLMoption; 370 | 371 | 372 | /* 373 | * Name: curl_multi_setopt() 374 | * 375 | * Desc: Sets options for the multi handle. 376 | * 377 | * Returns: CURLM error code. 378 | */ 379 | CURL_EXTERN CURLMcode curl_multi_setopt(CURLM *multi_handle, 380 | CURLMoption option, ...); 381 | 382 | 383 | /* 384 | * Name: curl_multi_assign() 385 | * 386 | * Desc: This function sets an association in the multi handle between the 387 | * given socket and a private pointer of the application. This is 388 | * (only) useful for curl_multi_socket uses. 389 | * 390 | * Returns: CURLM error code. 391 | */ 392 | CURL_EXTERN CURLMcode curl_multi_assign(CURLM *multi_handle, 393 | curl_socket_t sockfd, void *sockp); 394 | 395 | #ifdef __cplusplus 396 | } /* end of extern "C" */ 397 | #endif 398 | 399 | #endif 400 | --------------------------------------------------------------------------------