├── .gitignore ├── .gitmodules ├── README.md ├── apps ├── build_all.sh ├── httpd │ ├── build.sh │ ├── config-appendix │ ├── config-appendix-segfault │ ├── gen_config.sh │ └── watchdog-inject-wait.patch ├── leveldb │ └── build.sh ├── mysql │ ├── .gitignore │ ├── build.sh │ ├── my.cnf │ ├── sync-mode.patch │ └── throughputfix.patch ├── nginx │ ├── build.sh │ └── nginx.conf ├── redis │ ├── build.sh │ ├── force-1000.patch │ ├── inject-cpu-hog.patch │ ├── inject-delay-orbit.patch │ ├── inject-delay.patch │ ├── inject-fault.patch │ └── inject-oom.patch └── varnish │ └── build.sh ├── experiments ├── README.md ├── async-sync │ ├── README.md │ ├── plot.py │ ├── plot.sh │ ├── qps.py │ └── run.sh ├── fork-ob-orig │ ├── README.md │ ├── plot.py │ ├── plot.sh │ ├── qps.py │ └── run.sh ├── incone.sh ├── isolation │ ├── apache-segfault.md │ ├── nginx-segfault.md │ ├── rdb-segfault.md │ ├── redis-cpu-hog.md │ ├── redis-memleak-payload │ ├── redis-memleak.md │ ├── redis-oom.md │ └── watchdog-diagnosis.md ├── micro-call │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── avg.py │ ├── plot.py │ ├── plot.sh │ ├── run.sh │ └── snap.cpp ├── throughput │ ├── .gitignore │ ├── README.md │ ├── collect.sh │ ├── leveldb │ │ └── run.sh │ ├── mysql │ │ ├── run.sh │ │ └── sysbench.sh │ ├── nginx │ │ ├── Makefile │ │ ├── mix.cpp │ │ └── run.sh │ ├── plot.py │ ├── plot.sh │ ├── proxy │ │ ├── Makefile │ │ ├── mix.cpp │ │ └── run.sh │ ├── rdb │ │ ├── param │ │ ├── redis.conf │ │ └── run.sh │ ├── run_batch.sh │ ├── slowlog │ │ ├── param │ │ ├── redis.conf │ │ └── run.sh │ ├── varnish │ │ ├── config.vcl │ │ └── run.sh │ └── watchdog │ │ └── run.sh └── tools │ ├── .gitignore │ ├── build_all.sh │ ├── install_sysbench.sh │ ├── install_ycsb.sh │ └── ycsb-customize.patch ├── modulefiles ├── httpd │ ├── orig │ ├── proxy │ ├── segfault │ ├── watchdog │ └── watchdog-inject ├── leveldb │ ├── orbit │ └── orig ├── mysql │ ├── fork │ ├── orbit │ ├── orig │ └── sync ├── nginx │ ├── orbit │ ├── orig │ └── segfault 
├── redis │ ├── cpu-hog │ ├── memleak │ ├── oom │ ├── orig │ ├── rdb │ ├── rdb-fault │ ├── slowlog │ └── slowlog-delay ├── userlib │ ├── dealloc │ ├── plain │ └── reuse └── varnish │ ├── orbit │ └── orig ├── patches └── userlib_reuse.patch └── scripts ├── 1k.html ├── alias.sh ├── build_compiler_support.sh ├── build_kernel.sh ├── build_userlib.sh ├── fix-modulefiles.sh ├── guest_setup.sh ├── mkimg.sh ├── nginx-orbit-test ├── orbit.config └── run-kernel.sh /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | *.log 3 | *.err 4 | *.out 5 | *.rdb 6 | rel-*/ 7 | *.csv 8 | *.eps 9 | *.png 10 | *.pdf 11 | *.swp 12 | qemu-image.img 13 | mount-point.dir/ 14 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "kernel"] 2 | path = kernel 3 | url = https://github.com/OrderLab/orbit-kernel.git 4 | branch = orbit 5 | [submodule "userlib"] 6 | path = userlib 7 | url = https://github.com/OrderLab/orbit-userlib.git 8 | branch = orbit 9 | [submodule "compiler"] 10 | path = compiler 11 | url = https://github.com/OrderLab/orbit-compiler.git 12 | branch = master 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Orbit 2 | 3 | Orbit is OS support for safely and efficiently executing various 4 | types of auxiliary tasks common in modern applications. 5 | 6 | The orbit project consists of: 7 | 8 | - a kernel based on Linux 5.4.91 that implements the orbit abstractions 9 | - a user-level library to provide the APIs for using orbit 10 | - a companion static analyzer to assist developers in porting their existing applications to use orbit. 
11 | 12 | 13 | This root repository contains scripts for setting up the host and virtual machine 14 | for running orbit and the experiments. 15 | 16 | Table of Contents 17 | ====== 18 | * [Getting Started Instructions](#getting-started-instructions) 19 | * [Requirements](#requirements) 20 | * [Host Setup](#host-setup) 21 | * [1. Install toolchain (~2 min)](#1-install-toolchain-2-min) 22 | * [2. Clone the repository](#2-clone-the-repository) 23 | * [3. Build the orbit kernel (~4 min)](#3-build-the-orbit-kernel-4-min) 24 | * [4. Create VM image (~2 min)](#4-create-vm-image-2-min) 25 | * [5. Import shorthands](#5-import-shorthands) 26 | * [Guest VM Setup](#guest-vm-setup) 27 | * [6. Guest environment setup (~3 min)](#6-guest-environment-setup-3-min) 28 | * [7. Environment modules setup (~3 min)](#7-environment-modules-setup-3-min) 29 | * [8. Build the orbit userlib (~ 1min)](#8-build-the-orbit-userlib--1min) 30 | * [Test VM](#test-vm) 31 | * [9. Boot into the VM](#9-boot-into-the-vm) 32 | * [Detailed Instructions](#detailed-instructions) 33 | * [Experiment Setup](#experiment-setup) 34 | * [10. Build applications (~25 min)](#10-build-applications-25-min) 35 | * [11. Build test frameworks (~1 min)](#11-build-test-frameworks-1-min) 36 | * [Run Experiments](#run-experiments-4-h) 37 | 38 | *The estimated build time shown in this doc is based on a 10C 20T CPU machine.* 39 | 40 | # Getting Started Instructions 41 | 42 | ## Requirements 43 | 44 | We will create a QEMU VM to run the orbit kernel, userlib, analyzer, and 45 | evaluated applications. Thus, it is recommended to run the following 46 | instructions on a *bare-metal machine*. 47 | 48 | - Linux with KVM support 49 | - Run `ls /dev/kvm` to see if it exists on the system or not. If it exists, KVM support should work fine. 50 | - Ubuntu 18.04 LTS is recommended, but any system that can run `debootstrap` should work. 
51 | - On x86-64 platform with 10+GB memory, and at least **45GB free disk space** (most space used for the VM image). 52 | - You should have *root privilege* (to install dependent packages and use KVM) 53 | - *Bash* shell 54 | 55 | ## Host Setup 56 | 57 | ### 1. Install toolchain (~2 min) 58 | 59 | - **1.1 Install build dependencies** 60 | 61 | Assuming the host OS is Ubuntu: 62 | 63 | ```bash 64 | sudo apt-get install debootstrap libguestfs-tools qemu-system-x86 qemu-kvm build-essential git 65 | ``` 66 | 67 | - **1.2 Add user to `KVM` group** 68 | 69 | ```bash 70 | sudo usermod -aG kvm $USER 71 | ``` 72 | 73 | This is needed for using KVM support with QEMU. Otherwise, you may get 74 | `Could not access KVM kernel module: Permission denied` error message when 75 | launching the QEMU VM. 76 | 77 | - **1.3 Log out and re-login** 78 | 79 | Before proceeding to the next step, make sure you log out and re-login 80 | in order for the new group membership to take effect. 81 | 82 | ### 2. Clone the repository 83 | 84 | ```bash 85 | git clone https://github.com/OrderLab/orbit.git 86 | cd orbit 87 | ``` 88 | 89 | The remaining operations on the host will all be running at the orbit root directory. 90 | 91 | ### 3. Build the orbit kernel (~4 min) 92 | 93 | Run the provided build script to download the kernel source code and compile it: 94 | 95 | ```bash 96 | ./scripts/build_kernel.sh 97 | ``` 98 | 99 | This takes about 200MB network download and 4 mins build time (10C CPU). 100 | 101 | You will then see a `kernel` folder in the orbit root directory. 102 | 103 | ### 4. Create VM image (~2 min) 104 | 105 | Run the VM image creation script in the orbit directory: 106 | 107 | ```bash 108 | ./scripts/mkimg.sh 109 | ``` 110 | 111 | This creates a ~300MB base image and takes about 2min. 112 | 113 | You will see a 40GB `qemu-image.img` file and a `mount-point.dir` directory in the root directory. 114 | 115 | ### 5. 
Import shorthands 116 | 117 | We also provide a set of shorthands for common operations such as mounting and running on the host: 118 | 119 | | Shorthand | Explanation | 120 | | ---- | ---- | 121 | | `m` | Mount disk image (does not mount if QEMU is running) | 122 | | `um` | Unmount disk image | 123 | | `ch` | `chroot` into mounted disk image (internally requires `sudo`) | 124 | | `r` | Run the VM (fail if image is still mounted) | 125 | | `k` | Force kill QEMU | 126 | 127 | Import the shorthands into the current shell: 128 | ```bash 129 | source scripts/alias.sh 130 | ``` 131 | 132 | For their implementation, see the [scripts/alias.sh](scripts/alias.sh) source code. 133 | 134 | ## Guest VM Setup 135 | 136 | Before running the experiments, we need to set up the guest environment and compile all the applications. This requires mounting the VM image; therefore, the VM needs to be in a shutdown state. 137 | 138 | ### 6. Guest environment setup (~3 min) 139 | 140 | - **6.1 Mount VM image and chroot** 141 | 142 | Mount the VM image with shorthand `m`, and `chroot` to the image root using the shorthand `ch`. You will be dropped into a new interactive shell at the root of the image: 143 | 144 | ```bash 145 | m 146 | ch 147 | root@hostname:/# 148 | ``` 149 | You can run `exit` or press CTRL-D if you want to exit the chroot environment. 150 | 151 | - **6.2 Clone orbit repo in VM** 152 | 153 | In the chroot environment, `cd` to the home directory, and clone the orbit root directory again. 154 | 155 | ```bash 156 | apt update && apt install git 157 | cd ~ 158 | git clone https://github.com/OrderLab/orbit.git 159 | cd orbit 160 | ``` 161 | 162 | - **6.3 Install guest toolchain** 163 | 164 | Set up the guest environment by running: 165 | 166 | ```bash 167 | ./scripts/guest_setup.sh 168 | ``` 169 | 170 | This downloads ~450MB of packages and takes ~2 min to set up. 171 | 172 | #### 7. 
Environment modules setup (~3 min) 173 | 174 | Some experiments require running different versions of applications and/or the orbit 175 | userlib. For easier version management, we use [Environment Modules](http://modules.sourceforge.net) 176 | to manage versions. The `guest_setup.sh` script in 6.3 has installed this dependency. 177 | 178 | Re-enter the chroot environment and try the `module` command to 179 | see if it has been successfully set up. If no `module` command can be found, run the 180 | following in the guest image and re-enter the chroot environment again. 181 | 182 | ```bash 183 | echo '[ -z ${MODULESHOME+x} ] && source /usr/share/modules/init/bash' >> ~/.bashrc 184 | ``` 185 | 186 | We provide a set of pre-written [modulefile](https://modules.readthedocs.io/en/latest/modulefile.html)s in the `modulefiles` directory. By default, they assume that this repository is cloned into `/root/orbit` in the guest VM. If you have a different clone path, run the fix-up script in `./scripts/fix-modulefiles.sh`. 187 | 188 | Set up `MODULEPATH` by running: 189 | 190 | ```bash 191 | echo 'export MODULEPATH=/root/orbit/modulefiles' >> ~/.bashrc 192 | ``` 193 | or run the command that `./scripts/fix-modulefiles.sh` generated in its output. 194 | 195 | Exit the chroot environment and `ch` back again, try `module avail`, and you 196 | will see a list of different versions of software. Note that at this point, 197 | those software versions are not actually available yet since we have not compiled them. 198 | We will compile them in the next two sections. 199 | 200 | #### 8. Build the orbit userlib (~ 1min) 201 | 202 | We need to install the user-level library for the applications to use 203 | orbit. 204 | 205 | Run the userlib build script inside the orbit directory in the chroot environment: 206 | 207 | ```bash 208 | ./scripts/build_userlib.sh 209 | ``` 210 | 211 | This will download the userlib and compile it. 212 | 213 | ## Test VM 214 | 215 | ### 9. 
Boot into the VM 216 | 217 | At this point, we can boot into the built VM. 218 | 219 | Run the shorthand: 220 | ```bash 221 | r 222 | ``` 223 | 224 | You will be dropped into a guest VM's tty. The default login user is `root`, and the password is empty. 225 | 226 | To shut down the VM, run `shutdown -h now` in the guest's shell. Or, since we 227 | also added shutdown to the bash logout script when executing `guest_setup.sh`, 228 | you can shut down the VM by pressing `CTRL-D` or running `logout` in the VM. 229 | 230 | **Note 1**: By default, we run the VM with the `-nographic` QEMU option, i.e., no 231 | video output. In this mode, the kernel outputs through an emulated serial console, 232 | and the serial console protocol does not support automatic geometry resizing. 233 | Therefore, every time after your terminal has been resized, make sure to run 234 | `resize` in the guest VM. 235 | 236 | **Note 2**: If in some cases the kernel gets stuck during shutdown due to an orbit 237 | bug in the kernel code, you can press `Ctrl-A x` to force shutdown the QEMU, but 238 | this has the risk of corrupting data. 239 | 240 | # Detailed Instructions 241 | 242 | Now we proceed to test six real-world applications with orbit: MySQL, 243 | Apache HTTPD, Nginx, Redis, LevelDB, and Varnish. 244 | 245 | ## Experiment Setup 246 | 247 | ### 10. Build applications (~25 min) 248 | 249 | Run our script to automatically download and compile all application versions 250 | for the experiments: 251 | 252 | ```bash 253 | ./apps/build_all.sh 254 | ``` 255 | 256 | This will download ~160MB and takes an additional 25 min to build. 257 | 258 | ### 11. Build test frameworks (~1 min) 259 | 260 | Run our script to automatically download and compile the test frameworks: 261 | 262 | ```bash 263 | ./experiments/tools/build_all.sh 264 | ``` 265 | This will download ~60MB and takes 1 min to build. 
266 | 267 | ## Run Experiments (~4 h) 268 | 269 | Please go to [`experiments`](experiments) directory to see the list of experiments and their usages. 270 | -------------------------------------------------------------------------------- /apps/build_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | for f in httpd leveldb nginx redis varnish mysql; do 7 | $f/build.sh 8 | done 9 | -------------------------------------------------------------------------------- /apps/httpd/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | git clone https://github.com/OrderLab/obiwan-httpd.git code 7 | cd code 8 | 9 | module load userlib/reuse 10 | 11 | function build { 12 | commit=$1 13 | target=$2 14 | appendix=$3 15 | flags=$4 16 | 17 | git checkout $commit 18 | ./buildconf 19 | ./configure --with-included-apr --prefix=$SCRIPT_DIR/$target CFLAGS="-O3" $flags 20 | make -j$(nproc) 21 | make install 22 | make clean 23 | $SCRIPT_DIR/gen_config.sh $SCRIPT_DIR/$target/ $appendix 24 | } 25 | 26 | build 3e3b673 rel-proxy config-appendix LDFLAGS="-lorbit" 27 | build 48ff29d rel-watchdog config-appendix LDFLAGS="-lorbit" 28 | 29 | git checkout 48ff29d 30 | git apply < $SCRIPT_DIR/watchdog-inject-wait.patch 31 | ./buildconf 32 | ./configure --with-included-apr --prefix=$SCRIPT_DIR/rel-watchdog-inject CFLAGS="-O3" LDFLAGS="-lorbit" 33 | make -j$(nproc) 34 | make install 35 | make clean 36 | git checkout -- . 
37 | $SCRIPT_DIR/gen_config.sh $SCRIPT_DIR/rel-watchdog-inject/ config-appendix 38 | 39 | build 2c250083 rel-orig config-appendix 40 | build 296a0ae5d rel-segfault config-appendix-segfault 41 | -------------------------------------------------------------------------------- /apps/httpd/config-appendix: -------------------------------------------------------------------------------- 1 | Header add Set-Cookie "ROUTEID=.%{BALANCER_WORKER_ROUTE}e; path=/" env=BALANCER_ROUTE_CHANGED 2 | 3 | BalancerMember http://fe01:1111 min=0 smax=30 retry=30 max=100 loadfactor=50 route=fe01 4 | BalancerMember http://fe02:1112 min=0 smax=30 retry=30 max=100 loadfactor=50 route=fe02 5 | ProxySet stickysession=ROUTEID 6 | 7 | 8 | ProxyPass "/somepath/" "balancer://myset/" 9 | ProxyPass "/hello" "http://localhost:1115" 10 | 11 | 12 | SetHandler balancer-manager 13 | 14 | 15 | 16 | StartServers 1 17 | MinSpareThreads 4 18 | MaxSpareThreads 4 19 | ThreadsPerChild 4 20 | MaxRequestWorkers 4 21 | MaxConnectionsPerChild 0 22 | ServerLimit 1 23 | 24 | -------------------------------------------------------------------------------- /apps/httpd/config-appendix-segfault: -------------------------------------------------------------------------------- 1 | Header add Set-Cookie "ROUTEID=.%{BALANCER_WORKER_ROUTE}e; path=/" env=BALANCER_ROUTE_CHANGED 2 | 3 | BalancerMember http://fe01:1111 min=0 smax=30 retry=30 max=100 loadfactor=50 route=fe01 redirect=fe02 4 | BalancerMember http://fe02:1112 min=0 smax=30 retry=30 max=100 loadfactor=50 route=fe02 redirect=fe01 5 | BalancerMember http://fe03:1113 min=0 smax=30 retry=30 max=100 loadfactor=50 route=fe03 6 | ProxySet stickysession=ROUTEID 7 | 8 | 9 | ProxyPass "/somepath/" "balancer://myset/" 10 | ProxyPass "/hello" "http://localhost:1115" 11 | 12 | 13 | SetHandler balancer-manager 14 | 15 | 16 | 17 | StartServers 1 18 | MinSpareThreads 4 19 | MaxSpareThreads 4 20 | ThreadsPerChild 4 21 | MaxRequestWorkers 4 22 | MaxConnectionsPerChild 0 23 | 
ServerLimit 1 24 | 25 | -------------------------------------------------------------------------------- /apps/httpd/gen_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | if [[ "x$1" == 'x' ]]; then 7 | echo "Need first argument as HTTPD install root" 8 | exit 1; 9 | fi 10 | root=$1 11 | 12 | if [[ "x$2" == 'x' ]]; then 13 | echo "Need second argument as appendix path" 14 | exit 1; 15 | fi 16 | appendix=$2 17 | 18 | origpath=$root/conf/original/httpd.conf 19 | targetpath=$root/conf/httpd.conf 20 | 21 | if [ ! -f $origpath ]; then 22 | echo "Cannot find original httpd.conf file at path" $origpath 23 | exit 1 24 | fi 25 | 26 | cat $origpath \ 27 | | sed 's/^Listen 80$/Listen 8080/' \ 28 | | sed 's/^#LoadModule proxy_module/LoadModule proxy_module/' \ 29 | | sed 's/^#LoadModule proxy_http_module/LoadModule proxy_http_module/' \ 30 | | sed 's/^#LoadModule proxy_balancer_module/LoadModule proxy_balancer_module/' \ 31 | | sed 's/^#LoadModule slotmem_shm_module/LoadModule slotmem_shm_module/' \ 32 | | sed 's/^#LoadModule lbmethod_byrequests_module/LoadModule lbmethod_byrequests_module/' \ 33 | | sed 's/^User daemon/#User daemon/' \ 34 | | sed 's/^Group daemon/#Group daemon/' \ 35 | | sed 's/^#ServerName www.example.com:80/ServerName localhost:8080/' \ 36 | > $targetpath 37 | 38 | cat $appendix >> $targetpath 39 | -------------------------------------------------------------------------------- /apps/httpd/watchdog-inject-wait.patch: -------------------------------------------------------------------------------- 1 | diff --git a/modules/http/http_request.c b/modules/http/http_request.c 2 | index 666b099..4e633b8 100644 3 | --- a/modules/http/http_request.c 4 | +++ b/modules/http/http_request.c 5 | @@ -50,7 +50,7 @@ 6 | #endif 7 | 8 | #include 9 | -#define INJECT_WAIT 0 10 | +#define INJECT_WAIT 1 11 | 
12 | APLOG_USE_MODULE(http); 13 | 14 | diff --git a/srclib/apr/locks/unix/thread_mutex.c b/srclib/apr/locks/unix/thread_mutex.c 15 | index 01858a6..f079c28 100644 16 | --- a/srclib/apr/locks/unix/thread_mutex.c 17 | +++ b/srclib/apr/locks/unix/thread_mutex.c 18 | @@ -30,7 +30,7 @@ 19 | // FIXME: using a fixed size slots and expensive traverse just for PoC 20 | #define NSLOTS 1024 21 | #define OBWDG_INTERVAL 1 22 | -#define OBWDG_TIMEOUT 60 23 | +#define OBWDG_TIMEOUT 10 24 | // #define OBWDG_TIMEOUT 10 25 | 26 | #if 0 27 | -------------------------------------------------------------------------------- /apps/leveldb/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | module load userlib/dealloc 4 | 5 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 6 | cd $SCRIPT_DIR 7 | 8 | git clone --recurse-submodules https://github.com/OrderLab/obiwan-leveldb.git code 9 | cd code 10 | mkdir -p build && cd build 11 | 12 | git checkout acf8ea1a 13 | cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$SCRIPT_DIR/rel-orbit .. 14 | make -j$(nproc) 15 | mkdir -p $SCRIPT_DIR/rel-orbit/bin 16 | cp db_bench $SCRIPT_DIR/rel-orbit/bin 17 | 18 | git checkout 1730a1a0 19 | cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$SCRIPT_DIR/rel-orig .. 
20 | make -j$(nproc) 21 | mkdir -p $SCRIPT_DIR/rel-orig/bin 22 | cp db_bench $SCRIPT_DIR/rel-orig/bin 23 | -------------------------------------------------------------------------------- /apps/mysql/.gitignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | data/ 3 | boost*/ 4 | -------------------------------------------------------------------------------- /apps/mysql/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | module load userlib/plain 7 | 8 | boost_dir=$SCRIPT_DIR/boost_1_59_0 9 | 10 | build_dir=$SCRIPT_DIR/build 11 | 12 | git clone https://github.com/OrderLab/obiwan-mysql.git code 13 | srcdir=$SCRIPT_DIR/code 14 | 15 | function build { 16 | commit=$1 17 | dest=$SCRIPT_DIR/$2 18 | patch=$3 19 | 20 | rm -rf $build_dir 21 | mkdir -p $build_dir 22 | 23 | dist=$dest/dist 24 | data=$dest/data 25 | 26 | mkdir -p $dist $data 27 | 28 | cd $srcdir 29 | git checkout $commit 30 | if [ ! -z "$patch" ]; then 31 | git apply $SCRIPT_DIR/$patch 32 | fi 33 | 34 | cd $build_dir 35 | cmake $srcdir -DCMAKE_INSTALL_PREFIX=$dist \ 36 | -DMYSQL_DATADIR=$data -DCMAKE_BUILD_TYPE=RelWithDebInfo \ 37 | -DCMAKE_EXPORT_COMPILE_COMMANDS=1 -DMYSQL_MAINTAINER_MODE=false \ 38 | -DDOWNLOAD_BOOST=1 -DWITH_BOOST=$boost_dir \ 39 | -DWITH_EMBEDDED_SERVER=0 -DWITH_EMBEDDED_SHARED_LIBRARY=0 40 | 41 | make -j$(nproc) 42 | make install 43 | 44 | if [ ! -f $dist/etc/my.cnf ]; then 45 | mkdir -p $dist/etc 46 | cp $SCRIPT_DIR/my.cnf $dist/etc/ 47 | echo "log-error = ${dist}/error.log" >> $dist/etc/my.cnf 48 | 49 | cd $dist 50 | bin/mysqld --initialize-insecure --user=root 51 | fi 52 | 53 | cd $srcdir 54 | git checkout -- . 
55 | 56 | rm -rf $build_dir 57 | } 58 | 59 | function build_orig { 60 | build 0fff8c36 rel-orig 61 | } 62 | function build_orbit { 63 | build 23ef2177 rel-orbit 64 | } 65 | function build_fork { 66 | build 98308f96 rel-fork 67 | pkill -9 mysqld 68 | } 69 | function build_sync { 70 | build 011edc32 rel-sync sync-mode.patch 71 | } 72 | function build_all { 73 | for t in orig orbit fork sync; do 74 | build_$t 75 | done 76 | } 77 | 78 | for t in "$@"; do 79 | build_$t 80 | done 81 | 82 | if [[ "$@" == "" ]]; then 83 | build_all 84 | fi 85 | -------------------------------------------------------------------------------- /apps/mysql/my.cnf: -------------------------------------------------------------------------------- 1 | # http://dev.mysql.com/doc/mysql/en/server-system-variables.html 2 | [mysqld] 3 | pid-file = /tmp/mysqld.pid 4 | socket = /tmp/mysql.sock 5 | # By default we only accept connections from localhost 6 | bind-address = 127.0.0.1 7 | # Disabling symbolic-links is recommended to prevent assorted security risks 8 | symbolic-links=0 9 | innodb_deadlock_detect=1 10 | max_connections=500 11 | innodb_rollback_on_timeout=1 12 | innodb_lock_wait_timeout=10 13 | -------------------------------------------------------------------------------- /apps/mysql/sync-mode.patch: -------------------------------------------------------------------------------- 1 | diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc 2 | index e72566ea..7f5a660f 100644 3 | --- a/storage/innobase/lock/lock0lock.cc 4 | +++ b/storage/innobase/lock/lock0lock.cc 5 | @@ -71,7 +71,7 @@ Created 5/7/1996 Heikki Tuuri 6 | orbit_scratch_push_update((scratch), &(lval), sizeof(rval)); \ 7 | } while (0) 8 | 9 | -#define OB_ASYNC_MODE 10 | +// #define OB_ASYNC_MODE 11 | 12 | /* Flag to enable/disable deadlock detector. 
*/ 13 | my_bool innobase_deadlock_detect = TRUE; 14 | -------------------------------------------------------------------------------- /apps/mysql/throughputfix.patch: -------------------------------------------------------------------------------- 1 | diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc 2 | index e72566ea..677ae173 100644 3 | --- a/storage/innobase/lock/lock0lock.cc 4 | +++ b/storage/innobase/lock/lock0lock.cc 5 | @@ -77,16 +77,14 @@ Created 5/7/1996 Heikki Tuuri 6 | my_bool innobase_deadlock_detect = TRUE; 7 | 8 | /** Total number of cached record locks */ 9 | -static const ulint REC_LOCK_CACHE = 1; 10 | -// static const ulint REC_LOCK_CACHE = 8; 11 | +static const ulint REC_LOCK_CACHE = 8; 12 | 13 | /** Maximum record lock size in bytes */ 14 | static const ulint REC_LOCK_SIZE = sizeof(ib_lock_t); 15 | static const ulint REC_LOCK_BITMAP_SIZE = 256; 16 | 17 | /** Total number of cached table locks */ 18 | -static const ulint TABLE_LOCK_CACHE = 1; 19 | -// static const ulint TABLE_LOCK_CACHE = 8; 20 | +static const ulint TABLE_LOCK_CACHE = 8; 21 | 22 | /** Size in bytes, of the table lock instance */ 23 | static const ulint TABLE_LOCK_SIZE = sizeof(ib_lock_t); 24 | @@ -1546,6 +1544,8 @@ RecLock::lock_alloc( 25 | orbit_alloc(table_lock_oballoc, sizeof(*lock))); 26 | lock->un_member.rec_lock.bits = reinterpret_cast( 27 | orbit_alloc(rec_lock_oballoc, size)); 28 | + trx->lock.rec_pool.push_back(lock); 29 | + ++trx->lock.rec_cached; 30 | obprintf(stderr, "Orbit allocated rec lock %p from pool\n", lock); 31 | } else { 32 | 33 | @@ -2050,10 +2050,10 @@ lock_rec_lock_fast( 34 | } 35 | 36 | /* Orbit task cancellation */ 37 | - if (status == LOCK_REC_SUCCESS_CREATED && trx->has_orbit) { 38 | + /* if (status == LOCK_REC_SUCCESS_CREATED && trx->has_orbit) { 39 | orbit_cancel_by_task(&trx->dl_ck_task); 40 | trx->has_orbit = false; 41 | - } 42 | + } */ 43 | 44 | return(status); 45 | } 46 | @@ -3842,6 +3842,8 @@ 
lock_table_create( 47 | obprintf(stderr, "Orbit allocating table lock from orbit pool.\n"); 48 | lock = reinterpret_cast( 49 | orbit_alloc(table_lock_oballoc, sizeof(*lock))); 50 | + trx->lock.table_pool.push_back(lock); 51 | + ++trx->lock.table_cached; 52 | obprintf(stderr, "Orbit allocated table lock %p from pool\n", lock); 53 | 54 | /* lock = static_cast( 55 | -------------------------------------------------------------------------------- /apps/nginx/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | module load userlib/reuse 7 | 8 | git clone https://github.com/OrderLab/obiwan-nginx.git code 9 | cd code 10 | 11 | function build { 12 | commit=$1 13 | target=$2 14 | flags=$3 15 | 16 | git checkout $commit 17 | ./auto/configure --with-http_dav_module --with-debug --prefix=$SCRIPT_DIR/$target $flags 18 | make -j$(nproc) 19 | make install 20 | mkdir -p $SCRIPT_DIR/$target/html/dd/ 21 | chmod 777 $SCRIPT_DIR/$target/html/dd/ 22 | cp $SCRIPT_DIR/nginx.conf $SCRIPT_DIR/$target/conf/nginx.conf 23 | } 24 | 25 | build 69983de8 rel-orbit --with-ld-opt=-lorbit 26 | build 967b1216 rel-orig 27 | build 0a199f00 rel-segfault 28 | -------------------------------------------------------------------------------- /apps/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes 1; 2 | 3 | events { 4 | worker_connections 1024; 5 | } 6 | 7 | http { 8 | include mime.types; 9 | default_type application/octet-stream; 10 | 11 | sendfile on; 12 | 13 | keepalive_timeout 65; 14 | 15 | server { 16 | listen 80; 17 | server_name localhost; 18 | 19 | location / { 20 | root html; 21 | index index.html index.htm; 22 | } 23 | 24 | location /dd { 25 | root html; 26 | client_body_temp_path dav_temp; 27 | 28 | dav_methods PUT DELETE MKCOL; 29 | 30 | create_full_put_path 
off; 31 | dav_access user:r all:r; 32 | autoindex on; 33 | 34 | if (-d $request_filename) { 35 | rewrite ^(.*[^/])$ $1/ break; 36 | } 37 | } 38 | 39 | error_page 500 502 503 504 /50x.html; 40 | location = /50x.html { 41 | root html; 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /apps/redis/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | module load userlib/dealloc 7 | 8 | git clone https://github.com/OrderLab/obiwan-redis.git code 9 | cd code 10 | make distclean 11 | 12 | function build { 13 | commit=$1 14 | target=$2 15 | patch=$3 16 | 17 | git checkout $commit 18 | [ ! -z "$patch" ] && git apply $SCRIPT_DIR/$patch 19 | make -j$(nproc) 20 | make install PREFIX=$SCRIPT_DIR/$target 21 | make distclean 22 | git checkout -- . 23 | } 24 | 25 | build 0122d7a rel-slowlog force-1000.patch 26 | build 0122d7a rel-oom inject-oom.patch 27 | build 0122d7a rel-cpu-hog inject-cpu-hog.patch 28 | build 7d8197d rel-rdb 29 | build 7d8197d rel-rdb-fault inject-fault.patch 30 | build 7a0dc14 rel-orig 31 | build 843b3d9 rel-memleak inject-delay.patch 32 | build 0122d7a rel-slowlog-delay inject-delay-orbit.patch 33 | -------------------------------------------------------------------------------- /apps/redis/force-1000.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/slowlog.c b/src/slowlog.c 2 | index 2bbb9b2..4ba445a 100644 3 | --- a/src/slowlog.c 4 | +++ b/src/slowlog.c 5 | @@ -180,10 +180,16 @@ unsigned long slowlogPushEntry_orbit(void *store, void *_args) { 6 | } 7 | 8 | void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration) { 9 | + static int counter = 0; 10 | + if (++counter >= 1000) { 11 | + counter = 0; 12 | + goto force_run; 13 | + } 14 | if 
(server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */ 15 | /* FIXME: this changes default behavior that trims slowlog entires every time */ 16 | if (!(duration >= server.slowlog_log_slower_than)) return; 17 | 18 | +force_run: 19 | printd("In slowlog push\n"); 20 | 21 | // pre-get c->peerid 22 | -------------------------------------------------------------------------------- /apps/redis/inject-cpu-hog.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/slowlog.c b/src/slowlog.c 2 | index 2bbb9b2c8..94a33ad21 100644 3 | --- a/src/slowlog.c 4 | +++ b/src/slowlog.c 5 | @@ -142,6 +142,14 @@ void *slowlogInit_orbit(void) { 6 | return NULL; 7 | } 8 | 9 | +void slowlog_orbit_create() { 10 | + char cmd[60]; 11 | + slowlog_orbit = orbit_create("slowlog", slowlogPushEntry_orbit, slowlogInit_orbit); 12 | + sprintf(cmd, "cgclassify -g cpu:/cpulimit %d", slowlog_orbit->gobid); 13 | + fprintf(stderr, "cmd: %s\n", cmd); 14 | + system(cmd); 15 | +} 16 | + 17 | /* Initialize the slow log. This function should be called a single time 18 | * at server startup. */ 19 | void slowlogInit(void) { 20 | @@ -152,7 +160,7 @@ void slowlogInit(void) { 21 | slowlog_scratch_pool->mode = ORBIT_MOVE; 22 | orbit_scratch_set_pool(slowlog_scratch_pool); 23 | 24 | - slowlog_orbit = orbit_create("slowlog", slowlogPushEntry_orbit, slowlogInit_orbit); 25 | + slowlog_orbit_create(); 26 | } 27 | 28 | /* Push a new entry into the slow log. 
29 | @@ -176,14 +184,20 @@ unsigned long slowlogPushEntry_orbit(void *store, void *_args) { 30 | (void)store; 31 | slowlog_push_orbit_args *args = (slowlog_push_orbit_args*)_args; 32 | slowlogPushEntry_real(args->c, args->argv, args->argc, args->duration); 33 | + time_t t1 = time(NULL); 34 | + while (time(NULL) - t1 < 10) 35 | + ; 36 | return 0; 37 | } 38 | 39 | void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration) { 40 | + int ret; 41 | + goto force_run; 42 | if (server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */ 43 | /* FIXME: this changes default behavior that trims slowlog entires every time */ 44 | if (!(duration >= server.slowlog_log_slower_than)) return; 45 | 46 | +force_run: 47 | printd("In slowlog push\n"); 48 | 49 | // pre-get c->peerid 50 | @@ -192,7 +206,11 @@ void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long durati 51 | 52 | // slowlogPushEntryIfNeededReal(c, argv, argc, duration); 53 | slowlog_push_orbit_args args = { c, argv, argc, duration, }; 54 | - orbit_call_async(slowlog_orbit, ORBIT_NORETVAL, 1, &slowlog_pool, NULL, &args, sizeof(args), NULL); 55 | + ret = orbit_call_async(slowlog_orbit, ORBIT_NORETVAL, 1, &slowlog_pool, NULL, &args, sizeof(args), NULL); 56 | + if (ret != 0) { 57 | + fprintf(stderr, "recreating slowlog orbit task\n"); 58 | + slowlog_orbit_create(); 59 | + } 60 | } 61 | 62 | /* Remove all the entries from the current slow log. 
*/ 63 | -------------------------------------------------------------------------------- /apps/redis/inject-delay-orbit.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/object.c b/src/object.c 2 | index 466ec98..cf5bf9e 100644 3 | --- a/src/object.c 4 | +++ b/src/object.c 5 | @@ -358,8 +358,13 @@ void incrRefCount(robj *o) { 6 | if (o->refcount != OBJ_SHARED_REFCOUNT) o->refcount++; 7 | } 8 | 9 | +extern robj *barptr; 10 | void decrRefCount(robj *o) { 11 | if (o->refcount == 1) { 12 | + if (o == barptr) { 13 | + usleep(10000); 14 | + fprintf(stderr, "bar obj free\n"); 15 | + } 16 | switch(o->type) { 17 | case OBJ_STRING: freeStringObject(o); break; 18 | case OBJ_LIST: freeListObject(o); break; 19 | @@ -374,6 +379,10 @@ void decrRefCount(robj *o) { 20 | else 21 | zfree(o); 22 | } else { 23 | + if (o == barptr) { 24 | + usleep(10000); 25 | + fprintf(stderr, "bar obj dec\n"); 26 | + } 27 | if (o->refcount <= 0) serverPanic("decrRefCount against refcount <= 0"); 28 | if (o->refcount != OBJ_SHARED_REFCOUNT) o->refcount--; 29 | } 30 | diff --git a/src/t_string.c b/src/t_string.c 31 | index 75375f4..1d95471 100644 32 | --- a/src/t_string.c 33 | +++ b/src/t_string.c 34 | @@ -64,6 +64,7 @@ static int checkStringLength(client *c, long long size) { 35 | #define OBJ_SET_EX (1<<2) /* Set if time in seconds is given */ 36 | #define OBJ_SET_PX (1<<3) /* Set if time in ms in given */ 37 | 38 | +robj *barptr; 39 | void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire, int unit, robj *ok_reply, robj *abort_reply) { 40 | long long milliseconds = 0; /* initialized to avoid any harmness warning */ 41 | 42 | @@ -84,6 +85,7 @@ void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire, 43 | return; 44 | } 45 | setKey(c->db,key,val); 46 | + barptr = val; 47 | server.dirty++; 48 | if (expire) setExpire(c,c->db,key,mstime()+milliseconds); 49 | 
notifyKeyspaceEvent(NOTIFY_STRING,"set",key,c->db->id); 50 | -------------------------------------------------------------------------------- /apps/redis/inject-delay.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/object.c b/src/object.c 2 | index 4e3080f..e0906c2 100644 3 | --- a/src/object.c 4 | +++ b/src/object.c 5 | @@ -307,8 +307,13 @@ void incrRefCount(robj *o) { 6 | if (o->refcount != OBJ_SHARED_REFCOUNT) o->refcount++; 7 | } 8 | 9 | +extern robj *barptr; 10 | void decrRefCount(robj *o) { 11 | if (o->refcount == 1) { 12 | + if (o == barptr) { 13 | + usleep(10000); 14 | + fprintf(stderr, "bar obj free\n"); 15 | + } 16 | switch(o->type) { 17 | case OBJ_STRING: freeStringObject(o); break; 18 | case OBJ_LIST: freeListObject(o); break; 19 | @@ -320,6 +325,10 @@ void decrRefCount(robj *o) { 20 | } 21 | zfree(o); 22 | } else { 23 | + if (o == barptr) { 24 | + usleep(10000); 25 | + fprintf(stderr, "bar obj dec\n"); 26 | + } 27 | if (o->refcount <= 0) serverPanic("decrRefCount against refcount <= 0"); 28 | if (o->refcount != OBJ_SHARED_REFCOUNT) o->refcount--; 29 | } 30 | diff --git a/src/t_string.c b/src/t_string.c 31 | index 75375f4..1d95471 100644 32 | --- a/src/t_string.c 33 | +++ b/src/t_string.c 34 | @@ -64,6 +64,7 @@ static int checkStringLength(client *c, long long size) { 35 | #define OBJ_SET_EX (1<<2) /* Set if time in seconds is given */ 36 | #define OBJ_SET_PX (1<<3) /* Set if time in ms in given */ 37 | 38 | +robj *barptr; 39 | void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire, int unit, robj *ok_reply, robj *abort_reply) { 40 | long long milliseconds = 0; /* initialized to avoid any harmness warning */ 41 | 42 | @@ -84,6 +85,7 @@ void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire, 43 | return; 44 | } 45 | setKey(c->db,key,val); 46 | + barptr = val; 47 | server.dirty++; 48 | if (expire) setExpire(c,c->db,key,mstime()+milliseconds); 49 
| notifyKeyspaceEvent(NOTIFY_STRING,"set",key,c->db->id); 50 | -------------------------------------------------------------------------------- /apps/redis/inject-fault.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/rdb.c b/src/rdb.c 2 | index 972dac4..d2e9a2d 100644 3 | --- a/src/rdb.c 4 | +++ b/src/rdb.c 5 | @@ -1159,7 +1159,7 @@ int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) { 6 | fprintf(stderr, "orbit: RDB process is gone, spawn a new one\n"); 7 | rdb_orbit = orbit_create("rdb save", rdbSave_orbit, NULL); 8 | } 9 | - static bool do_inject = false; 10 | + static bool do_inject = true; 11 | static int inject_counter = 0; 12 | 13 | start = ustime(); 14 | -------------------------------------------------------------------------------- /apps/redis/inject-oom.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/slowlog.c b/src/slowlog.c 2 | index 2bbb9b2c8..e1a2eab60 100644 3 | --- a/src/slowlog.c 4 | +++ b/src/slowlog.c 5 | @@ -142,6 +142,14 @@ void *slowlogInit_orbit(void) { 6 | return NULL; 7 | } 8 | 9 | +void slowlog_orbit_create() { 10 | + char cmd[60]; 11 | + slowlog_orbit = orbit_create("slowlog", slowlogPushEntry_orbit, slowlogInit_orbit); 12 | + sprintf(cmd, "cgclassify -g memory:/memorylimit %d", slowlog_orbit->gobid); 13 | + fprintf(stderr, "cmd: %s\n", cmd); 14 | + system(cmd); 15 | +} 16 | + 17 | /* Initialize the slow log. This function should be called a single time 18 | * at server startup. */ 19 | void slowlogInit(void) { 20 | @@ -152,7 +160,7 @@ void slowlogInit(void) { 21 | slowlog_scratch_pool->mode = ORBIT_MOVE; 22 | orbit_scratch_set_pool(slowlog_scratch_pool); 23 | 24 | - slowlog_orbit = orbit_create("slowlog", slowlogPushEntry_orbit, slowlogInit_orbit); 25 | + slowlog_orbit_create(); 26 | } 27 | 28 | /* Push a new entry into the slow log. 
29 | @@ -176,14 +184,20 @@ unsigned long slowlogPushEntry_orbit(void *store, void *_args) { 30 | (void)store; 31 | slowlog_push_orbit_args *args = (slowlog_push_orbit_args*)_args; 32 | slowlogPushEntry_real(args->c, args->argv, args->argc, args->duration); 33 | + store = zmalloc(512 * 1024 * 1024); 34 | + for (char *t = (char*)store; t < (char*)store + 512 * 1024 * 1024; t += 4096) 35 | + *t = 'h'; 36 | return 0; 37 | } 38 | 39 | void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration) { 40 | + int ret; 41 | + goto force_run; 42 | if (server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */ 43 | /* FIXME: this changes default behavior that trims slowlog entires every time */ 44 | if (!(duration >= server.slowlog_log_slower_than)) return; 45 | 46 | +force_run: 47 | printd("In slowlog push\n"); 48 | 49 | // pre-get c->peerid 50 | @@ -192,7 +206,11 @@ void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long durati 51 | 52 | // slowlogPushEntryIfNeededReal(c, argv, argc, duration); 53 | slowlog_push_orbit_args args = { c, argv, argc, duration, }; 54 | - orbit_call_async(slowlog_orbit, ORBIT_NORETVAL, 1, &slowlog_pool, NULL, &args, sizeof(args), NULL); 55 | + ret = orbit_call_async(slowlog_orbit, ORBIT_NORETVAL, 1, &slowlog_pool, NULL, &args, sizeof(args), NULL); 56 | + if (ret != 0) { 57 | + fprintf(stderr, "recreating slowlog orbit task\n"); 58 | + slowlog_orbit_create(); 59 | + } 60 | } 61 | 62 | /* Remove all the entries from the current slow log. 
*/ 63 | -------------------------------------------------------------------------------- /apps/varnish/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | module load userlib/reuse 7 | 8 | git clone https://github.com/OrderLab/obiwan-varnish.git code 9 | cd code 10 | 11 | git checkout c5626ecea 12 | ./autogen.sh 13 | ./configure --prefix=$SCRIPT_DIR/rel-orig CFLAGS="-O2 -ggdb" 14 | make -j$(nproc) 15 | make install 16 | make clean -f 17 | 18 | git checkout 4d36eae9 19 | ./autogen.sh 20 | ./configure --prefix=$SCRIPT_DIR/rel-orbit LDFLAGS='-lorbit' CFLAGS="-O2 -ggdb" 21 | make -j$(nproc) 22 | make install 23 | make clean -f 24 | -------------------------------------------------------------------------------- /experiments/README.md: -------------------------------------------------------------------------------- 1 | # Experiments (~4 h) 2 | 3 | We provide scripts for the following list of experiments mentioned in the paper. Click on each experiment's link for its usage. 4 | 5 | ## Microbenchmark (section 5.2) 6 | 7 | - [Orbit call (async)](micro-call) (~30 s) 8 | 9 | ## Fault Isolation (section 5.4) 10 | 11 | We provide a set of cases to show the effect of isolation by orbit. Click on each case's link to see the usage. 12 | 13 | Some of the cases require two or more operations running at the same time. It is recommended to use `tmux` to create a split view of terminals for better demonstration purposes. 
14 | 15 | - Real-world cases 16 | - [Apache proxy balancer segfault](isolation/apache-segfault.md) (~5 min human) 17 | - [Nginx WebDAV segfault](isolation/nginx-segfault.md) (~3 min human) 18 | - [Redis Slowlog memory leak](isolation/redis-memleak.md) (~3 min human) 19 | - Injection cases 20 | - [Apache lock watchdog diagnosis](isolation/watchdog-diagnosis.md) (~3 min human) 21 | - [Redis RDB segfault](isolation/rdb-segfault.md) (~3 min human) 22 | - [Redis Slowlog OOM](isolation/redis-oom.md) (~3 min human) 23 | - [Redis Slowlog CPU hogging](isolation/redis-cpu-hog.md) (~3 min human) 24 | 25 | ## Performance Overhead (section 5.5) 26 | 27 | - [Throughput](throughput) (~2.5 h) 28 | - [Fork vs Vanilla vs Orbit](fork-ob-orig) (~30 min) 29 | - [Sync vs Async orbit calls](async-sync) (~20 min) 30 | 31 | ## Known issues 32 | 33 | **1 Warning message about `rss-counter`** 34 | 35 | During the experiment, you may see some warning messages from the kernel like 36 | the following. This is showing up when an orbit process exits, but due to some 37 | implementation error the stats was not correctly updated. You can safely ignore them. 38 | ``` 39 | [ 41.754599] BUG: Bad rss-counter state mm:00000000b1f76ba0 type:MM_ANONPAGES val:131072 40 | ``` 41 | 42 | **2 Flaky Orbit version Redis RDB background save stuck** 43 | 44 | When Redis RDB is saving an RDB file to disk, you should be able to see the four following lines: 45 | ``` 46 | 269:M 16 Apr 01:31:45.120 * Background saving started by pid 287 47 | 287:C 16 Apr 01:31:45.123 * DB saved on disk 48 | 287:C 16 Apr 01:31:45.123 * RDB: 0 MB of memory used by copy-on-write 49 | 50 | 269:M 16 Apr 01:31:45.125 * Background saving terminated with success 51 | ``` 52 | 53 | However, due to a bug in implementation, the fourth line may sometimes be 54 | missing. When you noticed a missing fourth line, kill the Redis server by 55 | CTRL-C on it, and run `pkill -9 redis-server`. And then restart the experiment. 
56 | 57 | **3 Flaky Orbit version Varnish segfault issue** 58 | 59 | When running the varnish performance test, due to a bug in implementation, 60 | sometimes the varnish server will segfault. Press CTRL-C on the running script 61 | and re-run the experiment. 62 | -------------------------------------------------------------------------------- /experiments/async-sync/README.md: -------------------------------------------------------------------------------- 1 | # Async vs sync orbit calls 2 | 3 | This experiment compares async and sync version of orbit calls in MySQL by running a user workload. 4 | 5 | ## Running the experiment 6 | 7 | This experiment requires running another version of kernel. Start the VM by running `r formysql`. 8 | 9 | To run the experiment, run `./run.sh` in this directory. 10 | 11 | ### Repeat times 12 | 13 | By default, the `run.sh` script repeat for only 1 time, taking about 4 min. To 14 | run it for multiple times (we repeated for 5 times in the paper), run with an 15 | argument as follows. 16 | ```bash 17 | ./run.sh 5 18 | ``` 19 | 20 | ## Analyse results 21 | 22 | Generate figure by running `./plot.sh`. 23 | 24 | The VM does not contain GUI environment, so to view the figure, it needs to be copied out first. Shutdown the VM, then on the host machine, mount and copy: 25 | ```bash 26 | m 27 | sudo cp mount-point.dir/root/orbit/experiments/async-sync/figure.pdf . 
28 | ``` 29 | -------------------------------------------------------------------------------- /experiments/async-sync/plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import os 5 | import pandas as pd 6 | from datetime import datetime 7 | import matplotlib.pyplot as plt 8 | from matplotlib.ticker import MultipleLocator 9 | import numpy as np 10 | from matplotlib import cm 11 | import argparse 12 | import matplotlib 13 | 14 | #matplotlib.rcParams['pdf.fonttype'] = 42 15 | #matplotlib.rcParams['ps.fonttype'] = 42 16 | # matplotlib.rc('text', usetex=True) 17 | 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('-o', '--output', help="path to output image file") 20 | parser.add_argument('input', help="input data file") 21 | 22 | def plot_line(ax, df, label, marker, color): 23 | ax.plot(df.index.values, df.values, label=label, 24 | marker=marker, markersize=4, color=color, linewidth=2, zorder=3) 25 | 26 | def plot(args): 27 | df = pd.read_csv(args.input, index_col=0) 28 | async_orbit_df = df['async_orbit_qps'] 29 | sync_orbit_df = df['sync_orbit_qps'] 30 | 31 | figure, ax = plt.subplots(figsize=(4.6, 2.3)) 32 | plot_line(ax, sync_orbit_df, 'orbit_call (sync) to dl detector', '|', '#A00000') 33 | plot_line(ax, async_orbit_df, 'orbit_call_async to dl detector ', 'o', '#00A000') 34 | 35 | ax.legend(loc='lower center', ncol=1, frameon=True, edgecolor='black', fontsize=9, 36 | bbox_to_anchor=(0.6, 0.1), columnspacing=1.0) 37 | ax.grid(axis='y', linestyle='-', lw=0.3, zorder=0) 38 | ax.set_ylabel('Throughput (QPS)', fontsize=10) 39 | ax.set_xlabel('Time (s)', fontsize=10) 40 | ax.set_ylim(bottom=0) 41 | ax.yaxis.set_major_locator(MultipleLocator(1000)) 42 | 43 | figure.tight_layout() 44 | if args.output: 45 | ax.margins(0,0) 46 | plt.savefig(args.output, bbox_inches='tight', pad_inches=0) 47 | plt.show() 48 | 49 | if __name__ == '__main__': 50 | args = parser.parse_args() 
51 | if not args.input or not os.path.isfile(args.input): 52 | sys.stderr.write("Input file " + args.input + " does not exist\n") 53 | sys.exit(1) 54 | plot(args) 55 | -------------------------------------------------------------------------------- /experiments/async-sync/plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | ./qps.py 7 | ./plot.py -o figure.pdf async-sync-orbit.csv 8 | -------------------------------------------------------------------------------- /experiments/async-sync/qps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import csv 4 | from statistics import mean 5 | from os import listdir 6 | 7 | # 2 sec gap between two data points 8 | timegap = 2 9 | 10 | def parse(prefix): 11 | groups = [] 12 | for filename in listdir('.'): 13 | if filename.startswith(prefix): 14 | with open(filename) as f: 15 | ename = f.readline() 16 | lines = [int(line.strip()) for line in f.readlines()] 17 | groups.append(lines) 18 | new_groups = [] 19 | for nums in groups: 20 | new_nums = [] 21 | last_num = 0 22 | for x in nums: 23 | new_nums.append((x - last_num) / timegap) 24 | last_num = x 25 | new_groups.append(new_nums) 26 | assert(len(new_nums) == 60) 27 | new_groups = list(map(mean, zip(*new_groups))) 28 | return new_groups 29 | 30 | asy_data = parse('incone-orbit-16-report-') 31 | syn_data = parse('incone-sync-16-report-') 32 | 33 | with open('async-sync-orbit.csv', 'w') as f: 34 | writer = csv.DictWriter(f, fieldnames=['time', 'async_orbit_qps', 'sync_orbit_qps']) 35 | writer.writeheader() 36 | 37 | for i, (asy, syn) in enumerate(zip(asy_data, syn_data)): 38 | writer.writerow({ 'time': (i+1)*timegap, 'async_orbit_qps': asy, 'sync_orbit_qps': syn }) 39 | 
-------------------------------------------------------------------------------- /experiments/async-sync/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | repeat=1 4 | if [ ! -z "$1" ]; then 5 | repeat=$1 6 | fi 7 | 8 | function run_once { 9 | mysqld --user=root & 10 | mysqlpid=$! 11 | sleep 3 12 | 13 | ../incone.sh $1 $2 $3 $mysqlpid 14 | 15 | sleep 1 16 | killall mysql 17 | sleep 5 18 | 19 | killall mysqld 20 | sleep 1 21 | killall -9 mysqld 22 | sleep 1 23 | } 24 | 25 | function run_all { 26 | name=$1 27 | echo Running $name ... 28 | module load mysql/$name 29 | 30 | for thds in 16; do 31 | for t in `seq $repeat`; do 32 | run_once $name $thds $t 33 | done 34 | done 35 | 36 | module unload mysql 37 | } 38 | 39 | run_all orbit 40 | run_all sync 41 | -------------------------------------------------------------------------------- /experiments/fork-ob-orig/README.md: -------------------------------------------------------------------------------- 1 | # Fork vs Vanilla vs Orbit 2 | 3 | This experiment compares fork, vanilla, and orbit versions of MySQL by running a user workload. 4 | 5 | ## Running the experiment 6 | 7 | This experiment requires running another version of kernel. Start the VM by running `r formysql`. 8 | 9 | To run the experiment, run `./run.sh` in this directory. 10 | 11 | ### Repeat times 12 | 13 | By default, the `run.sh` script repeat for only 1 time, taking about 6 min. To 14 | run it for multiple times (we repeated for 5 times in the paper), run with an 15 | argument as follows. 16 | ```bash 17 | ./run.sh 5 18 | ``` 19 | 20 | ## Analyse results 21 | 22 | Generate figure by running `./plot.sh`. 23 | 24 | The VM does not contain GUI environment, so to view the figure, it needs to be copied out first. Shutdown the VM, then on the host machine, mount and copy: 25 | ```bash 26 | m 27 | sudo cp mount-point.dir/root/orbit/experiments/fork-ob-orig/figure.pdf . 
28 | ``` 29 | -------------------------------------------------------------------------------- /experiments/fork-ob-orig/plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import os 5 | import pandas as pd 6 | from datetime import datetime 7 | import matplotlib.pyplot as plt 8 | from matplotlib.ticker import MultipleLocator 9 | import numpy as np 10 | from matplotlib import cm 11 | import argparse 12 | import matplotlib 13 | 14 | #matplotlib.rcParams['pdf.fonttype'] = 42 15 | #matplotlib.rcParams['ps.fonttype'] = 42 16 | # matplotlib.rc('text', usetex=True) 17 | 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('-o', '--output', help="path to output image file") 20 | parser.add_argument('input', help="input data file") 21 | 22 | def plot_line(ax, df, label, marker, color): 23 | ax.plot(df.index.values, df.values, label=label, 24 | marker=marker, color=color, markersize=4, linewidth=2, zorder=3) 25 | 26 | def plot(args): 27 | df = pd.read_csv(args.input, index_col=0) 28 | vanilla_df = df['vanilla_qps'] 29 | orbit_df = df['orbit_qps'] 30 | fork_df = df['fork_qps'] 31 | 32 | vanilla_mean = vanilla_df.mean() 33 | orbit_mean = orbit_df.mean() 34 | fork_mean = fork_df.mean() 35 | 36 | print(vanilla_mean, orbit_mean, fork_mean, 1 - (orbit_mean / vanilla_mean), orbit_mean / fork_mean) 37 | 38 | figure, ax = plt.subplots(figsize=(4.6, 2.3)) 39 | plot_line(ax, vanilla_df, 'vanilla (unsafe) dl detector', '^', '#A00000') 40 | plot_line(ax, orbit_df, 'orbit (safe) dl detector ', 'o', '#00A000') 41 | plot_line(ax, fork_df, 'fork (safe) dl detector ', '+', '#041D37') 42 | 43 | ax.legend(loc='lower center', ncol=1, frameon=True, edgecolor='black', fontsize=9, 44 | bbox_to_anchor=(0.5, 0.35), columnspacing=1.0) 45 | ax.grid(axis='y', linestyle='-', lw=0.3, zorder=0) 46 | ax.set_ylabel('Throughput (QPS)', fontsize=10) 47 | ax.set_xlabel('Time (s)', fontsize=10) 48 | 
ax.set_ylim(bottom=0) 49 | ax.yaxis.set_major_locator(MultipleLocator(800)) 50 | 51 | figure.tight_layout() 52 | if args.output: 53 | ax.margins(0,0) 54 | plt.savefig(args.output, bbox_inches='tight', pad_inches=0) 55 | plt.show() 56 | 57 | if __name__ == '__main__': 58 | args = parser.parse_args() 59 | if not args.input or not os.path.isfile(args.input): 60 | sys.stderr.write("Input file " + args.input + " does not exist\n") 61 | sys.exit(1) 62 | plot(args) 63 | -------------------------------------------------------------------------------- /experiments/fork-ob-orig/plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | ./qps.py 7 | ./plot.py -o figure.pdf orbit-comparison.csv 8 | -------------------------------------------------------------------------------- /experiments/fork-ob-orig/qps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import csv 4 | from statistics import mean 5 | from os import listdir 6 | 7 | # 2 sec gap between two data points 8 | timegap = 2 9 | 10 | def parse(prefix): 11 | groups = [] 12 | for filename in listdir('.'): 13 | if filename.startswith(prefix): 14 | with open(filename) as f: 15 | ename = f.readline() 16 | lines = [int(line.strip()) for line in f.readlines()] 17 | groups.append(lines) 18 | new_groups = [] 19 | for nums in groups: 20 | new_nums = [] 21 | last_num = 0 22 | for x in nums: 23 | new_nums.append((x - last_num) / timegap) 24 | last_num = x 25 | new_groups.append(new_nums) 26 | assert(len(new_nums) == 60) 27 | new_groups = list(map(mean, zip(*new_groups))) 28 | return new_groups 29 | 30 | orbit = parse('incone-orbit-8-report-') 31 | orig = parse('incone-orig-8-report-') 32 | fork = parse('incone-fork-8-report-') 33 | 34 | with open('orbit-comparison.csv', 'w') as f: 35 | writer = 
csv.DictWriter(f, fieldnames=['time', 'vanilla_qps', 'orbit_qps', 'fork_qps']) 36 | writer.writeheader() 37 | 38 | for i, (va, ob, fo) in enumerate(zip(orig, orbit, fork)): 39 | writer.writerow({ 'time': (i+1)*timegap, 'vanilla_qps': va, 40 | 'orbit_qps': ob, 'fork_qps': fo }) 41 | -------------------------------------------------------------------------------- /experiments/fork-ob-orig/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | repeat=1 4 | if [ ! -z "$1" ]; then 5 | repeat=$1 6 | fi 7 | 8 | function run_once { 9 | mysqld --user=root & 10 | mysqlpid=$! 11 | sleep 3 12 | 13 | ../incone.sh $1 $2 $3 $mysqlpid 14 | 15 | sleep 1 16 | killall mysql 17 | sleep 5 18 | 19 | killall mysqld 20 | sleep 1 21 | killall -9 mysqld 22 | sleep 1 23 | } 24 | 25 | function run_all { 26 | name=$1 27 | echo Running $name ... 28 | module load mysql/$name 29 | 30 | for thds in 8; do 31 | for t in `seq $repeat`; do 32 | run_once $name $thds $t 33 | done 34 | done 35 | 36 | module unload mysql 37 | } 38 | 39 | run_all fork 40 | run_all orig 41 | run_all orbit 42 | -------------------------------------------------------------------------------- /experiments/incone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Increment by one 4 | # https://dom.as/2009/12/21/on-deadlock-detection/ 5 | # https://bugs.mysql.com/bug.php?id=49047 6 | 7 | if [[ $1 == "" ]]; then 8 | echo 'Please specify experiment name' 9 | exit 1 10 | fi 11 | testname=$1 12 | 13 | sockfile=/tmp/mysql.sock 14 | 15 | function mymysql { 16 | mysql -S ${sockfile} -u root "${@}" 17 | } 18 | 19 | function run_once { 20 | thds=$1 21 | trial=$2 22 | 23 | runname=incone-${testname}-${thds}-${trial}-$(date +%s) 24 | logfile=incone-${testname}-${thds}.log 25 | cpulogfile=inconecpu-${testname}-${thds}.log 26 | reportfile=incone-${testname}-${thds}-report-${trial}.log 27 | # those are append 28 | echo 
$runname >> $logfile 29 | echo $runname >> $cpulogfile 30 | # this will overwrite 31 | echo $runname > $reportfile 32 | 33 | mymysql -e "SELECT NAME, COUNT FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME = 'lock_deadlock_checker_forks';" | tee -a $logfile 34 | 35 | ps -p $mysqldpid -o pid,comm,etime,time >> $cpulogfile 36 | cat /proc/$mysqldpid/stat >> $cpulogfile 37 | cat /proc/stat | grep '^cpu ' >> $cpulogfile 38 | # First half EOF 39 | echo 'EOF FIRST' >> $cpulogfile 40 | 41 | mymysql play -e "UPDATE t1 SET a = 0" 42 | for i in `seq $thds`; do ( yes "UPDATE t1 SET a=a+1;" | mymysql play & ) ; done 43 | for i in {1..60}; do 44 | sleep 2 45 | mymysql --skip-column-names -e "select a from play.t1" >> $reportfile 46 | done 47 | killall mysql 48 | 49 | ps -p $mysqldpid -o pid,comm,etime,time >> $cpulogfile 50 | cat /proc/$mysqldpid/stat >> $cpulogfile 51 | cat /proc/stat | grep '^cpu ' >> $cpulogfile 52 | # First half EOF 53 | echo 'EOF SECOND' >> $cpulogfile 54 | 55 | sleep 5 56 | 57 | mymysql -e "select a, a/120 AS per_sec from play.t1" | tee -a $logfile 58 | mymysql -e "SELECT NAME, COUNT FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME = 'lock_deadlock_checker_forks';" | tee -a $logfile 59 | } 60 | 61 | mymysql -e "CREATE DATABASE IF NOT EXISTS play; DROP TABLE IF EXISTS play.t1; CREATE TABLE play.t1 (a int); INSERT INTO play.t1 VALUES (5);" 62 | 63 | mysqldpid=$4 64 | run_once $2 $3 65 | -------------------------------------------------------------------------------- /experiments/isolation/apache-segfault.md: -------------------------------------------------------------------------------- 1 | # Apache load balancer segfault case 2 | 3 | Original ticket: https://bz.apache.org/bugzilla/show_bug.cgi?id=59864 4 | 5 | Description: when there are multiple backends in the load balancer, and some of the backends reference each other as backup backends, HTTPD may run into infinite recursion. 
6 | 7 | Effect: This will cause segmentation fault, and all other running requests within the same HTTPD worker will drop connection. 8 | 9 | ## Generate config files 10 | 11 | Assume we are in the orbit root directory. Run the following to generate the config files that can trigger the issue. 12 | ```bash 13 | cd apps/httpd/ 14 | ./gen_config.sh $PWD/rel-watchdog/ $PWD/config-appendix-segfault 15 | ./gen_config.sh $PWD/rel-orig/ $PWD/config-appendix-segfault 16 | ``` 17 | Note that the config files could be overwritten when running other evaluation tasks. Therefore, re-run the above commands every time before running the reproduction commands below. 18 | 19 | ## Reproducing original issue 20 | 21 | First start the Apache server that reverted the fix. 22 | ```bash 23 | systemctl stop nginx 24 | module load httpd/segfault 25 | apachectl -k start 26 | ``` 27 | 28 | Start a simple python3 backend server: 29 | ```bash 30 | mkdir -p /tmp/www; cd /tmp/www; echo hello > hello; python3 -m http.server 1115 31 | ``` 32 | 33 | In one tmux pane, continuously issue normal GET requests. 34 | ```bash 35 | while curl localhost:8080/hello/hello; do true; done 36 | ``` 37 | 38 | In another tmux pane, run the following command to make a problematic request that triggers one segfault: 39 | ```bash 40 | curl -H 'Cookie: ROUTEID=.fe02' localhost:8080/somepath/index.html 41 | ``` 42 | This command will fail with curl error message: 43 | ``` 44 | curl: (52) Empty reply from server 45 | ``` 46 | 47 | Meanwhile, the `while` loop in the first pane will also show a curl error message (this may need several trials of the second-pane command to show up): 48 | ``` 49 | curl: (52) Empty reply from server 50 | ``` 51 | which should not occur since those normal request are not problematic. 52 | 53 | Stop Apache by running 54 | ```bash 55 | apachectl -k stop 56 | ``` 57 | 58 | ## Isolation with orbit 59 | 60 | First start the orbit version Apache server. 
61 | ```bash 62 | module unload httpd 63 | systemctl stop nginx 64 | module load httpd/proxy 65 | apachectl -k start 66 | ``` 67 | 68 | Same as the reproduction, run the following set of commands separately. 69 | ```bash 70 | mkdir -p /tmp/www; cd /tmp/www; echo hello > hello; python3 -m http.server 1115 71 | ``` 72 | 73 | ```bash 74 | while curl localhost:8080/hello/hello; do true; done 75 | ``` 76 | 77 | ```bash 78 | curl -H 'Cookie: ROUTEID=.fe02' localhost:8080/somepath/index.html 79 | ``` 80 | 81 | Each time the problematic request triggers a segfault, Apache will return "503 Service Unavailable" instead of resulting in a curl error. It will also not affect the correct requests in the `while` loop. Then orbit is re-spawned, and all requests proceed as normal. 82 | 83 | Stop Apache server by running 84 | ```bash 85 | apachectl -k stop 86 | killall -9 httpd 87 | ``` 88 | -------------------------------------------------------------------------------- /experiments/isolation/nginx-segfault.md: -------------------------------------------------------------------------------- 1 | # Nginx WebDAV segfault case 2 | 3 | Original ticket: https://trac.nginx.org/nginx/ticket/238 4 | 5 | Description: When issuing a PUT request with empty HTTP body to a WebDAV location, Nginx will segmentation fault due to NULL pointer dereference. The reason is the lack of NULL pointer check in WebDAV PUT handler. 6 | 7 | Effect: This not only affects the PUT request itself, but also affects other running requests inside the same nginx worker. 8 | 9 | ## Reproducing original issue 10 | 11 | First start the Nginx server version that reverted the fix in the ticket. 12 | ```bash 13 | systemctl stop nginx 14 | module load nginx/segfault 15 | nginx 16 | ``` 17 | 18 | In one tmux pane, run the following command to continuously issue simple GET requests. This loop will stop once `curl` returns an error. 
19 | ```bash 20 | while curl -sS localhost/ > /dev/null; do echo -n; done 21 | ``` 22 | 23 | In another tmux pane, run the following payload that can trigger the segfault. 24 | ```bash 25 | while ! curl -I -X PUT http://127.0.0.1/dd/a.txt; do echo -n; done 26 | ``` 27 | 28 | Soon after the second `while` loop starts running, the first pane should have stopped the loop, with error message showing: 29 | ``` 30 | curl: (56) Recv failure: Connection reset by peer 31 | ``` 32 | 33 | This shows that the segfault can affect other requests within the same nginx worker. 34 | 35 | Stop nginx by running 36 | ```bash 37 | nginx -s stop 38 | ``` 39 | 40 | ## Isolation with orbit 41 | 42 | First start the orbit version Nginx server. 43 | ```bash 44 | module unload nginx 45 | systemctl stop nginx 46 | module load nginx/orbit 47 | nginx 48 | ``` 49 | 50 | In one tmux pane, run 51 | ```bash 52 | while curl -sS localhost/ > /dev/null; do echo -n; done 53 | ``` 54 | 55 | In another tmux pane, run 56 | ```bash 57 | while curl -sS -X PUT http://127.0.0.1/dd/a.txt > /dev/null; do echo -n; done 58 | ``` 59 | 60 | Both loops should be able to run for a long period of time. Each time the second command triggers a segfault, the server will be non-responsive for a short period of time. Then the orbit will be re-spawned, and all requests can proceed as normal. 61 | 62 | Stop Nginx by running 63 | ```bash 64 | nginx -s stop 65 | killall -9 nginx # to also stop orbit instance 66 | ``` 67 | -------------------------------------------------------------------------------- /experiments/isolation/rdb-segfault.md: -------------------------------------------------------------------------------- 1 | # Redis RDB segfault injection 2 | 3 | We manually inject a null-pointer dereference bug every 3 RDB background save calls. We will show that the main part of Redis can survive such faults even if RDB component fails. 4 | 5 | ## Producing faults 6 | 7 | In one tmux pane, start the Redis server. 
8 | ```bash 9 | module load redis/rdb-fault 10 | redis-server 11 | ``` 12 | 13 | In another tmux pane, start the Redis client. 14 | ```bash 15 | module load redis/rdb-fault 16 | redis-cli 17 | ``` 18 | 19 | Then in the Redis client prompt, trigger three background saves. 20 | ``` 21 | bgsave 22 | bgsave 23 | bgsave 24 | ``` 25 | 26 | At the last run of `bgsave`, there will be a segmentation fault showing up in the Redis server pane, but the server is still running. At this time, try inserting a KV pair into Redis: 27 | ``` 28 | set foo bar 29 | ``` 30 | Redis will show the following error message: 31 | ``` 32 | (error) MISCONF Redis is configured to save RDB snapshots, but it is currently not able to persist on disk. Commands that may modify the data set are disabled, because this instance is configured to report errors during writes if RDB snapshotting fails (stop-writes-on-bgsave-error option). Please check the Redis logs for details about the RDB error. 33 | (error) ... 34 | ``` 35 | Redis by default protects any modification to the in-memory dictionary when an error occurs during RDB save. The orbit version provides the same level of isolation as the original fork version, and also utilizes this protection mechanism. Furthermore, Redis snapshots fewer pages compared to fork, as mentioned in t6 in Table 6. 36 | 37 | At this point, we can make another successful `bgsave` and try to insert a KV pair again: 38 | ``` 39 | bgsave 40 | set foo bar 41 | ``` 42 | This time the above two commands will finish successfully.
43 | 44 | Stop Redis by CTRL-C on the Redis server and CTRL-D on the Redis client, and then run 45 | ```bash 46 | pkill -9 redis-server 47 | module unload redis 48 | ``` 49 | -------------------------------------------------------------------------------- /experiments/isolation/redis-cpu-hog.md: -------------------------------------------------------------------------------- 1 | # Redis Slowlog CPU Hogging Injection 2 | 3 | We use `cgroup` to enforce a CPU time limit of 10%, and inject a 10s busy loop in the orbit task. 4 | 5 | We force the triggering of the fault in the orbit task, so that any request will cause the CPU hogging issue. 6 | 7 | ## Preparing `cgroup` 8 | 9 | Create a `cgroup` and set resource limit by running 10 | ```bash 11 | cgdelete -g cpu:/cpulimit 2> /dev/null 12 | cgcreate -g cpu:/cpulimit 13 | cgset -r cpu.cfs_period_us=$((1000*1000)) cpulimit 14 | cgset -r cpu.cfs_quota_us=$((1000*100)) cpulimit 15 | ``` 16 | 17 | ## Producing CPU Hogging 18 | 19 | Start Redis server by running 20 | 21 | ```bash 22 | module load redis/cpu-hog 23 | redis-server 24 | ``` 25 | 26 | In another tmux pane, start the Redis client: 27 | ```bash 28 | module load redis/cpu-hog 29 | redis-cli 30 | ``` 31 | and run a simple command in Redis client: 32 | ``` 33 | set foo bar 34 | ``` 35 | 36 | Start the `top` command, and we can see that the total system CPU usage should be only about 10%.
37 | 38 | Stop everything by CTRL-C on the Redis server and CTRL-D on the Redis client, and run 39 | ```bash 40 | pkill -9 redis-server 41 | module unload redis 42 | ``` 43 | -------------------------------------------------------------------------------- /experiments/isolation/redis-memleak-payload: -------------------------------------------------------------------------------- 1 | config set slowlog-log-slower-than 0 2 | set foo bar 3 | object refcount foo 4 | FLUSHALL ASYNC 5 | SLOWLOG reset 6 | -------------------------------------------------------------------------------- /experiments/isolation/redis-memleak.md: -------------------------------------------------------------------------------- 1 | # Redis Slowlog memory leak case 2 | 3 | Original issue: https://github.com/redis/redis/issues/4323 4 | 5 | Description: A possible race condition (TOCTOU bug) on refcount between the asynchronous `lazyfree` thread and the SLOWLOG command can cause memory leak. 6 | 7 | ## Reproducing original issue 8 | 9 | Start the redis server that reverted the fixes. We have injected 10,000 microseconds delay to the code to make it easier to reproduce. 10 | ```bash 11 | module load redis/memleak 12 | redis-server 13 | ``` 14 | 15 | In another tmux pane, start the redis client and feed in the payload commands in the current directory: 16 | ```bash 17 | cd experiments/isolation/ 18 | module load redis/memleak 19 | redis-cli < redis-memleak-payload 20 | ``` 21 | 22 | > Explanation of the payload file: 23 | > ``` 24 | > config set slowlog-log-slower-than 0 # set to log all operations in slowlog 25 | > set foo bar # "bar" string object will be added to both the dict and the slowlog 26 | > object refcount foo # refcount should be 2 (1 for 'foo' key, 1 in slowlog entry) 27 | > FLUSHALL ASYNC # (async) refcount-- 28 | > SLOWLOG reset # (sync) refcount-- 29 | > ``` 30 | > and the last two refcount-- have a TOCTOU bug. 
31 | 32 | The output that indicates memory leak is 33 | ``` 34 | bar obj dec 35 | (... DB saved on disk ...) 36 | bar obj dec 37 | bar obj dec 38 | ``` 39 | with no text showing "bar obj free". 40 | 41 | Stop redis by CTRL-C on the running `redis-server` and then `module unload redis` in every tmux pane. 42 | 43 | ## Isolation with orbit 44 | 45 | Start the orbit version or redis server. 46 | ```bash 47 | module unload redis 48 | module load redis/slowlog-delay 49 | redis-server 50 | ``` 51 | 52 | In another tmux pane, start the redis client and feed in the payload: 53 | ```bash 54 | module load redis/slowlog-delay 55 | redis-cli < redis-memleak-payload 56 | ``` 57 | 58 | The expected output that indicates successful memory deallocation will be 59 | ``` 60 | bar obj dec 61 | bar obj dec 62 | (... DB saved on disk ...) 63 | bar obj free 64 | ``` 65 | We can see there is a "bar obj free" in the output. 66 | 67 | Stop redis by CTRL-C on the running `redis-server` and then 68 | ```bash 69 | killall -9 redis-server 70 | module unload redis 71 | ``` 72 | -------------------------------------------------------------------------------- /experiments/isolation/redis-oom.md: -------------------------------------------------------------------------------- 1 | # Redis Slowlog OOM Injection 2 | 3 | We use `cgroup` to enforce a memory limit of 256 MB on the orbit task, and inject a memory allocation of 512 MB in orbit task. `Cgroup` will trigger an OOM kill on the task that goes over the memory limit. 4 | 5 | We force the triggering of the fault in the orbit task, so that any request will cause the memory overuse issue. 
6 | 7 | ## Preparing `cgroup` 8 | 9 | Create a `cgroup` and set resource limit by running 10 | ```bash 11 | cgdelete -g memory:/memorylimit 2> /dev/null 12 | cgcreate -g memory:/memorylimit 13 | cgset -r memory.limit_in_bytes=$((256*1024*1024)) memorylimit 14 | ``` 15 | 16 | ## Producing OOM 17 | 18 | Start Redis server by running 19 | 20 | ```bash 21 | module load redis/oom 22 | redis-server 23 | ``` 24 | 25 | In another tmux pane, start the Redis client: 26 | ```bash 27 | module load redis/oom 28 | redis-cli 29 | ``` 30 | and run a simple command in Redis client: 31 | ``` 32 | set foo bar 33 | ``` 34 | 35 | The Redis server side will show an OOM kill on the redis task, but the server is still functional. Try Redis command 36 | ``` 37 | get foo 38 | ``` 39 | The Redis client can still correctly return the `bar` as the output. 40 | 41 | Stop everything by CTRL-C on the Redis server and CTRL-D on the Redis client, and run 42 | ```bash 43 | pkill -9 redis-server 44 | module unload redis 45 | ``` 46 | -------------------------------------------------------------------------------- /experiments/isolation/watchdog-diagnosis.md: -------------------------------------------------------------------------------- 1 | # Apache lock watchdog diagnosis 2 | 3 | We added a newly-implemented task (t3) in Apache that periodically checks for long mutex lock waits. When a lock has been held for a period of time longer than a threshold, it will output notifications to the log. 4 | 5 | ## Producing a long holding time 6 | 7 | We inject a 25s sleep right after a request's mutex lock has been held. For easier demonstration, we use a 10s threshold instead of the 60s mentioned in the paper. 
8 | 9 | Start the Apache server: 10 | ```bash 11 | module load httpd/watchdog-inject 12 | apachectl -X -k start 13 | ``` 14 | 15 | To start monitoring the logs, in another pane, run (suppose we are in the orbit root directory) 16 | ```bash 17 | tail -f apps/httpd/rel-watchdog-inject/logs/error_log 18 | ``` 19 | 20 | Make a simple request 21 | ```bash 22 | curl http://127.0.0.1:8080/ 23 | ``` 24 | After 10s, the watchdog will find the lock has been held for too long. The log will show information about the lock creation location and holder location: 25 | ``` 26 | orbit: counter 1 has been occupied by lock 15 for 10 checks 27 | counter info: id=1, counter=0, holder=1 28 | lock info: id=15, tid=309 29 | time=0, slotidx=1 mutex=0x7ff2fc004240 30 | creator = http_request.c:448:ap_process_async_request 31 | holder = http_request.c:449:ap_process_async_request 32 | orbit: counter 1 has been occupied by lock 15 for 20 checks 33 | ... 34 | ``` 35 | and after another 15s, the lock will be released, and `curl` will return a webpage. 36 | 37 | Stop by CTRL-C on the Apache server and the `tail`, and run 38 | ```bash 39 | pkill -9 httpd 40 | ``` 41 | -------------------------------------------------------------------------------- /experiments/micro-call/.gitignore: -------------------------------------------------------------------------------- 1 | snap 2 | -------------------------------------------------------------------------------- /experiments/micro-call/Makefile: -------------------------------------------------------------------------------- 1 | CXXFLAGS = -lorbit -O3 2 | snap: snap.cpp 3 | -------------------------------------------------------------------------------- /experiments/micro-call/README.md: -------------------------------------------------------------------------------- 1 | # Orbit call (async) 2 | 3 | This experiment measures the latency of `orbit_call_async` with respect to the size of the orbit area, as mentioned in Figure 8.
4 | 5 | ## Running the experiment (30s) 6 | 7 | Run the experiment by running `./run.sh` in this directory. It runs each data point for 20 times, and it takes in total about 30s. 8 | 9 | ## Analyzing results 10 | 11 | Plot the figure by running `./plot.sh`. 12 | 13 | The VM does not contain GUI environment, so to view the figure, it needs to be copied out first. Shutdown the VM, then on the host machine, mount and copy: 14 | ```bash 15 | m 16 | sudo cp mount-point.dir/root/orbit/experiments/micro-call/figure.pdf . 17 | ``` 18 | -------------------------------------------------------------------------------- /experiments/micro-call/avg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import csv 4 | 5 | def calc_and_conv(name, new_name): 6 | size_time = {} 7 | with open(name, newline='') as f: 8 | reader = csv.DictReader(f) 9 | for row in reader: 10 | size = int(row['Size']) 11 | time = int(row['Time(ns)']) 12 | if size not in size_time: 13 | size_time[size] = [] 14 | size_time[size].append(time) 15 | for size in size_time: 16 | times = size_time[size] 17 | size_time[size] = sum(times) / len(times) 18 | with open(new_name, 'w') as f: 19 | writer = csv.DictWriter(f, fieldnames=['Size(KB)', 'Time(us)']) 20 | writer.writeheader() 21 | res = sorted(size_time.items()) 22 | for size, time in res: 23 | writer.writerow({ 'Size(KB)': size // 1024, 'Time(us)': time / 1000 }) 24 | 25 | calc_and_conv("orbit-snap.csv", "orbit-snap-avg-us.csv") 26 | -------------------------------------------------------------------------------- /experiments/micro-call/plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import os 5 | import pandas as pd 6 | from datetime import datetime 7 | import matplotlib.pyplot as plt 8 | from matplotlib.ticker import MultipleLocator 9 | import numpy as np 10 | from matplotlib import cm 11 | import 
argparse 12 | import matplotlib 13 | 14 | #matplotlib.rcParams['pdf.fonttype'] = 42 15 | #matplotlib.rcParams['ps.fonttype'] = 42 16 | #matplotlib.rc('text', usetex=True) 17 | 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('-o', '--output', help="path to output image file") 20 | parser.add_argument('input', help="input data file") 21 | 22 | def human_size_format(num): 23 | for unit in 'KMGT': 24 | if num < 1024: 25 | return f'{num}{unit}B' 26 | if num % 1024 != 0: 27 | raise ValueError(f'num {num} is not divisible by 1024') 28 | num //= 1024 29 | return f'{num}PB' 30 | 31 | def plot(args): 32 | df = pd.read_csv(args.input) 33 | 34 | figure, ax = plt.subplots(figsize=(4.6, 2.5)) 35 | 36 | ind = np.arange(len(df)) 37 | ax.plot(ind, df['Time(us)'].values, label='orbit_call_async', 38 | marker='s', color='#00008B', linewidth=2, zorder=3) 39 | ax.set_yscale('log') 40 | ax.set_xticks(ind[::3]) 41 | ax.set_xticklabels(map(human_size_format, df['Size(KB)'].values[::3])) 42 | 43 | ax.legend(loc='best', ncol=1, frameon=True, edgecolor='black', fontsize=9, 44 | columnspacing=1.0) 45 | ax.grid(axis='y', linestyle='--', zorder=0) 46 | ax.set_ylabel('Latency (us)', fontsize=10) 47 | ax.set_xlabel('State size', fontsize=10) 48 | # ax.set_ylim(bottom=0) 49 | 50 | figure.tight_layout() 51 | if args.output: 52 | ax.margins(0,0) 53 | plt.savefig(args.output, bbox_inches='tight', pad_inches=0) 54 | plt.show() 55 | 56 | if __name__ == '__main__': 57 | args = parser.parse_args() 58 | if not args.input or not os.path.isfile(args.input): 59 | sys.stderr.write("Input file " + args.input + " does not exist\n") 60 | sys.exit(1) 61 | plot(args) 62 | -------------------------------------------------------------------------------- /experiments/micro-call/plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | 
./avg.py 7 | ./plot.py -o figure.pdf orbit-snap-avg-us.csv 8 | -------------------------------------------------------------------------------- /experiments/micro-call/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | module load userlib/plain 7 | make 8 | 9 | limit=$((1024 * 1024 * 1024)) 10 | 11 | echo 'Size,Time(ns)' > orbit-snap.csv 12 | 13 | for ((size=4096; $size <= $limit; size=$((2 * $size)))); do 14 | echo $size 15 | for i in {1..20}; do 16 | ./snap $size >> orbit-snap.csv 17 | sleep 0.01 18 | done 19 | done 20 | -------------------------------------------------------------------------------- /experiments/micro-call/snap.cpp: -------------------------------------------------------------------------------- 1 | #include "orbit.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | using std::min; 11 | using namespace std::chrono; 12 | 13 | #define PGSIZE 4096UL 14 | #define KB 1024UL 15 | #define MB (1024UL * KB) 16 | #define GB (1024UL * MB) 17 | 18 | // Force assert regardless of NDEBUG matter 19 | #define assert(expr) do { \ 20 | if (!(expr)) \ 21 | fprintf(stderr, #expr "failed"); \ 22 | } while (0) 23 | 24 | unsigned long empty(void *store, void *argbuf) { 25 | (void)store; 26 | (void)argbuf; 27 | std::this_thread::sleep_for(milliseconds(5)); 28 | return 0; 29 | } 30 | 31 | orbit_module *m; 32 | orbit_pool *pool; 33 | orbit_allocator *alloc; 34 | 35 | // Make pages dirty 36 | void dirty_pages(orbit_pool *pool) { 37 | for (char *page = (char*)pool->rawptr; 38 | page < (char*)pool->rawptr + pool->used; 39 | page += PGSIZE) 40 | { 41 | *page = 'h'; 42 | } 43 | } 44 | 45 | long long orbit_one(void) { 46 | orbit_task task; 47 | auto t1 = high_resolution_clock::now(); 48 | int ret = orbit_call_async(m, 0, 1, &pool, NULL, NULL, 0, &task); 49 | 
auto t2 = high_resolution_clock::now(); 50 | assert(ret == 0); 51 | 52 | orbit_result result; 53 | ret = orbit_recvv(&result, &task); 54 | assert(ret == 0 && result.retval == 0); 55 | 56 | return duration_cast(t2 - t1).count(); 57 | } 58 | 59 | void bench_snap(size_t size, bool csv) { 60 | pool = orbit_pool_create(NULL, size); 61 | alloc = orbit_allocator_from_pool(pool, false); 62 | m = orbit_create("snap percentage", empty, NULL); 63 | assert(m); 64 | 65 | if (!csv) 66 | printf("Testing snapshot size %lu\n", size); 67 | 68 | void *ptr = orbit_alloc(alloc, size); 69 | assert(ptr); 70 | assert(size == pool->used); 71 | 72 | dirty_pages(pool); 73 | long long duration = orbit_one(); 74 | 75 | if (csv) 76 | printf("%lu,%lld\n", size, duration); 77 | else 78 | printf("Snap %lu takes %lld ns.\n", size, duration); 79 | 80 | orbit_destroy(m->gobid); 81 | } 82 | 83 | void usage(const char *prog_name, int exit_val) { 84 | fprintf(stderr, "Usage: %s \n", prog_name); 85 | exit(exit_val); 86 | } 87 | 88 | int main(int argc, const char *argv[]) { 89 | if (argc != 2) 90 | usage(argv[0], 1); 91 | size_t size; 92 | if (sscanf(argv[1], "%ld", &size) != 1) 93 | usage(argv[0], 1); 94 | bench_snap(size, true); 95 | return 0; 96 | } 97 | -------------------------------------------------------------------------------- /experiments/throughput/.gitignore: -------------------------------------------------------------------------------- 1 | mix 2 | -------------------------------------------------------------------------------- /experiments/throughput/README.md: -------------------------------------------------------------------------------- 1 | # Overhead: Throughput 2 | 3 | This experiments measures the overhead on throughput for all 8 systems, and reproduces Figure 9. 4 | 5 | ## Running the experiment 6 | 7 | We provide a script to run systems in batch. For example, run the following command to run the 5 systems. 
8 | ```bash 9 | ./run_batch.sh leveldb nginx proxy slowlog watchdog 10 | ``` 11 | 12 | Note that MySQL requires a different kernel version. To run the MySQL workload, shutdown the VM, and start the VM via 13 | ```bash 14 | r formysql 15 | ``` 16 | on the host, and then in the VM: 17 | ```bash 18 | ./run_batch.sh mysql 19 | ``` 20 | 21 | ***Please also see [Known issues](#known-issues) section before running the experiments.*** 22 | 23 | ### Repeat times 24 | 25 | By default, the `run_batch.sh` script only repeats once. To run it for 26 | more times (we repeated 5 times in the paper), insert an integer argument as 27 | the first argument: 28 | ```bash 29 | ./run_batch.sh 5 [systems...] 30 | ``` 31 | 32 | ### Expected running time 33 | 34 | Below shows the expected time of each system running once. The first 7 35 | systems (which can run using the same kernel) take about 25 min. Repeating all 36 | experiments for 5 times takes about 2.5 hours in total. 37 | 38 | | System | Approx. time (min) | 39 | | ---- | ---- | 40 | | leveldb | 2.5 | 41 | | nginx | 4 | 42 | | proxy | 2.5 | 43 | | rdb | 2 | 44 | | slowlog | 4.5 | 45 | | varnish | 4.5 | 46 | | watchdog | 4.5 | 47 | | mysql | 4.5 | 48 | 49 | ## Analyzing results 50 | 51 | Generate the figure by running `./plot.sh`. 52 | 53 | The VM does not contain a GUI environment, so to view the figure, it needs to be copied out first. Shutdown the VM, then on the host machine, mount and copy: 54 | ```bash 55 | m 56 | sudo cp mount-point.dir/root/orbit/experiments/throughput/figure.pdf . 57 | ``` 58 | 59 | ## Known issues 60 | 61 | As mentioned in the [Known issues](../README.md#known-issues) in the upper 62 | directory, there are some flaky bugs in Redis RDB and Varnish. See the upper 63 | directory for the buggy behaviors.
64 | 65 | Therefore, you may want to run the 5 systems first: 66 | ```bash 67 | ./run_batch.sh [times] leveldb nginx proxy slowlog watchdog 68 | ``` 69 | 70 | Then `cd` into `rdb/` and `varnish/` in this directory to run multiple runs 71 | respectively and monitor the results: 72 | ```bash 73 | # suppose we are in experiments/throughput/ 74 | cd rdb/ # or `cd varnish/` 75 | ./run.sh -n 1 76 | ./run.sh -n 2 77 | ... 78 | ./run.sh -n 5 79 | ``` 80 | Here each line will only repeat once (running orbit version first and then 81 | original version). When there is buggy error behavior, you can restart that 82 | single round. The `-n` specifies which log file to (re)write to. 83 | You will see result files naming similar to `res-orbit-2.log` when running with `-n 2`. 84 | 85 | And finally shutdown the VM and start the VM again with the kernel for MySQL 86 | with `r formysql`, and then `cd` into the `mysql/` folder in this directory, 87 | and run multiple times. 88 | ```bash 89 | # suppose we are in experiments/throughput/ 90 | cd mysql/ 91 | ./run.sh 5 92 | ``` 93 | 94 | ## Cleanup logs 95 | 96 | To cleanup all previous results, run the following command in this directory. 
97 | ```bash 98 | rm */*.{out,err,log} 99 | ``` 100 | -------------------------------------------------------------------------------- /experiments/throughput/collect.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | function avg { 7 | # with empty value protection 8 | python3 -c 'import sys; from statistics import mean; print(mean([eval(v.strip()) for v in sys.stdin.readlines() if len(v.strip()) != 0] or [0]))' 9 | } 10 | 11 | function collect_t1 { 12 | cd mysql 13 | orbit=$(grep -F queries: sysbench-orbit-16-*.log | awk '{print substr($3,2)}' | avg) 14 | orig=$(grep -F queries: sysbench-orig-16-*.log | awk '{print substr($3,2)}' | avg) 15 | echo "t1,MySQL deadlock detector,$orbit,$orig" 16 | cd .. 17 | } 18 | 19 | function collect_t2 { 20 | cd proxy 21 | # `mix` outputs time. Since we only look at the relative performance, we intentionally 22 | # exchange the variable of following to lines. 23 | orig=$(cat proxy-*.log | avg) 24 | orbit=$(cat orig-*.log | avg) 25 | echo "t2,Apache proxy balancer,$orbit,$orig" 26 | cd .. 27 | } 28 | 29 | function collect_t3 { 30 | cd watchdog 31 | orbit=$(grep -F 'Requests per second:' res-watchdog4-*.out | awk '{print $4}' | avg) 32 | orig=$(grep -F 'Requests per second:' res-orig4-*.out | awk '{print $4}' | avg) 33 | echo "t3,Apache watchdog,$orbit,$orig" 34 | cd .. 35 | } 36 | 37 | function collect_t4 { 38 | cd nginx 39 | # `mix` outputs time. Since we only look at the relative performance, we intentionally 40 | # exchange the variable of following to lines. 41 | orig=$(cat orbit-*.log | avg) 42 | orbit=$(cat orig-*.log | avg) 43 | echo "t4,Nginx WebDAV handler,$orbit,$orig" 44 | cd .. 
45 | } 46 | 47 | function collect_t5 { 48 | cd varnish 49 | orbit=$(grep -F 'Requests per second:' res-orbit4-*.out | awk '{print $4}' | avg) 50 | orig=$(grep -F 'Requests per second:' res-orig4-*.out | awk '{print $4}' | avg) 51 | echo "t5,Varnish pool herder,$orbit,$orig" 52 | cd .. 53 | } 54 | 55 | function collect_t6 { 56 | cd slowlog 57 | orbit=$(grep -F '[OVERALL], Throughput(ops/sec)' res-run-slowlog-*.out | awk '{print $3}' | avg) 58 | orig=$(grep -F '[OVERALL], Throughput(ops/sec)' res-run-orig-*.out | awk '{print $3}' | avg) 59 | echo "t6,Redis slowlog,$orbit,$orig" 60 | cd .. 61 | } 62 | 63 | function collect_t7 { 64 | cd rdb 65 | orbit=$(grep -F '[OVERALL], Throughput(ops/sec)' res-run-rdb-*.out | awk '{print $3}' | avg) 66 | orig=$(grep -F '[OVERALL], Throughput(ops/sec)' res-run-orig-*.out | awk '{print $3}' | avg) 67 | echo "t7,Redis RDB,$orbit,$orig" 68 | cd .. 69 | } 70 | 71 | function collect_t8 { 72 | cd leveldb 73 | orbit=$(grep fillseq orbit-*.log | awk '{print $5}' | avg) 74 | orig=$(grep fillseq orig-*.log | awk '{print $5}' | avg) 75 | echo "t8,LevelDB compaction,$orbit,$orig" 76 | cd .. 77 | } 78 | 79 | function collect { 80 | echo "Case,Name,Orbit,Vanilla" 81 | for i in {1..8}; do 82 | collect_t$i 83 | done 84 | } 85 | 86 | collect > overhead.csv 87 | -------------------------------------------------------------------------------- /experiments/throughput/leveldb/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | if [ ! 
-z "$1" ]; then 8 | repeat=$1 9 | fi 10 | 11 | function run { 12 | module load leveldb/$1 13 | rm -rf /tmp/obtestdb 14 | for i in `seq $repeat`; do 15 | db_bench --benchmarks=fillseq --db=/tmp/obtestdb --num=15000000 > $1-$i.log 16 | sleep 5 17 | pkill -9 db_bench 18 | sleep 1 19 | rm -rf /tmp/obtestdb 20 | done 21 | module unload leveldb 22 | } 23 | 24 | run orig 25 | run orbit 26 | -------------------------------------------------------------------------------- /experiments/throughput/mysql/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | if [ ! -z "$1" ]; then 8 | repeat=$1 9 | fi 10 | 11 | # sysbench 12 | export PATH="$SCRIPT_DIR/../../tools/sysbench/bin:$PATH" 13 | 14 | function run { 15 | module load mysql/$1 16 | mysqld --user=root & 17 | sleep 3 18 | 19 | ./sysbench.sh init 20 | ./sysbench.sh cleanup 21 | ./sysbench.sh $1 $repeat 22 | ./sysbench.sh cleanup 23 | 24 | killall -9 mysqld 25 | sleep 1 26 | module unload mysql 27 | } 28 | 29 | run orig 30 | run orbit 31 | -------------------------------------------------------------------------------- /experiments/throughput/mysql/sysbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | 5 | sockfile=/tmp/mysql.sock 6 | 7 | function mymysql { 8 | mysql -S ${sockfile} -u root "${@}" 9 | } 10 | 11 | mysqlargs="--mysql-socket=${sockfile} --mysql-user=root" 12 | 13 | logdir=$PWD 14 | 15 | taskfile=$SCRIPT_DIR/../../tools/sysbench/share/sysbench/oltp_read_write.lua 16 | 17 | runtime=120 18 | 19 | cd $taskdir 20 | 21 | if [[ $1 == "" ]]; then 22 | echo 'Please specify experiment name' 23 | exit 1 24 | fi 25 | testname=$1 26 | 27 | repeat=1 28 | if [ ! 
-z "$2" ]; then 29 | repeat=$2 30 | fi 31 | 32 | function find_mysqld { 33 | # We do not care about differences in multiple pgrep runs 34 | count=$(($(pgrep -x mysqld | wc -l))) 35 | if [[ $count != "1" ]]; then 36 | echo "Multiple mysqld instances found:" $(pgrep -x mysqld) 37 | exit 1 38 | fi 39 | mysqldpid=$(pgrep -x mysqld) 40 | } 41 | 42 | function find_mysqld_pidfile { 43 | mysqldpid=$(cat /tmp/mysqld.pid) 44 | if ps -p $mysqldpid > /dev/null; then 45 | : 46 | else 47 | echo "pid not found" 48 | exit 1 49 | fi 50 | } 51 | 52 | function sysbench_prepare { 53 | sysbench $taskfile $mysqlargs --threads=$1 --time=$runtime --report-interval=10 prepare 54 | } 55 | 56 | function sysbench_run { 57 | sysbench $taskfile $mysqlargs --threads=$1 --time=$runtime --report-interval=10 run 58 | } 59 | 60 | function sysbench_cleanup { 61 | sysbench $taskfile $mysqlargs cleanup 62 | } 63 | 64 | function run_once { 65 | thds=$1 66 | trial=$2 67 | 68 | sysbench_prepare $thds 69 | 70 | runname=sysbench-${testname}-${thds}-${trial}-$(date +%s) 71 | logfile=$logdir/sysbench-${testname}-${thds}-${trial}.log 72 | cpulogfile=$logdir/sysbenchcpu-${testname}-${thds}.log 73 | #dlckforklogfile=$logdir/sysbenchdlckfork-${testname}-${thds}.log 74 | 75 | echo $runname > $logfile 76 | echo $runname >> $cpulogfile 77 | #echo $runname >> $dlckforklogfile 78 | #mymysql -e "SELECT NAME, COUNT FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME = 'lock_deadlock_checker_forks';" | tee -a $dlckforklogfile; 79 | 80 | ps -p $mysqldpid -o pid,comm,etime,time >> $cpulogfile 81 | cat /proc/$mysqldpid/stat >> $cpulogfile 82 | cat /proc/stat >> $cpulogfile 83 | # First half EOF 84 | echo 'EOF FIRST' >> $cpulogfile 85 | 86 | sysbench_run $thds | tee -a $logfile 87 | 88 | ps -p $mysqldpid -o pid,comm,etime,time >> $cpulogfile 89 | cat /proc/$mysqldpid/stat >> $cpulogfile 90 | cat /proc/stat >> $cpulogfile 91 | # First half EOF 92 | echo 'EOF SECOND' >> $cpulogfile 93 | 94 | #mymysql -e "SELECT NAME, COUNT FROM 
INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME = 'lock_deadlock_checker_forks';" | tee -a $dlckforklogfile; 95 | 96 | sysbench_cleanup 97 | } 98 | 99 | function run_all { 100 | for thds in 16; do 101 | for t in `seq $repeat`; do 102 | run_once $thds $t 103 | sleep 10; 104 | done 105 | done 106 | } 107 | 108 | case $1 in 109 | init) 110 | mymysql -e "CREATE DATABASE IF NOT EXISTS sbtest;" 111 | ;; 112 | prepare) 113 | sysbench_prepare 114 | ;; 115 | run) 116 | sysbench_run 117 | ;; 118 | cleanup) 119 | sysbench_cleanup 120 | ;; 121 | *) 122 | # Otherwise, regard as experiment name 123 | find_mysqld_pidfile 124 | run_all 125 | ;; 126 | esac 127 | -------------------------------------------------------------------------------- /experiments/throughput/nginx/Makefile: -------------------------------------------------------------------------------- 1 | CXXFLAGS = -O3 -pthread 2 | LDFLAGS = -lcurl 3 | mix: mix.cpp 4 | -------------------------------------------------------------------------------- /experiments/throughput/nginx/mix.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | using std::chrono::high_resolution_clock; 8 | 9 | size_t write_callback(char *ptr, size_t size, size_t nmemb, void *userdata) { 10 | (void)ptr; 11 | (void)size; 12 | (void)userdata; 13 | return nmemb; 14 | } 15 | 16 | void get_one() { 17 | CURLcode res; 18 | CURL *curl = curl_easy_init(); 19 | assert(curl); 20 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_callback); 21 | curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1/"); 22 | res = curl_easy_perform(curl); 23 | curl_easy_cleanup(curl); 24 | (void)res; 25 | } 26 | 27 | // curl -X PUT -d 'hello' localhost/dd/a.txt 28 | // https://stackoverflow.com/a/7570281 29 | void put_one() { 30 | CURLcode res; 31 | CURL *curl = curl_easy_init(); 32 | assert(curl); 33 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_callback); 34 | 
curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1/dd/a.txt"); 35 | curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "PUT"); 36 | curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "hello\n"); 37 | res = curl_easy_perform(curl); 38 | curl_easy_cleanup(curl); 39 | (void)res; 40 | } 41 | 42 | void thd(void) { 43 | for (int i = 0; i < 1000 * 300; ++i) { 44 | if (i % 10) 45 | get_one(); 46 | else 47 | put_one(); 48 | if ((i+1) % 10000 == 0) 49 | std::cerr << "Finished " << (i + 1) << '\n'; 50 | } 51 | } 52 | 53 | int main() { 54 | std::cerr << "Start running..." << std::endl; 55 | 56 | auto start = high_resolution_clock::now(); 57 | 58 | std::thread t1(thd); 59 | std::thread t2(thd); 60 | std::thread t3(thd); 61 | std::thread t4(thd); 62 | 63 | t1.join(); 64 | t2.join(); 65 | t3.join(); 66 | t4.join(); 67 | 68 | auto end = high_resolution_clock::now(); 69 | 70 | std::cerr << "Finished running." << std::endl; 71 | std::cout << (end - start).count() << std::endl; 72 | 73 | return 0; 74 | } 75 | -------------------------------------------------------------------------------- /experiments/throughput/nginx/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | if [ ! 
-z "$1" ]; then 8 | repeat=$1 9 | fi 10 | 11 | make 12 | 13 | duration=120 14 | 15 | thd=4 16 | 17 | function run { 18 | systemctl stop nginx 19 | sleep 1 20 | 21 | module load nginx/$1 22 | nginx 23 | sleep 1 24 | 25 | for i in `seq $repeat`; do 26 | ./mix > $1-$i.log 27 | sleep 1 28 | done 29 | 30 | nginx -s stop 31 | sleep 1 32 | pkill -9 nginx 33 | module unload nginx 34 | sleep 1 35 | } 36 | 37 | run orig 38 | run orbit 39 | -------------------------------------------------------------------------------- /experiments/throughput/plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import pandas as pd 5 | from datetime import datetime 6 | import matplotlib.pyplot as plt 7 | import matplotlib.dates as mdates 8 | from matplotlib.ticker import MultipleLocator 9 | import numpy as np 10 | import argparse 11 | import matplotlib 12 | from matplotlib import cm 13 | 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument('-o', '--output', help="path to output image file") 16 | parser.add_argument('input', help="path to input data file") 17 | 18 | #matplotlib.rcParams['pdf.fonttype'] = 42 19 | #matplotlib.rcParams['ps.fonttype'] = 42 20 | 21 | def hatch_bar(ax, df): 22 | bars = ax.patches 23 | hatches = '-\\/.-+o' 24 | hatch_repeats = 4 25 | all_hatches = [] 26 | for h in hatches: 27 | all_hatches.extend([h * hatch_repeats] * len(df)) 28 | for bar, hatch in zip(bars, all_hatches): 29 | bar.set_hatch(hatch) 30 | 31 | def plot_overhead(df): 32 | vanilla = df['Vanilla'] 33 | orbit = df['Orbit'] 34 | relative_vanilla = vanilla / vanilla 35 | relative_orbit = orbit / vanilla 36 | print(1 - relative_orbit) 37 | print(1 - relative_orbit.median()) 38 | 39 | figure, ax = plt.subplots(figsize=(5, 2)) 40 | ind = np.arange(len(relative_orbit)) 41 | width = 0.4 42 | vanilla_bars = ax.bar(ind, relative_vanilla.values, width - 0.03, bottom=0, label='Vanilla', color='#a6dc80') 43 | orbit_bars 
= ax.bar(ind + width, relative_orbit.values, width - 0.03, bottom=0, label='Orbit', color='#98c8df') 44 | ax.set_xticks(ind + 0.5 * width) 45 | ax.set_xticklabels(df.index.values, rotation=0) 46 | ax.set_ylim(0, 1.1) 47 | hatch_bar(ax, relative_orbit) 48 | ax.set_yticks(np.arange(0, 1.1, 0.2)) 49 | 50 | # for p, v in zip(vanilla_bars, vanilla.values): 51 | # height = p.get_height() 52 | # ax.text(p.get_x() + p.get_width() / 2., 1.05 * height, '%.1f' % round(v), 53 | # ha='center', va='bottom') 54 | 55 | ax.legend(loc='lower left', bbox_to_anchor=(0., 0.92), frameon=False, fontsize=9, ncol=3) 56 | ax.set_ylabel("Normalized thput") 57 | ax.set_xlabel("Task") 58 | for tick in ax.get_xticklabels(): 59 | tick.set_rotation(0) 60 | plt.tight_layout() 61 | if args.output: 62 | plt.savefig(args.output, bbox_inches='tight', pad_inches=0) 63 | plt.show() 64 | 65 | if __name__ == '__main__': 66 | args = parser.parse_args() 67 | if not args.input: 68 | sys.stderr.write('Must specify input data file\n') 69 | sys.exit(1) 70 | df = pd.read_csv(args.input, index_col=0) 71 | plot_overhead(df) 72 | 73 | -------------------------------------------------------------------------------- /experiments/throughput/plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | ./collect.sh 7 | ./plot.py -o figure.pdf overhead.csv 8 | -------------------------------------------------------------------------------- /experiments/throughput/proxy/Makefile: -------------------------------------------------------------------------------- 1 | CXXFLAGS = -O3 -pthread 2 | LDFLAGS = -lcurl 3 | mix: mix.cpp 4 | -------------------------------------------------------------------------------- /experiments/throughput/proxy/mix.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 
| #include 5 | #include 6 | 7 | using std::chrono::high_resolution_clock; 8 | 9 | size_t write_callback(char *ptr, size_t size, size_t nmemb, void *userdata) { 10 | (void)ptr; 11 | (void)size; 12 | (void)userdata; 13 | return nmemb; 14 | } 15 | 16 | void get_one(const char *url) { 17 | CURLcode res; 18 | CURL *curl = curl_easy_init(); 19 | assert(curl); 20 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_callback); 21 | curl_easy_setopt(curl, CURLOPT_URL, url); 22 | res = curl_easy_perform(curl); 23 | curl_easy_cleanup(curl); 24 | (void)res; 25 | } 26 | 27 | void thd(void) { 28 | for (int i = 0; i < 1000 * 200; ++i) { 29 | const char *url = i % 10 30 | ? "http://127.0.0.1:8080/index.html" 31 | : "http://127.0.0.1:8080/somepath/index.html?&ROUTEID=.fe02"; 32 | get_one(url); 33 | if ((i+1) % 10000 == 0) 34 | std::cerr << "Finished " << (i + 1) << '\n'; 35 | } 36 | } 37 | 38 | int main() { 39 | std::cerr << "Started running..." << std::endl; 40 | 41 | auto start = high_resolution_clock::now(); 42 | 43 | std::thread t1(thd); 44 | std::thread t2(thd); 45 | std::thread t3(thd); 46 | std::thread t4(thd); 47 | 48 | t1.join(); 49 | t2.join(); 50 | t3.join(); 51 | t4.join(); 52 | 53 | auto end = high_resolution_clock::now(); 54 | 55 | std::cerr << "Finished running." << std::endl; 56 | std::cout << (end - start).count() << std::endl; 57 | 58 | return 0; 59 | } 60 | -------------------------------------------------------------------------------- /experiments/throughput/proxy/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | if [ ! 
-z "$1" ]; then 8 | repeat=$1 9 | fi 10 | 11 | make 12 | 13 | duration=120 14 | 15 | thd=4 16 | 17 | # Regenerate configs 18 | cd ../../../apps/httpd 19 | ./gen_config.sh $PWD/rel-proxy/ $PWD/config-appendix 20 | ./gen_config.sh $PWD/rel-orig/ $PWD/config-appendix 21 | cd $SCRIPT_DIR 22 | 23 | function run { 24 | systemctl start nginx 25 | sleep 1 26 | 27 | module load httpd/$1 28 | apachectl -k start 29 | sleep 1 30 | 31 | for i in `seq $repeat`; do 32 | ./mix > $1-$i.log 33 | sleep 1 34 | done 35 | 36 | apachectl -k stop 37 | module unload httpd 38 | sleep 1 39 | pkill -9 httpd 40 | 41 | systemctl stop nginx 42 | sleep 1 43 | } 44 | 45 | run orig 46 | run proxy 47 | -------------------------------------------------------------------------------- /experiments/throughput/rdb/param: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010 Yahoo! Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! 
Cloud System Benchmark 17 | # Workload B: Read mostly workload 18 | # Application example: photo tagging; add a tag is an update, but most operations are to read tags 19 | # 20 | # Read/update ratio: 95/5 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: zipfian 23 | 24 | workload=site.ycsb.workloads.CoreWorkload 25 | 26 | readallfields=true 27 | 28 | readproportion=0.95 29 | updateproportion=0.05 30 | scanproportion=0 31 | insertproportion=0 32 | 33 | requestdistribution=zipfian 34 | 35 | 36 | redis.host=127.0.0.1 37 | redis.port=6379 38 | fieldlength=10 39 | recordcount=30000 40 | operationcount=3000000 41 | threadcount=32 42 | -------------------------------------------------------------------------------- /experiments/throughput/rdb/redis.conf: -------------------------------------------------------------------------------- 1 | save 5 10000 2 | -------------------------------------------------------------------------------- /experiments/throughput/rdb/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | re='^[0-9]+$' 8 | if [[ $1 =~ $re ]] ; then 9 | repeat=$1 10 | elif [[ $1 == '-n' ]]; then 11 | if [[ $2 =~ $re ]]; then 12 | fileid=$2 13 | else 14 | echo "Invalid -n argument. Expected a number." 15 | exit 1 16 | fi 17 | elif [ ! -z "$1" ]; then 18 | echo "Unknown argument \"$1\"" 19 | exit 1 20 | fi 21 | 22 | function ycsb { 23 | $SCRIPT_DIR/../../tools/ycsb/bin/ycsb $@ 24 | } 25 | 26 | function run { 27 | tp=$1 28 | module load redis/$tp 29 | rm -f *.rdb 30 | 31 | for i in `seq $repeat`; do 32 | if [ ! 
-z "$fileid" ]; then 33 | i=$fileid 34 | fi 35 | 36 | rm -f *.rdb 37 | redis-server $SCRIPT_DIR/redis.conf & 38 | sleep 1 39 | 40 | ycsb load redis -s -P $SCRIPT_DIR/param > res-load-${tp}.out 2> res-load-${tp}.err 41 | sleep 5 42 | ycsb run redis -s -P $SCRIPT_DIR/param > res-run-${tp}-${i}.out 2> res-run-${tp}-${i}.err 43 | sleep 5 44 | 45 | killall redis-server 46 | sleep 1 47 | killall -9 redis-server 48 | sleep 1 49 | done 50 | module unload redis 51 | } 52 | 53 | run rdb 54 | run orig 55 | -------------------------------------------------------------------------------- /experiments/throughput/run_batch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | re='^[0-9]+$' 8 | if [[ $1 =~ $re ]] ; then 9 | repeat=$1 10 | fi 11 | echo Will repeat for $repeat times 12 | 13 | for f in ${@}; do 14 | if [ ! -d "$f" ]; then 15 | echo Experiment "\"$f\"" not found! 16 | continue 17 | fi 18 | echo Running experiment "\"$f\""... 19 | ./$f/run.sh $repeat 20 | done 21 | -------------------------------------------------------------------------------- /experiments/throughput/slowlog/param: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010 Yahoo! Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. 
See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload B: Read mostly workload 18 | # Application example: photo tagging; add a tag is an update, but most operations are to read tags 19 | # 20 | # Read/update ratio: 95/5 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: zipfian 23 | 24 | workload=site.ycsb.workloads.CoreWorkload 25 | 26 | readallfields=true 27 | 28 | readproportion=0.95 29 | updateproportion=0.05 30 | scanproportion=0 31 | insertproportion=0 32 | 33 | requestdistribution=zipfian 34 | 35 | 36 | redis.host=127.0.0.1 37 | redis.port=6379 38 | fieldlength=10 39 | recordcount=30000 40 | operationcount=10000000 41 | threadcount=32 42 | -------------------------------------------------------------------------------- /experiments/throughput/slowlog/redis.conf: -------------------------------------------------------------------------------- 1 | slowlog-log-slower-than 10000 2 | -------------------------------------------------------------------------------- /experiments/throughput/slowlog/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | if [ ! 
-z "$1" ]; then 8 | repeat=$1 9 | fi 10 | 11 | function ycsb { 12 | $SCRIPT_DIR/../../tools/ycsb/bin/ycsb $@ 13 | } 14 | 15 | function run { 16 | tp=$1 17 | module load redis/$tp 18 | 19 | for i in `seq $repeat`; do 20 | redis-server $SCRIPT_DIR/redis.conf & 21 | sleep 1 22 | 23 | ycsb load redis -s -P $SCRIPT_DIR/param > res-load-${tp}.out 2> res-load-${tp}.err 24 | sleep 5 25 | ycsb run redis -s -P $SCRIPT_DIR/param > res-run-${tp}-${i}.out 2> res-run-${tp}-${i}.err 26 | sleep 5 27 | 28 | killall redis-server 29 | sleep 1 30 | killall -9 redis-server 31 | sleep 1 32 | done 33 | module unload redis 34 | } 35 | 36 | run orig 37 | run slowlog 38 | -------------------------------------------------------------------------------- /experiments/throughput/varnish/config.vcl: -------------------------------------------------------------------------------- 1 | vcl 4.0; 2 | 3 | backend default { 4 | .host = "127.0.0.1"; 5 | .port = "1111"; 6 | } 7 | -------------------------------------------------------------------------------- /experiments/throughput/varnish/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | re='^[0-9]+$' 8 | if [[ $1 =~ $re ]] ; then 9 | repeat=$1 10 | elif [[ $1 == '-n' ]]; then 11 | if [[ $2 =~ $re ]]; then 12 | fileid=$2 13 | else 14 | echo "Invalid -n argument. Expected a number." 15 | exit 1 16 | fi 17 | elif [ ! 
-z "$1" ]; then 18 | echo "Unknown argument \"$1\"" 19 | exit 1 20 | fi 21 | 22 | function ab { 23 | $SCRIPT_DIR/../../../apps/httpd/rel-orig/bin/ab $@ 24 | } 25 | 26 | duration=120 27 | 28 | url=http://127.0.0.1:8080/ 29 | 30 | thd=4 31 | 32 | function run { 33 | systemctl start nginx 34 | sleep 1 35 | 36 | module load varnish/$1 37 | varnishd -a :8080 -f $SCRIPT_DIR/config.vcl -p thread_pools=1 -p thread_pool_min=2 -p thread_pool_max=20 -p thread_pool_timeout=10 38 | sleep 1 39 | 40 | for i in `seq $repeat`; do 41 | if [ ! -z "$fileid" ]; then 42 | i=$fileid 43 | fi 44 | 45 | ab -c$thd -t$duration -n100000000 $url > res-${1}${thd}-${i}.out 46 | sleep 5 47 | done 48 | 49 | varnishadm stop 50 | sleep 1 51 | pkill -9 varnishd 52 | sleep 1 53 | module unload varnish 54 | 55 | systemctl stop nginx 56 | sleep 1 57 | } 58 | 59 | run orbit 60 | run orig 61 | -------------------------------------------------------------------------------- /experiments/throughput/watchdog/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | repeat=1 7 | if [ ! 
-z "$1" ]; then 8 | repeat=$1 9 | fi 10 | 11 | function ab { 12 | $SCRIPT_DIR/../../../apps/httpd/rel-orig/bin/ab $@ 13 | } 14 | 15 | duration=120 16 | 17 | url=http://127.0.0.1:8080/somepath/ 18 | 19 | thd=4 20 | 21 | # Regenerate configs 22 | cd ../../../apps/httpd 23 | ./gen_config.sh $PWD/rel-watchdog/ $PWD/config-appendix 24 | ./gen_config.sh $PWD/rel-orig/ $PWD/config-appendix 25 | cd $SCRIPT_DIR 26 | 27 | function run { 28 | systemctl start nginx 29 | sleep 1 30 | 31 | module load httpd/$1 32 | apachectl -X -k start & 33 | sleep 1 34 | 35 | for i in `seq $repeat`; do 36 | ab -c$thd -t$duration -n100000000 $url > res-${1}${thd}-${i}.out 37 | sleep 5 38 | done 39 | apachectl -X -k stop 40 | sleep 1 41 | killall -9 httpd 42 | module unload httpd 43 | sleep 1 44 | 45 | systemctl stop nginx 46 | sleep 1 47 | } 48 | 49 | run orig 50 | run watchdog 51 | -------------------------------------------------------------------------------- /experiments/tools/.gitignore: -------------------------------------------------------------------------------- 1 | sysbench/ 2 | sysbench-code/ 3 | ycsb/ 4 | ycsb-code/ 5 | -------------------------------------------------------------------------------- /experiments/tools/build_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | ./install_sysbench.sh 7 | ./install_ycsb.sh 8 | -------------------------------------------------------------------------------- /experiments/tools/install_sysbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | module load mysql/orig 7 | if ! which mysqld > /dev/null 2>&1; then 8 | echo "Build of sysbench depends on a built MySQL. Please wait until apps has been built." 
9 | exit 1 10 | fi 11 | 12 | git clone https://github.com/akopytov/sysbench sysbench-code 13 | cd sysbench-code 14 | git checkout 1.0.20 15 | 16 | #export PATH="$PATH:$SCRIPT_DIR/../../apps/mysql/rel-orig/dist/bin" 17 | export PATH="$PATH:/root/mysql/rel-orig/dist/bin" 18 | 19 | ./autogen.sh 20 | ./configure --prefix=$SCRIPT_DIR/sysbench 21 | make -j$(nproc) 22 | make install 23 | -------------------------------------------------------------------------------- /experiments/tools/install_ycsb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | mkdir -p ycsb 7 | 8 | git clone https://github.com/brianfrankcooper/YCSB.git ycsb-code 9 | cd ycsb-code 10 | git checkout ce3eb9ce51c84ee9e236998cdd2cefaeb96798a8 11 | git apply ../ycsb-customize.patch 12 | 13 | mvn -pl site.ycsb:redis-binding -am clean package 14 | tar -xf redis/target/ycsb-redis-binding-*.tar.gz \ 15 | -C $SCRIPT_DIR/ycsb/ --strip-components=1 16 | -------------------------------------------------------------------------------- /experiments/tools/ycsb-customize.patch: -------------------------------------------------------------------------------- 1 | diff --git a/redis/src/main/java/site/ycsb/db/RedisClient.java b/redis/src/main/java/site/ycsb/db/RedisClient.java 2 | index 2de9ad23..12662054 100644 3 | --- a/redis/src/main/java/site/ycsb/db/RedisClient.java 4 | +++ b/redis/src/main/java/site/ycsb/db/RedisClient.java 5 | @@ -144,7 +144,7 @@ public class RedisClient extends DB { 6 | Map values) { 7 | if (jedis.hmset(key, StringByteIterator.getStringMap(values)) 8 | .equals("OK")) { 9 | - jedis.zadd(INDEX_KEY, hash(key), key); 10 | + // jedis.zadd(INDEX_KEY, hash(key), key); 11 | return Status.OK; 12 | } 13 | return Status.ERROR; 14 | @@ -152,7 +152,7 @@ public class RedisClient extends DB { 15 | 16 | @Override 17 | public Status delete(String 
table, String key) { 18 | - return jedis.del(key) == 0 && jedis.zrem(INDEX_KEY, key) == 0 ? Status.ERROR 19 | + return jedis.del(key) == 0 /*&& jedis.zrem(INDEX_KEY, key) == 0*/ ? Status.ERROR 20 | : Status.OK; 21 | } 22 | 23 | @@ -166,7 +166,8 @@ public class RedisClient extends DB { 24 | @Override 25 | public Status scan(String table, String startkey, int recordcount, 26 | Set fields, Vector> result) { 27 | - Set keys = jedis.zrangeByScore(INDEX_KEY, hash(startkey), 28 | + return Status.OK; 29 | + /* Set keys = jedis.zrangeByScore(INDEX_KEY, hash(startkey), 30 | Double.POSITIVE_INFINITY, 0, recordcount); 31 | 32 | HashMap values; 33 | @@ -176,7 +177,7 @@ public class RedisClient extends DB { 34 | result.add(values); 35 | } 36 | 37 | - return Status.OK; 38 | + return Status.OK; */ 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /modulefiles/httpd/orig: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict httpd 4 | 5 | set prefix /root/orbit/apps/httpd/rel-orig 6 | 7 | prepend-path PATH ${prefix}/bin 8 | prepend-path CPATH ${prefix}/include 9 | prepend-path C_INCLUDE_PATH ${prefix}/include 10 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 11 | prepend-path LIBRARY_PATH ${prefix}/lib 12 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 13 | #prepend-path MANPATH ${prefix}/man 14 | -------------------------------------------------------------------------------- /modulefiles/httpd/proxy: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict httpd 4 | 5 | module load userlib/reuse 6 | 7 | set prefix /root/orbit/apps/httpd/rel-proxy 8 | 9 | prepend-path PATH ${prefix}/bin 10 | prepend-path CPATH ${prefix}/include 11 | prepend-path C_INCLUDE_PATH ${prefix}/include 12 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 13 | prepend-path LIBRARY_PATH ${prefix}/lib 14 | prepend-path LD_LIBRARY_PATH 
${prefix}/lib 15 | #prepend-path MANPATH ${prefix}/man 16 | -------------------------------------------------------------------------------- /modulefiles/httpd/segfault: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict httpd 4 | 5 | set prefix /root/orbit/apps/httpd/rel-segfault 6 | 7 | prepend-path PATH ${prefix}/bin 8 | prepend-path CPATH ${prefix}/include 9 | prepend-path C_INCLUDE_PATH ${prefix}/include 10 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 11 | prepend-path LIBRARY_PATH ${prefix}/lib 12 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 13 | #prepend-path MANPATH ${prefix}/man 14 | -------------------------------------------------------------------------------- /modulefiles/httpd/watchdog: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict httpd 4 | 5 | module load userlib/reuse 6 | 7 | set prefix /root/orbit/apps/httpd/rel-watchdog 8 | 9 | prepend-path PATH ${prefix}/bin 10 | prepend-path CPATH ${prefix}/include 11 | prepend-path C_INCLUDE_PATH ${prefix}/include 12 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 13 | prepend-path LIBRARY_PATH ${prefix}/lib 14 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 15 | #prepend-path MANPATH ${prefix}/man 16 | -------------------------------------------------------------------------------- /modulefiles/httpd/watchdog-inject: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict httpd 4 | 5 | module load userlib/reuse 6 | 7 | set prefix /root/orbit/apps/httpd/rel-watchdog-inject 8 | 9 | prepend-path PATH ${prefix}/bin 10 | prepend-path CPATH ${prefix}/include 11 | prepend-path C_INCLUDE_PATH ${prefix}/include 12 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 13 | prepend-path LIBRARY_PATH ${prefix}/lib 14 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 15 | #prepend-path MANPATH ${prefix}/man 16 | 
-------------------------------------------------------------------------------- /modulefiles/leveldb/orbit: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict leveldb 4 | 5 | module load userlib/dealloc 6 | 7 | set prefix /root/orbit/apps/leveldb/rel-orbit 8 | 9 | prepend-path PATH ${prefix}/bin 10 | -------------------------------------------------------------------------------- /modulefiles/leveldb/orig: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict leveldb 4 | 5 | set prefix /root/orbit/apps/leveldb/rel-orig 6 | 7 | prepend-path PATH ${prefix}/bin 8 | -------------------------------------------------------------------------------- /modulefiles/mysql/fork: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict mysql 4 | 5 | set prefix /root/orbit/apps/mysql/rel-fork/dist 6 | 7 | prepend-path PATH ${prefix}/bin 8 | prepend-path CPATH ${prefix}/include 9 | prepend-path C_INCLUDE_PATH ${prefix}/include 10 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 11 | prepend-path LIBRARY_PATH ${prefix}/lib 12 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 13 | #prepend-path MANPATH ${prefix}/man 14 | -------------------------------------------------------------------------------- /modulefiles/mysql/orbit: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict mysql 4 | 5 | module load userlib/plain 6 | 7 | set prefix /root/orbit/apps/mysql/rel-orbit/dist 8 | 9 | prepend-path PATH ${prefix}/bin 10 | prepend-path CPATH ${prefix}/include 11 | prepend-path C_INCLUDE_PATH ${prefix}/include 12 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 13 | prepend-path LIBRARY_PATH ${prefix}/lib 14 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 15 | #prepend-path MANPATH ${prefix}/man 16 | 
-------------------------------------------------------------------------------- /modulefiles/mysql/orig: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict mysql 4 | 5 | set prefix /root/orbit/apps/mysql/rel-orig/dist 6 | 7 | prepend-path PATH ${prefix}/bin 8 | prepend-path CPATH ${prefix}/include 9 | prepend-path C_INCLUDE_PATH ${prefix}/include 10 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 11 | prepend-path LIBRARY_PATH ${prefix}/lib 12 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 13 | #prepend-path MANPATH ${prefix}/man 14 | -------------------------------------------------------------------------------- /modulefiles/mysql/sync: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict mysql 4 | 5 | module load userlib/plain 6 | 7 | set prefix /root/orbit/apps/mysql/rel-sync/dist 8 | 9 | prepend-path PATH ${prefix}/bin 10 | prepend-path CPATH ${prefix}/include 11 | prepend-path C_INCLUDE_PATH ${prefix}/include 12 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 13 | prepend-path LIBRARY_PATH ${prefix}/lib 14 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 15 | #prepend-path MANPATH ${prefix}/man 16 | -------------------------------------------------------------------------------- /modulefiles/nginx/orbit: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict nginx 4 | 5 | module load userlib/reuse 6 | 7 | set prefix /root/orbit/apps/nginx/rel-orbit 8 | 9 | prepend-path PATH ${prefix}/sbin 10 | -------------------------------------------------------------------------------- /modulefiles/nginx/orig: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict nginx 4 | 5 | set prefix /root/orbit/apps/nginx/rel-orig 6 | 7 | prepend-path PATH ${prefix}/sbin 8 | 
-------------------------------------------------------------------------------- /modulefiles/nginx/segfault: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict nginx 4 | 5 | set prefix /root/orbit/apps/nginx/rel-segfault 6 | 7 | prepend-path PATH ${prefix}/sbin 8 | -------------------------------------------------------------------------------- /modulefiles/redis/cpu-hog: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | module load userlib/dealloc 6 | 7 | set prefix /root/orbit/apps/redis/rel-cpu-hog 8 | 9 | prepend-path PATH ${prefix}/bin 10 | -------------------------------------------------------------------------------- /modulefiles/redis/memleak: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | set prefix /root/orbit/apps/redis/rel-memleak 6 | 7 | prepend-path PATH ${prefix}/bin 8 | -------------------------------------------------------------------------------- /modulefiles/redis/oom: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | module load userlib/dealloc 6 | 7 | set prefix /root/orbit/apps/redis/rel-oom 8 | 9 | prepend-path PATH ${prefix}/bin 10 | -------------------------------------------------------------------------------- /modulefiles/redis/orig: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | set prefix /root/orbit/apps/redis/rel-orig 6 | 7 | prepend-path PATH ${prefix}/bin 8 | -------------------------------------------------------------------------------- /modulefiles/redis/rdb: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | module load userlib/dealloc 6 | 7 | set prefix 
/root/orbit/apps/redis/rel-rdb 8 | 9 | prepend-path PATH ${prefix}/bin 10 | -------------------------------------------------------------------------------- /modulefiles/redis/rdb-fault: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | module load userlib/dealloc 6 | 7 | set prefix /root/orbit/apps/redis/rel-rdb-fault 8 | 9 | prepend-path PATH ${prefix}/bin 10 | -------------------------------------------------------------------------------- /modulefiles/redis/slowlog: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | module load userlib/dealloc 6 | 7 | set prefix /root/orbit/apps/redis/rel-slowlog 8 | 9 | prepend-path PATH ${prefix}/bin 10 | -------------------------------------------------------------------------------- /modulefiles/redis/slowlog-delay: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict redis 4 | 5 | module load userlib/dealloc 6 | 7 | set prefix /root/orbit/apps/redis/rel-slowlog-delay 8 | 9 | prepend-path PATH ${prefix}/bin 10 | -------------------------------------------------------------------------------- /modulefiles/userlib/dealloc: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict userlib 4 | 5 | set prefix /root/orbit/userlib/build/rel-dealloc 6 | 7 | prepend-path CPATH ${prefix}/include 8 | prepend-path C_INCLUDE_PATH ${prefix}/include 9 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 10 | prepend-path LIBRARY_PATH ${prefix}/lib 11 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 12 | -------------------------------------------------------------------------------- /modulefiles/userlib/plain: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict userlib 4 | 5 | set prefix 
/root/orbit/userlib/build/rel-plain 6 | 7 | prepend-path CPATH ${prefix}/include 8 | prepend-path C_INCLUDE_PATH ${prefix}/include 9 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 10 | prepend-path LIBRARY_PATH ${prefix}/lib 11 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 12 | -------------------------------------------------------------------------------- /modulefiles/userlib/reuse: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict userlib 4 | 5 | set prefix /root/orbit/userlib/build/rel-reuse 6 | 7 | prepend-path CPATH ${prefix}/include 8 | prepend-path C_INCLUDE_PATH ${prefix}/include 9 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 10 | prepend-path LIBRARY_PATH ${prefix}/lib 11 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 12 | -------------------------------------------------------------------------------- /modulefiles/varnish/orbit: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict varnish 4 | 5 | module load userlib/reuse 6 | 7 | set prefix /root/orbit/apps/varnish/rel-orbit 8 | 9 | prepend-path PATH ${prefix}/bin 10 | prepend-path PATH ${prefix}/sbin 11 | prepend-path CPATH ${prefix}/include 12 | prepend-path C_INCLUDE_PATH ${prefix}/include 13 | prepend-path CPLUS_INCLUDE_PATH ${prefix}/include 14 | prepend-path LIBRARY_PATH ${prefix}/lib 15 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 16 | #prepend-path MANPATH ${prefix}/man 17 | -------------------------------------------------------------------------------- /modulefiles/varnish/orig: -------------------------------------------------------------------------------- 1 | #%Module1.0 2 | 3 | conflict varnish 4 | 5 | set prefix /root/orbit/apps/varnish/rel-orig 6 | 7 | prepend-path PATH ${prefix}/bin 8 | prepend-path PATH ${prefix}/sbin 9 | prepend-path CPATH ${prefix}/include 10 | prepend-path C_INCLUDE_PATH ${prefix}/include 11 | prepend-path CPLUS_INCLUDE_PATH 
${prefix}/include 12 | prepend-path LIBRARY_PATH ${prefix}/lib 13 | prepend-path LD_LIBRARY_PATH ${prefix}/lib 14 | #prepend-path MANPATH ${prefix}/man 15 | -------------------------------------------------------------------------------- /patches/userlib_reuse.patch: -------------------------------------------------------------------------------- 1 | diff --git a/lib/src/orbit.c b/lib/src/orbit.c 2 | index 152ef19..db3462f 100644 3 | --- a/lib/src/orbit.c 4 | +++ b/lib/src/orbit.c 5 | @@ -593,7 +593,7 @@ int orbit_sendv(struct orbit_scratch *s) 6 | /* If the send is not successful, we do not call trunc(), and it is 7 | * safe to allocate a new scratch in the same area since we will 8 | * rewrite it later anyway. */ 9 | - scratch_trunc(s); 10 | + // scratch_trunc(s); 11 | 12 | return ret; 13 | } 14 | -------------------------------------------------------------------------------- /scripts/1k.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Welcome to nginx! 5 | 8 | 9 | 10 |

Welcome to nginx!

11 |

If you see this page, the nginx web server is successfully installed and 12 | working. Further configuration is required.

13 | 14 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor 15 | incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis 16 | nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. 17 | Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu 18 | fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in 19 | culpa qui officia deserunt mollit anim id est laborum. 20 | 21 |

For online documentation and support please refer to 22 | nginx.org.
23 | Commercial support is available at 24 | nginx.com.

25 | 26 |

Thank you for using nginx.

27 | 28 | 29 | -------------------------------------------------------------------------------- /scripts/alias.sh: -------------------------------------------------------------------------------- 1 | export __ORBIT_ROOT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )/../" &> /dev/null && pwd ) 2 | export __ORBIT_IMAGE_FILE=$__ORBIT_ROOT_DIR/qemu-image.img 3 | export __ORBIT_MOUNT_DIR=$__ORBIT_ROOT_DIR/mount-point.dir 4 | function mount_qemu_image() { 5 | sudo mount -o loop $__ORBIT_IMAGE_FILE $__ORBIT_MOUNT_DIR && \ 6 | sudo mount -o bind,ro /dev $__ORBIT_MOUNT_DIR/dev && \ 7 | sudo mount -o bind,ro /dev/pts $__ORBIT_MOUNT_DIR/dev/pts && \ 8 | sudo mount -t proc none $__ORBIT_MOUNT_DIR/proc 9 | } 10 | function unmount_qemu_image() { 11 | busy_process=$(sudo lsof | grep $__ORBIT_MOUNT_DIR | awk '{print($2)}') 12 | if [ ! -z "$busy_process" ]; then 13 | echo "Mount point $__ORBIT_MOUNT_DIR is busy." 14 | echo "Used by the following PID(s):" 15 | echo "$busy_process" 16 | echo "To unmount it, kill these processes or exit properly" 17 | echo "(e.g., 'cd' outside if you are inside the mount point)." 
18 | return 1 19 | fi 20 | 21 | sudo umount $__ORBIT_MOUNT_DIR/dev/pts 22 | sudo umount $__ORBIT_MOUNT_DIR/dev 23 | sudo umount $__ORBIT_MOUNT_DIR/proc 24 | sudo umount $__ORBIT_MOUNT_DIR 25 | } 26 | # Mount/unmount disk image (do not mount if qemu is running) 27 | alias m='pgrep qemu && echo "QEMU is running (with pid above)" || mount_qemu_image' 28 | alias um=unmount_qemu_image 29 | # Chroot into mounted disk image 30 | # (`sudo' is used for setting $HOME and other variables correctly) 31 | alias ch='sudo -i chroot $__ORBIT_MOUNT_DIR' 32 | # Run the VM (fail if still mounted) 33 | alias r='um; (mount | grep $__ORBIT_MOUNT_DIR) || $__ORBIT_ROOT_DIR/scripts/run-kernel.sh' 34 | # Force kill QEMU 35 | alias k='killall qemu-system-x86_64' 36 | -------------------------------------------------------------------------------- /scripts/build_compiler_support.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 5 | cd $SCRIPT_DIR/../ 6 | 7 | git submodule update --init --remote compiler 8 | cd compiler 9 | mkdir build 10 | cd build 11 | cmake .. 
12 | make -j$(nproc) 13 | -------------------------------------------------------------------------------- /scripts/build_kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 5 | cd $SCRIPT_DIR/../ 6 | 7 | git submodule update --init --remote kernel 8 | 9 | cd kernel 10 | cp $SCRIPT_DIR/orbit.config kernel/configs/ 11 | make x86_64_defconfig 12 | make kvm_guest.config 13 | make orbit.config 14 | 15 | git checkout 7139b41f 16 | make -j$(nproc) 17 | cp arch/x86/boot/bzImage bzImage-7139b41f 18 | 19 | git checkout ddcd247b 20 | make -j$(nproc) 21 | cp arch/x86/boot/bzImage bzImage-ddcd247b 22 | -------------------------------------------------------------------------------- /scripts/build_userlib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE-$0}")"/..; pwd) 5 | 6 | cd $ROOT_DIR 7 | 8 | git submodule update --init --remote userlib 9 | 10 | cd userlib 11 | mkdir -p build 12 | mkdir -p build/rel-{plain,dealloc,reuse}/{include,lib} 13 | 14 | function build { 15 | commit=$1 16 | target=$2 17 | patch=$3 18 | 19 | cd $ROOT_DIR/userlib 20 | git checkout $commit 21 | [ -n "$patch" ] && git apply $patch 22 | 23 | cd build 24 | cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo 25 | make -j$(nproc) 26 | 27 | cd $ROOT_DIR/userlib 28 | # TODO: install script within cmake 29 | cp lib/include/orbit.h build/$target/include/ 30 | cp build/lib/liborbit.so build/$target/lib/ 31 | git checkout -- . 
32 | } 33 | 34 | build b5ee3a2 rel-plain 35 | build b5ee3a2 rel-reuse $ROOT_DIR/patches/userlib_reuse.patch 36 | build c5770e9 rel-dealloc 37 | -------------------------------------------------------------------------------- /scripts/fix-modulefiles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR/../ 5 | 6 | echo "Please also run:" 7 | echo " echo 'export MODULEPATH=$(pwd)/modulefiles' >> ~/.bashrc" 8 | echo "to setup the MODULEPATH environment." 9 | 10 | newroot=$(pwd | sed 's/\//\\\//g') 11 | 12 | for d in `ls modulefiles`; do 13 | for f in `ls modulefiles/$d`; do 14 | sed "s/\/root\/orbit/${newroot}/" -i modulefiles/$d/$f; 15 | done 16 | done 17 | -------------------------------------------------------------------------------- /scripts/guest_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR 5 | 6 | if [[ $(id -u) != 0 ]]; then 7 | echo Please run this script as root. 
8 | exit 1 9 | fi 10 | 11 | apt update 12 | apt install -y xterm neovim git psmisc procps tmux cmake build-essential bison \ 13 | libssl-dev libncurses5-dev pkg-config python3 zlib1g-dev curl cgroup-tools \ 14 | automake autotools-dev libedit-dev libjemalloc-dev libncurses-dev \ 15 | libpcre3-dev libtool libtool-bin python3-docutils python3-sphinx cpio \ 16 | llvm-6.0 llvm-6.0-dev llvm-6.0-tools llvm-6.0-runtime llvm-6.0-doc \ 17 | libllvm6.0 clang-6.0 clang-tools-6.0 clang-6.0-doc clang-format-6.0 \ 18 | clang-tidy-6.0 libclang-6.0-dev libclang-common-6.0-dev libclang1-6.0 python-clang-6.0 lld-6.0 \ 19 | maven environment-modules tclsh libcurl4-openssl-dev nginx python3-pip python3-pandas 20 | 21 | cp $SCRIPT_DIR/nginx-orbit-test /etc/nginx/sites-enabled/orbit-test 22 | systemctl disable nginx 23 | systemctl stop nginx 24 | mkdir -p /var/www/rep{1,2} 25 | cp $SCRIPT_DIR/1k.html /var/www/rep1/index.html 26 | cp $SCRIPT_DIR/1k.html /var/www/rep2/index.html 27 | echo "127.0.0.1 fe01 fe02 fe03" >> /etc/hosts 28 | 29 | echo '/dev/sda / ext4 errors=remount-ro,acl 0 1' > /etc/fstab 30 | passwd -d root 31 | echo 'resize > /dev/null 2>&1' >> ~/.bashrc 32 | echo 'if [[ $TMUX = "" ]]; then shutdown -h now; fi' > ~/.bash_logout 33 | 34 | # Varnish will set user to "nobody", and then it cannot see its header 35 | # installed in `/root`... 
Workaround for this: 36 | chmod 755 /root 37 | -------------------------------------------------------------------------------- /scripts/mkimg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 4 | cd $SCRIPT_DIR/../ 5 | 6 | IMG=qemu-image.img 7 | DIR=mount-point.dir 8 | qemu-img create $IMG 40G 9 | mkfs.ext4 $IMG 10 | mkdir $DIR 11 | sudo mount -o loop $IMG $DIR 12 | sudo debootstrap --arch amd64 buster $DIR 13 | sudo umount $DIR 14 | -------------------------------------------------------------------------------- /scripts/nginx-orbit-test: -------------------------------------------------------------------------------- 1 | server { 2 | listen 1111; 3 | root /var/www/rep1; 4 | index index.html; 5 | } 6 | server { 7 | listen 1112; 8 | root /var/www/rep2; 9 | index index.html; 10 | } 11 | -------------------------------------------------------------------------------- /scripts/orbit.config: -------------------------------------------------------------------------------- 1 | # CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION is not set 2 | # CONFIG_COMPACTION is not set 3 | # CONFIG_MIGRATION is not set 4 | # CONFIG_NUMA_BALANCING is not set 5 | CONFIG_MEMCG=y 6 | CONFIG_CFS_BANDWIDTH=y 7 | -------------------------------------------------------------------------------- /scripts/run-kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd $__ORBIT_ROOT_DIR 4 | 5 | if [[ $1 = '-d' ]]; then 6 | DBG='-s -S' # gdb debug options (port 1234; stop cpu) 7 | shift 8 | else 9 | DBG= 10 | fi 11 | # in gdb console, `break start_kernel` 12 | 13 | if [[ $1 = 'formysql' ]]; then 14 | image=kernel/bzImage-7139b41f 15 | elif [ -n "$1" ] && [[ $1 != 'formysql' ]]; then 16 | echo Unknown kernel version "\"$1\"". 
17 | exit 1 18 | else 19 | image=kernel/bzImage-ddcd247b 20 | fi 21 | 22 | KVM=--enable-kvm 23 | #KVM= 24 | 25 | qemu-system-x86_64 -kernel $image \ 26 | -hda qemu-image.img \ 27 | -append "root=/dev/sda console=ttyS0 nokaslr cgroup_enable=memory loglevel=6" \ 28 | ${DBG} \ 29 | ${KVM} \ 30 | -smp cores=4 -m 10G \ 31 | -nographic 32 | #-serial stdio -display none 33 | --------------------------------------------------------------------------------