├── .coveragerc ├── .gitignore ├── .gitmodules ├── .travis.yml ├── COPYRIGHT ├── LICENSE-APACHE ├── LICENSE-MIT ├── Makefile ├── README.md ├── examples ├── benchmarks │ ├── Makefile │ ├── fannkuch_redux │ │ ├── c │ │ │ └── bench.c │ │ ├── java │ │ │ ├── KrunEntry.java │ │ │ └── fannkuchredux.java │ │ ├── lua │ │ │ └── bench.lua │ │ └── python │ │ │ └── bench.py │ └── nbody │ │ ├── c │ │ └── bench.c │ │ ├── java │ │ ├── KrunEntry.java │ │ └── nbody.java │ │ ├── lua │ │ └── bench.lua │ │ └── python │ │ └── bench.py ├── example.krun ├── ext.krun ├── ext_script.py ├── java.krun └── travis.krun ├── iterations_runners ├── Makefile ├── iterations_runner.c ├── iterations_runner.java ├── iterations_runner.js ├── iterations_runner.lua ├── iterations_runner.php ├── iterations_runner.py ├── iterations_runner.rb └── iterations_runner.som ├── krun.py ├── krun ├── __init__.py ├── amperf.py ├── audit.py ├── config.py ├── env.py ├── mail.py ├── platform.py ├── results.py ├── scheduler.py ├── tests │ ├── __init__.py │ ├── broken_etas_results.json.bz2 │ ├── corrupt.krun │ ├── custom_dmesg_whitelist0001.krun │ ├── custom_dmesg_whitelist0002.krun │ ├── env.krun │ ├── example.krun │ ├── example_all_skip.krun │ ├── example_skip_1vm.krun │ ├── mocks.py │ ├── more_complicated.krun │ ├── one_exec.krun │ ├── quick.krun │ ├── quick_results.json.bz2 │ ├── skips.krun │ ├── space_in_benchmark_name.krun │ ├── space_in_variant_name.krun │ ├── space_in_vm_name.krun │ ├── test_amperf.py │ ├── test_audit.py │ ├── test_config.py │ ├── test_entry_point.py │ ├── test_env.py │ ├── test_genericplatform.py │ ├── test_linuxplatform.py │ ├── test_mailer.py │ ├── test_manifest_manager.py │ ├── test_openbsdplatform.py │ ├── test_process.py │ ├── test_results.py │ ├── test_scheduler.py │ ├── test_time_estimate.py │ ├── test_util.py │ └── test_vmdef.py ├── time_estimate.py ├── util.py └── vm_defs.py ├── libkrun ├── .gitignore ├── Makefile ├── libkruntime.c ├── libkruntime.h └── test │ ├── test_libkruntime.py │ └── test_prog.c ├── platform_sanity_checks ├── Makefile ├── check_linux_cpu_affinity_not_pinned.c ├── check_linux_cpu_affinity_pinned.c ├── check_linux_scheduler.c ├── check_nice_priority.c ├── check_openbsd_malloc_options.c └── check_user_change.py ├── requirements.txt ├── scripts ├── calibrate_amperf_tolerance.py ├── check_envlogs.py ├── progress.awk └── run_krun_at_boot ├── utils ├── .gitignore ├── Makefile └── query_turbo.c └── vm_sanity_checks ├── JavaCheckJVMCIServerEnabled.java ├── Makefile └── truffleruby_check_graal_enabled.rb /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | omit = krun/tests/* 4 | 5 | [report] 6 | exclude_lines = 7 | pragma: no cover 8 | def __repr__ 9 | raise NotImplementedError 10 | if __name__ == .__main__.: 11 | # ignore abstract methods/properties mocked with pass 12 | pass 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .coverage 2 | examples/*.log 3 | examples/*.json 4 | examples/*.bz2 5 | examples/*.manifest 6 | **/*.pyc 7 | **/*.swp 8 | **/__pycache__ 9 | **/*.class 10 | **/*.so 11 | iterations_runners/iterations_runner_c 12 | platform_sanity_checks/check_openbsd_malloc_options 13 | .cache 14 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/softdevteam/krun/a0c8e5bfb91d192695df63a500be96b7e6764491/.gitmodules -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | 3 | language: python 4 | python: 2.7 5 | 6 | before_install: 7 | - sudo apt-get update -qq 8 | - sudo apt-get install linux-libc-dev libc6-dev 9 | - sudo apt-get install default-jdk 10 | - sudo apt-get install virt-what 11 | - sudo apt-get install pypy 12 | - sudo apt-get install luajit 13 | 14 | install: 15 | - pip install -r requirements.txt 16 | - pip install colorlog 17 | 18 | script: 19 | - sudo update-java-alternatives -l || true # returns 1 on success! 20 | - uname -a 21 | - make --version 22 | - ldd --version 23 | - java -version 24 | - JAVA_CPPFLAGS='"-I${JAVA_HOME}/include -I${JAVA_HOME}/include/linux"' JAVA_LDFLAGS=-L${JAVA_HOME}/lib ENABLE_JAVA=1 make 25 | - cd examples/benchmarks 26 | - make 27 | - make java-bench 28 | - cd ../../ 29 | - make 30 | - py.test --cov-report term --cov=krun krun libkrun 31 | - cd examples 32 | - python ../krun.py --quick --no-pstate-check --no-tickless-check --no-user-change travis.krun 33 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | Except as otherwise noted (below and/or in individual files), this project is 2 | licensed under the Apache License, Version 2.0 3 | or the MIT license 4 | , at your option. 5 | 6 | Copyright is retained by contributors and/or the organisations they 7 | represent(ed) -- this project does not require copyright assignment. Please see 8 | version control history for a full list of contributors. Note that some files 9 | may include explicit copyright and/or licensing notices. 10 | 11 | The following contributors wish to explicitly make it known that the copyright 12 | of their contributions is retained by an organisation: 13 | 14 | Edd Barrett : copyright retained by 15 | King's College London 16 | Carl Friedrich Bolz-Tereick : copyright retained by 17 | King's College London 18 | Thomas Fransham : copyright retained by 19 | King's College London 20 | Sarah Mount : copyright retained by 21 | King's College London 22 | Laurence Tratt : copyright retained by 23 | King's College London 24 | 25 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use 2 | this file except in compliance with the License. You may obtain a copy of the 3 | License at 4 | 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | Unless required by applicable law or agreed to in writing, software distributed 8 | under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 9 | CONDITIONS OF ANY KIND, either express or implied. See the License for the 10 | specific language governing permissions and limitations under the License. 
11 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any person obtaining a copy of 2 | this software and associated documentation files (the "Software"), to deal in 3 | the Software without restriction, including without limitation the rights to 4 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 5 | of the Software, and to permit persons to whom the Software is furnished to do 6 | so, subject to the following conditions: 7 | 8 | The above copyright notice and this permission notice shall be included in all 9 | copies or substantial portions of the Software. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 14 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 15 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 16 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 17 | SOFTWARE. 18 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | JAVAC ?= javac 2 | MSRS ?= 0 3 | 4 | PASS_DOWN_ARGS = ENABLE_JAVA=${ENABLE_JAVA} JAVAC=${JAVAC} \ 5 | JAVA_CPPFLAGS=${JAVA_CPPFLAGS} \ 6 | JAVA_CFLAGS=${JAVA_CFLAGS} JAVA_LDFLAGS=${JAVA_LDFLAGS} \ 7 | CC=${CC} CFLAGS=${CFLAGS} CPPFLAGS=${CPPFLAGS} \ 8 | LDFLAGS=${LDFLAGS} MSRS=${MSRS} 9 | 10 | .PHONY: utils libkrun vm-sanity-checks clean all 11 | 12 | all: utils iterations-runners libkrun vm-sanity-checks platform-sanity-checks 13 | 14 | iterations-runners: libkrun 15 | cd iterations_runners && ${MAKE} ${PASS_DOWN_ARGS} 16 | 17 | libkrun: 18 | cd libkrun && ${MAKE} ${PASS_DOWN_ARGS} 19 | 20 | vm-sanity-checks: 21 | cd vm_sanity_checks && ${MAKE} ${PASS_DOWN_ARGS} 22 | 23 | platform-sanity-checks: 24 | cd platform_sanity_checks && ${MAKE} ${PASS_DOWN_ARGS} 25 | 26 | utils: 27 | cd utils && ${MAKE} 28 | 29 | clean: 30 | cd iterations_runners && ${MAKE} clean 31 | cd libkrun && ${MAKE} clean 32 | cd vm_sanity_checks && ${MAKE} clean 33 | cd platform_sanity_checks && ${MAKE} clean 34 | cd utils && ${MAKE} clean 35 | -------------------------------------------------------------------------------- /examples/benchmarks/Makefile: -------------------------------------------------------------------------------- 1 | JAVAC ?= javac 2 | 3 | BENCHMARKS = fannkuch_redux nbody 4 | 5 | BENCH_CFLAGS = -shared -Wall -fPIC -m64 6 | BENCH_LDFLAGS = 7 | C_EXTRA_LDFLAGS_nbody = -lm 8 | 9 | .PHONY: all c-bench java-bench clean clean-c clean-java 10 | 11 | all: c-bench 12 | 13 | c-bench: 14 | $(foreach i, ${BENCHMARKS}, \ 15 | echo "Building C benchmark ${i}..."; \ 16 | cd ${i}/c && \ 17 | ${CC} ${CFLAGS} ${CPPFLAGS} ${BENCH_CFLAGS} -o bench.so bench.c \ 18 | ${LDFLAGS} ${BENCH_LDFLAGS} ${C_EXTRA_LDFLAGS_${i}} || exit $?; \ 19 | cd ../../; \ 20 | ) 21 | 22 | java-bench: 23 | $(foreach i, ${BENCHMARKS}, \ 24 | echo "Building java benchmark ${i}..."; \ 25 | cd ${i}/java && \ 26 | CLASSPATH=../../../../iterations_runners/ ${JAVAC} *.java; \ 27 | cd ../../; \ 28 | ) 29 | 30 | clean: clean-java clean-c 31 | 32 | clean-java: 33 | $(foreach i, ${BENCHMARKS}, \ 34 | cd ${i}/java && \ 35 | 
rm -f *.class; \ 36 | cd ../../; \ 37 | ) 38 | 39 | clean-c: 40 | $(foreach i, ${BENCHMARKS}, \ 41 | cd ${i}/c && \ 42 | rm -f *.so; \ 43 | cd ../../; \ 44 | ) 45 | -------------------------------------------------------------------------------- /examples/benchmarks/fannkuch_redux/c/bench.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The Computer Language Benchmarks Game 3 | * http://shootout.alioth.debian.org/ 4 | * 5 | * contributed by Ledrug Katz 6 | * 7 | */ 8 | 9 | #include <stdio.h> 10 | #include <stdlib.h> 11 | #include <sys/types.h> 12 | #include <err.h> 13 | 14 | #define MAX_N 8 15 | #define EXPECT_CKSUM 1616 16 | 17 | /* this depends highly on the platform. It might be faster to use 18 | char type on 32-bit systems; it might be faster to use unsigned. */ 19 | 20 | typedef int elem; 21 | 22 | elem s[MAX_N], t[MAX_N]; 23 | 24 | int maxflips = 0; 25 | int odd = 0; 26 | u_int32_t checksum = 0; 27 | 28 | 29 | int flip() 30 | { 31 | register int i; 32 | register elem *x, *y, c; 33 | 34 | for (x = t, y = s, i = MAX_N; i--; ) 35 | *x++ = *y++; 36 | i = 1; 37 | do { 38 | for (x = t, y = t + t[0]; x < y; ) 39 | c = *x, *x++ = *y, *y-- = c; 40 | i++; 41 | } while (t[t[0]]); 42 | return i; 43 | } 44 | 45 | void rotate(int n) 46 | { 47 | elem c; 48 | register int i; 49 | c = s[0]; 50 | for (i = 1; i <= n; i++) s[i-1] = s[i]; 51 | s[n] = c; 52 | } 53 | 54 | /* Tompkin-Paige iterative perm generation */ 55 | void tk() 56 | { 57 | int i = 0, f, n = MAX_N; 58 | elem c[MAX_N] = {0}; 59 | 60 | while (i < n) { 61 | rotate(i); 62 | if (c[i] >= i) { 63 | c[i++] = 0; 64 | continue; 65 | } 66 | 67 | c[i]++; 68 | i = 1; 69 | odd = ~odd; 70 | if (*s) { 71 | f = s[s[0]] ? flip() : 1; 72 | if (f > maxflips) maxflips = f; 73 | checksum += odd ? -f : f; 74 | } 75 | } 76 | 77 | if (checksum != EXPECT_CKSUM) { 78 | errx(EXIT_FAILURE, "bad checksum: %d vs %d", checksum, EXPECT_CKSUM); 79 | } 80 | } 81 | 82 | void setup_state(void) { 83 | int i; 84 | 85 | for (i = 0; i < MAX_N; i++) { 86 | s[i] = i; 87 | } 88 | checksum = 0; 89 | maxflips = 0; 90 | odd = 0; 91 | } 92 | 93 | void run_iter(int n) 94 | { 95 | int i; 96 | 97 | for (i = 0; i < n; i++) { 98 | setup_state(); 99 | tk(); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /examples/benchmarks/fannkuch_redux/java/KrunEntry.java: -------------------------------------------------------------------------------- 1 | class KrunEntry implements BaseKrunEntry { 2 | static { 3 | fannkuchredux.init(); 4 | } // force class to be loaded 5 | 6 | public void run_iter(int param) { 7 | fannkuchredux.runIter(param); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /examples/benchmarks/fannkuch_redux/java/fannkuchredux.java: -------------------------------------------------------------------------------- 1 | /* The Computer Language Benchmarks Game 2 | http://shootout.alioth.debian.org/ 3 | 4 | contributed by Isaac Gouy 5 | converted to Java by Oleg Mazurov 6 | */ 7 | 8 | public class fannkuchredux 9 | { 10 | static void init() {}; 11 | private static int EXPECT_CKSUM = 1616; 12 | private static int MAX_N = 8; 13 | 14 | 15 | public static int inner_iter() { 16 | int n = MAX_N; 17 | int[] perm = new int[n]; 18 | int[] perm1 = new int[n]; 19 | int[] count = new int[n]; 20 | int maxFlipsCount = 0; 21 | int permCount = 0; 22 | int checksum = 0; 23 | 24 | for(int i=0; i<n; i++) perm1[i] = i; 25 | int r = n; 26 | 27 | while (true) { 28 | 29 | while (r != 1) { count[r-1] = r; r--; } 30 | 31 | for(int i=0; i<n; i++) perm[i] = perm1[i]; 32 | int flipsCount = 0; 33 | int k; 34 | 35 | while ( !((k = perm[0]) == 0) ) { 36 | int k2 = (k+1) >> 1; 37 | for(int i=0; i<k2; i++) { 38 | int temp = perm[i]; perm[i] = perm[k-i]; perm[k-i] = temp; 39 | } 40 | flipsCount++; 41 | } 42 | 43 | maxFlipsCount = Math.max(maxFlipsCount, flipsCount); 44 | checksum += permCount % 2 == 0 ? flipsCount : -flipsCount; 45 | 46 | /* Use incremental change to generate another permutation */ 47 | while (true) { 48 | 49 | if (r == n) { 50 | if (checksum != EXPECT_CKSUM) { 51 | System.out.println("bad checksum: " + checksum + " vs " + EXPECT_CKSUM); 52 | System.exit(1); 53 | } 54 | return maxFlipsCount; 55 | } 56 | int perm0 = perm1[0]; 57 | int i = 0; 58 | while (i < r) { 59 | int j = i + 1; 60 | perm1[i] = perm1[j]; 61 | i = j; 62 | } 63 | perm1[r] = perm0; 64 | count[r] = count[r] - 1; 65 | if (count[r] > 0) break; 66 | r++; 67 | } 68 | 69 | permCount++; 70 | } 71 | } 72 | 
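// runIter() below is the entry point used by the harness: KrunEntry.run_iter()
// forwards to it, and inner_iter() recreates all of its state on each call, so
// every in-process iteration performs identical work.
73 | 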
public static void runIter(int n) { 74 | for (int i = 0; i < n; i++) { 75 | /* inner_iter() deals with state setup */ 76 | inner_iter(); 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /examples/benchmarks/fannkuch_redux/lua/bench.lua: -------------------------------------------------------------------------------- 1 | -- The Computer Language Benchmarks Game 2 | -- http://shootout.alioth.debian.org/ 3 | -- contributed by Mike Pall 4 | 5 | local EXPECT_CKSUM = 1616 6 | local MAX_N = 8 7 | 8 | function run_iter(n) 9 | for i=1,n do 10 | inner_iter() 11 | end 12 | end 13 | 14 | function inner_iter() 15 | n = MAX_N 16 | local p, q, s, sign, maxflips, sum = {}, {}, {}, 1, 0, 0 17 | for i=1,n do p[i] = i; q[i] = i; s[i] = i end 18 | repeat 19 | -- Copy and flip. 20 | local q1 = p[1] -- Cache 1st element. 21 | if q1 ~= 1 then 22 | for i=2,n do q[i] = p[i] end -- Work on a copy. 23 | local flips = 1 24 | repeat 25 | local qq = q[q1] 26 | if qq == 1 then -- ... until 1st element is 1. 27 | sum = sum + sign*flips 28 | if flips > maxflips then maxflips = flips end -- New maximum? 29 | break 30 | end 31 | q[q1] = q1 32 | if q1 >= 4 then 33 | local i, j = 2, q1 - 1 34 | repeat q[i], q[j] = q[j], q[i]; i = i + 1; j = j - 1; until i >= j 35 | end 36 | q1 = qq; flips = flips + 1 37 | until false 38 | end 39 | -- Permute. 40 | if sign == 1 then 41 | p[2], p[1] = p[1], p[2]; sign = -1 -- Rotate 1<-2. 42 | else 43 | p[2], p[3] = p[3], p[2]; sign = 1 -- Rotate 1<-2 and 1<-2<-3. 44 | for i=3,n do 45 | local sx = s[i] 46 | if sx ~= 1 then s[i] = sx-1; break end 47 | if i == n then 48 | if sum ~= EXPECT_CKSUM then 49 | io.write("bad checksum: " .. sum .. " vs " .. EXPECT_CKSUM) 50 | os.exit(1) 51 | end 52 | return sum, maxflips 53 | end -- Out of permutations. 54 | s[i] = i 55 | -- Rotate 1<-...<-i+1. 
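-- (i.e. save p[1], shift p[2]..p[i+1] down one slot, then drop the old p[1]
-- into p[i+1], as the next line does)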
56 | local t = p[1]; for j=1,i do p[j] = p[j+1] end; p[i+1] = t 57 | end 58 | end 59 | until false 60 | end 61 | 62 | --local n = tonumber(arg and arg[1]) or 1 63 | --local sum, flips = fannkuch(n) 64 | --io.write(sum, "\nPfannkuchen(", n, ") = ", flips, "\n") 65 | -------------------------------------------------------------------------------- /examples/benchmarks/fannkuch_redux/python/bench.py: -------------------------------------------------------------------------------- 1 | # The Computer Language Benchmarks Game 2 | # http://shootout.alioth.debian.org/ 3 | 4 | # contributed by Isaac Gouy 5 | # converted to Java by Oleg Mazurov 6 | # converted to Python by Buck Golemon 7 | # modified by Justin Peel 8 | 9 | import sys 10 | 11 | EXPECT_CKSUM = 1616 12 | MAX_N = 8 13 | 14 | def run_iter(n): 15 | for i in xrange(n): 16 | inner_iter() 17 | 18 | def inner_iter(): 19 | n = MAX_N 20 | maxFlipsCount = 0 21 | permSign = True 22 | checksum = 0 23 | 24 | perm1 = list(range(n)) 25 | count = perm1[:] 26 | rxrange = range(2, n - 1) 27 | nm = n - 1 28 | while 1: 29 | k = perm1[0] 30 | if k: 31 | perm = perm1[:] 32 | flipsCount = 1 33 | kk = perm[k] 34 | while kk: 35 | perm[:k+1] = perm[k::-1] 36 | flipsCount += 1 37 | k = kk 38 | kk = perm[kk] 39 | if maxFlipsCount < flipsCount: 40 | maxFlipsCount = flipsCount 41 | checksum += flipsCount if permSign else -flipsCount 42 | 43 | # Use incremental change to generate another permutation 44 | if permSign: 45 | perm1[0],perm1[1] = perm1[1],perm1[0] 46 | permSign = False 47 | else: 48 | perm1[1],perm1[2] = perm1[2],perm1[1] 49 | permSign = True 50 | for r in rxrange: 51 | if count[r]: 52 | break 53 | count[r] = r 54 | perm0 = perm1[0] 55 | perm1[:r+1] = perm1[1:r+2] 56 | perm1[r+1] = perm0 57 | else: 58 | r = nm 59 | if not count[r]: 60 | if checksum != EXPECT_CKSUM: 61 | print("bad checksum: %d vs %d" % (checksum, EXPECT_CKSUM)) 62 | sys.exit(1) 63 | return maxFlipsCount 64 | count[r] -= 1 65 | -------------------------------------------------------------------------------- /examples/benchmarks/nbody/c/bench.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The Great Computer Language Shootout 3 | * http://shootout.alioth.debian.org/ 4 | * 5 | * contributed by Christoph Bauer 6 | * 7 | */ 8 | 9 | #include <math.h> 10 | #include <stdio.h> 11 | #include <stdlib.h> 12 | #include <string.h> 13 | #include <err.h> 14 | 15 | #define pi 3.141592653589793 16 | #define solar_mass (4 * pi * pi) 17 | #define days_per_year 365.24 18 | 19 | static double checksum = 0; 20 | #define EXPECT_CHECKSUM -0.3381550232201908645635057837353087961673736572265625 21 | #define N_ADVANCES 100000 22 | #define EPSILON 0.0000000000001 23 | 24 | 25 | struct planet { 26 | double x, y, z; 27 | double vx, vy, vz; 28 | double mass; 29 | }; 30 | 31 | void n_advance(int nbodies, struct planet * bodies, double dt) 32 | { 33 | int i, j; 34 | 35 | for (i = 0; i < nbodies; i++) { 36 | struct planet * b = &(bodies[i]); 37 | for (j = i + 1; j < nbodies; j++) { 38 | struct planet * b2 = &(bodies[j]); 39 | double dx = b->x - b2->x; 40 | double dy = b->y - b2->y; 41 | double dz = b->z - b2->z; 42 | double distance = sqrt(dx * dx + dy * dy + dz * dz); 43 | double mag = dt / (distance * distance * distance); 44 | b->vx -= dx * b2->mass * mag; 45 | b->vy -= dy * b2->mass * mag; 46 | b->vz -= dz * b2->mass * mag; 47 | b2->vx += dx * b->mass * mag; 48 | b2->vy += dy * b->mass * mag; 49 | b2->vz += dz * b->mass * mag; 50 | } 51 | } 52 | for (i = 0; i < nbodies; i++) { 53 | struct planet * b = &(bodies[i]); 54 
| b->x += dt * b->vx; 55 | b->y += dt * b->vy; 56 | b->z += dt * b->vz; 57 | } 58 | } 59 | 60 | double energy(int nbodies, struct planet * bodies) 61 | { 62 | double e; 63 | int i, j; 64 | 65 | e = 0.0; 66 | for (i = 0; i < nbodies; i++) { 67 | struct planet * b = &(bodies[i]); 68 | e += 0.5 * b->mass * (b->vx * b->vx + b->vy * b->vy + b->vz * b->vz); 69 | for (j = i + 1; j < nbodies; j++) { 70 | struct planet * b2 = &(bodies[j]); 71 | double dx = b->x - b2->x; 72 | double dy = b->y - b2->y; 73 | double dz = b->z - b2->z; 74 | double distance = sqrt(dx * dx + dy * dy + dz * dz); 75 | e -= (b->mass * b2->mass) / distance; 76 | } 77 | } 78 | return e; 79 | } 80 | 81 | void offset_momentum(int nbodies, struct planet * bodies) 82 | { 83 | double px = 0.0, py = 0.0, pz = 0.0; 84 | int i; 85 | for (i = 0; i < nbodies; i++) { 86 | px += bodies[i].vx * bodies[i].mass; 87 | py += bodies[i].vy * bodies[i].mass; 88 | pz += bodies[i].vz * bodies[i].mass; 89 | } 90 | bodies[0].vx = - px / solar_mass; 91 | bodies[0].vy = - py / solar_mass; 92 | bodies[0].vz = - pz / solar_mass; 93 | } 94 | 95 | #define NBODIES 5 96 | struct planet *bodies = NULL; 97 | /* 98 | * Benchmark mutates the bodies! 99 | * This is the initial state, which is restored after each run. 100 | */ 101 | const struct planet initial_bodies[NBODIES] = { 102 | { /* sun */ 103 | 0, 0, 0, 0, 0, 0, solar_mass 104 | }, 105 | { /* jupiter */ 106 | 4.84143144246472090e+00, 107 | -1.16032004402742839e+00, 108 | -1.03622044471123109e-01, 109 | 1.66007664274403694e-03 * days_per_year, 110 | 7.69901118419740425e-03 * days_per_year, 111 | -6.90460016972063023e-05 * days_per_year, 112 | 9.54791938424326609e-04 * solar_mass 113 | }, 114 | { /* saturn */ 115 | 8.34336671824457987e+00, 116 | 4.12479856412430479e+00, 117 | -4.03523417114321381e-01, 118 | -2.76742510726862411e-03 * days_per_year, 119 | 4.99852801234917238e-03 * days_per_year, 120 | 2.30417297573763929e-05 * days_per_year, 121 | 2.85885980666130812e-04 * solar_mass 122 | }, 123 | { /* uranus */ 124 | 1.28943695621391310e+01, 125 | -1.51111514016986312e+01, 126 | -2.23307578892655734e-01, 127 | 2.96460137564761618e-03 * days_per_year, 128 | 2.37847173959480950e-03 * days_per_year, 129 | -2.96589568540237556e-05 * days_per_year, 130 | 4.36624404335156298e-05 * solar_mass 131 | }, 132 | { /* neptune */ 133 | 1.53796971148509165e+01, 134 | -2.59193146099879641e+01, 135 | 1.79258772950371181e-01, 136 | 2.68067772490389322e-03 * days_per_year, 137 | 1.62824170038242295e-03 * days_per_year, 138 | -9.51592254519715870e-05 * days_per_year, 139 | 5.15138902046611451e-05 * solar_mass 140 | } 141 | }; 142 | 143 | void inner_iter(int n) 144 | { 145 | int i; 146 | 147 | offset_momentum(NBODIES, bodies); 148 | checksum += energy(NBODIES, bodies); 149 | for (i = 1; i <= n; i++) 150 | n_advance(NBODIES, bodies, 0.01); 151 | checksum += energy(NBODIES, bodies); 152 | 153 | if (fabs(checksum - EXPECT_CHECKSUM) >= EPSILON) { 154 | errx(EXIT_FAILURE, "bad checksum: %.52f vs %.52f", 155 | checksum, EXPECT_CHECKSUM); 156 | } 157 | } 158 | 159 | void run_iter(int n) { 160 | int i; 161 | 162 | if ((bodies = malloc(sizeof(initial_bodies))) == NULL) { 163 | errx(EXIT_FAILURE, "malloc failed"); 164 | } 165 | 166 | for (i = 0; i < n; i++) { 167 | /* reset global state */ 168 | checksum = 0; 169 | memcpy(bodies, initial_bodies, sizeof(initial_bodies)); 170 | inner_iter(N_ADVANCES); 171 | } 172 | 173 | free(bodies); 174 | } 175 | --------------------------------------------------------------------------------
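Both C benchmarks above follow the same contract: the file is compiled into a position-independent bench.so (see examples/benchmarks/Makefile) that exports a run_iter(int) function, which iterations_runner_c resolves with dlsym() and calls once per in-process iteration, inside the timed section, passing the benchmark parameter from the config's BENCHMARKS dict as its argument; run_iter() itself resets any global state and verifies a checksum so that every iteration performs identical, validated work. A minimal sketch of a new benchmark in this style (the my_bench name, workload and checksum below are illustrative placeholders, not files from this repository):

/* examples/benchmarks/my_bench/c/bench.c -- hypothetical sketch */
#include <err.h>
#include <stdlib.h>

#define EXPECT_CKSUM 42

static long checksum;

static void setup_state(void) {
    checksum = 0;
}

static void work(void) {
    int i;

    /* Placeholder workload: real benchmarks do a fixed amount of work whose
       result can be checked against a known value. */
    for (i = 0; i < EXPECT_CKSUM; i++)
        checksum++;
}

/* Entry point resolved by name ("run_iter") and timed by iterations_runner_c. */
void run_iter(int n)
{
    int i;

    for (i = 0; i < n; i++) {
        setup_state();
        work();
        if (checksum != EXPECT_CKSUM)
            errx(EXIT_FAILURE, "bad checksum: %ld vs %d", checksum, EXPECT_CKSUM);
    }
}

Like the benchmarks above, such a file would be built with the -shared -Wall -fPIC flags from BENCH_CFLAGS.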
/examples/benchmarks/nbody/java/KrunEntry.java: -------------------------------------------------------------------------------- 1 | 2 | class KrunEntry implements BaseKrunEntry { 3 | public void run_iter(int param) { 4 | nbody.runIter(param); 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /examples/benchmarks/nbody/java/nbody.java: -------------------------------------------------------------------------------- 1 | /* The Computer Language Benchmarks Game 2 | 3 | http://shootout.alioth.debian.org/ 4 | 5 | 6 | 7 | contributed by Mark C. Lewis 8 | 9 | modified slightly by Chad Whipkey 10 | 11 | */ 12 | 13 | public final class nbody { 14 | static void init() {}; 15 | private static double checksum = 0; 16 | private static final int N_ADVANCES = 100000; 17 | private static final double EXPECT_CHECKSUM = -0.3381550232201908645635057837353087961673736572265625; 18 | private static final double EPSILON = 0.0000000000001; 19 | 20 | public static void runIter(int n) { 21 | for (int i = 0; i < n; i++) { 22 | inner_iter(N_ADVANCES); 23 | } 24 | } 25 | 26 | private static void inner_iter(int n) { 27 | checksum = 0; 28 | NBodySystem bodies = new NBodySystem(); 29 | checksum += bodies.energy(); 30 | for (int i=0; i<n; i++) { 31 | bodies.advance(0.01); 32 | } 33 | checksum += bodies.energy(); 34 | if (Math.abs(checksum - EXPECT_CHECKSUM) >= EPSILON) { 35 | System.out.println("bad checksum: " + checksum + " vs " + EXPECT_CHECKSUM); 36 | System.exit(1); 37 | } 38 | } 39 | } 40 | 41 | final class NBodySystem { 42 | private Body[] bodies; 43 | 44 | public NBodySystem(){ 45 | bodies = new Body[]{ 46 | Body.sun(), 47 | Body.jupiter(), 48 | Body.saturn(), 49 | Body.uranus(), 50 | Body.neptune() 51 | }; 52 | 53 | double px = 0.0; 54 | double py = 0.0; 55 | double pz = 0.0; 56 | for(int i=0; i < bodies.length; ++i) { 57 | px += bodies[i].vx * bodies[i].mass; 58 | py += bodies[i].vy * bodies[i].mass; 59 | pz += bodies[i].vz * bodies[i].mass; 60 | } 61 | bodies[0].offsetMomentum(px,py,pz); 62 | } 63 | 64 | public void advance(double dt) { 65 | 66 | for(int i=0; i < bodies.length; ++i) { 67 | Body iBody = bodies[i]; 68 | for(int j=i+1; j < bodies.length; ++j) { 69 | double dx = iBody.x - bodies[j].x; 70 | double dy = iBody.y - bodies[j].y; 71 | double dz = iBody.z - bodies[j].z; 72 | 73 | double dSquared = dx * dx + dy * dy + dz * dz; 74 | double distance = Math.sqrt(dSquared); 75 | double mag = dt / (dSquared * distance); 76 | 77 | iBody.vx -= dx * bodies[j].mass * mag; 78 | iBody.vy -= dy * bodies[j].mass * mag; 79 | iBody.vz -= dz * bodies[j].mass * mag; 80 | 81 | bodies[j].vx += dx * iBody.mass * mag; 82 | bodies[j].vy += dy * iBody.mass * mag; 83 | bodies[j].vz += dz * iBody.mass * mag; 84 | } 85 | } 86 | 87 | for ( Body body : bodies) { 88 | body.x += dt * body.vx; 89 | body.y += dt * body.vy; 90 | body.z += dt * body.vz; 91 | } 92 | } 93 | 94 | public double energy(){ 95 | double dx, dy, dz, distance; 96 | double e = 0.0; 97 | 98 | for (int i=0; i < bodies.length; ++i) { 99 | Body iBody = bodies[i]; 100 | e += 0.5 * iBody.mass * 101 | ( iBody.vx * iBody.vx 102 | + iBody.vy * iBody.vy 103 | + iBody.vz * iBody.vz ); 104 | 105 | for (int j=i+1; j < bodies.length; ++j) { 106 | Body jBody = bodies[j]; 107 | dx = iBody.x - jBody.x; 108 | dy = iBody.y - jBody.y; 109 | dz = iBody.z - jBody.z; 110 | 111 | distance = Math.sqrt(dx*dx + dy*dy + dz*dz); 112 | e -= (iBody.mass * jBody.mass) / distance; 113 | } 114 | } 115 | return e; 116 | } 117 | } 118 | 119 | 120 | final class Body { 121 | static final double PI = 3.141592653589793; 122 | static final double SOLAR_MASS = 4 * PI * PI;
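// Units are AU, years and solar masses; in these units the gravitational
// constant G is 4*pi^2, so masses scaled by SOLAR_MASS already incorporate G
// and the force and energy calculations above need no explicit constant.
123 | 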
static final double DAYS_PER_YEAR = 365.24; 124 | 125 | public double x, y, z, vx, vy, vz, mass; 126 | 127 | public Body(){} 128 | 129 | static Body jupiter(){ 130 | Body p = new Body(); 131 | p.x = 4.84143144246472090e+00; 132 | p.y = -1.16032004402742839e+00; 133 | p.z = -1.03622044471123109e-01; 134 | p.vx = 1.66007664274403694e-03 * DAYS_PER_YEAR; 135 | p.vy = 7.69901118419740425e-03 * DAYS_PER_YEAR; 136 | p.vz = -6.90460016972063023e-05 * DAYS_PER_YEAR; 137 | p.mass = 9.54791938424326609e-04 * SOLAR_MASS; 138 | return p; 139 | } 140 | 141 | static Body saturn(){ 142 | Body p = new Body(); 143 | p.x = 8.34336671824457987e+00; 144 | p.y = 4.12479856412430479e+00; 145 | p.z = -4.03523417114321381e-01; 146 | p.vx = -2.76742510726862411e-03 * DAYS_PER_YEAR; 147 | p.vy = 4.99852801234917238e-03 * DAYS_PER_YEAR; 148 | p.vz = 2.30417297573763929e-05 * DAYS_PER_YEAR; 149 | p.mass = 2.85885980666130812e-04 * SOLAR_MASS; 150 | return p; 151 | } 152 | 153 | static Body uranus(){ 154 | Body p = new Body(); 155 | p.x = 1.28943695621391310e+01; 156 | p.y = -1.51111514016986312e+01; 157 | p.z = -2.23307578892655734e-01; 158 | p.vx = 2.96460137564761618e-03 * DAYS_PER_YEAR; 159 | p.vy = 2.37847173959480950e-03 * DAYS_PER_YEAR; 160 | p.vz = -2.96589568540237556e-05 * DAYS_PER_YEAR; 161 | p.mass = 4.36624404335156298e-05 * SOLAR_MASS; 162 | return p; 163 | } 164 | 165 | static Body neptune(){ 166 | Body p = new Body(); 167 | p.x = 1.53796971148509165e+01; 168 | p.y = -2.59193146099879641e+01; 169 | p.z = 1.79258772950371181e-01; 170 | p.vx = 2.68067772490389322e-03 * DAYS_PER_YEAR; 171 | p.vy = 1.62824170038242295e-03 * DAYS_PER_YEAR; 172 | p.vz = -9.51592254519715870e-05 * DAYS_PER_YEAR; 173 | p.mass = 5.15138902046611451e-05 * SOLAR_MASS; 174 | return p; 175 | } 176 | 177 | static Body sun(){ 178 | Body p = new Body(); 179 | p.mass = SOLAR_MASS; 180 | return p; 181 | } 182 | 183 | Body offsetMomentum(double px, double py, double pz){ 184 | vx = -px / SOLAR_MASS; 185 | vy = -py / SOLAR_MASS; 186 | vz = -pz / SOLAR_MASS; 187 | return this; 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /examples/benchmarks/nbody/lua/bench.lua: -------------------------------------------------------------------------------- 1 | -- The Computer Language Benchmarks Game 2 | -- http://shootout.alioth.debian.org/ 3 | -- contributed by Mike Pall 4 | -- modified by Geoff Leyland 5 | -- modified by Mario Pernici 6 | 7 | sun = {} 8 | jupiter = {} 9 | saturn = {} 10 | uranus = {} 11 | neptune = {} 12 | 13 | local EXPECT_CHECKSUM = -0.3381550232201908645635057837353087961673736572265625 14 | local N_ADVANCES = 100000 15 | local EPSILON = 0.0000000000001 16 | 17 | local sqrt = math.sqrt 18 | 19 | local PI = 3.141592653589793 20 | local SOLAR_MASS = 4 * PI * PI 21 | local DAYS_PER_YEAR = 365.24 22 | 23 | function setup_state() 24 | -- defines global variables, that get mutated. 
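-- (each call restores the same initial configuration used by the C, Java and
-- Python versions of this benchmark, so every in-process iteration starts
-- from identical state)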
25 | sun.x = 0.0 26 | sun.y = 0.0 27 | sun.z = 0.0 28 | sun.vx = 0.0 29 | sun.vy = 0.0 30 | sun.vz = 0.0 31 | sun.mass = SOLAR_MASS 32 | jupiter.x = 4.84143144246472090e+00 33 | jupiter.y = -1.16032004402742839e+00 34 | jupiter.z = -1.03622044471123109e-01 35 | jupiter.vx = 1.66007664274403694e-03 * DAYS_PER_YEAR 36 | jupiter.vy = 7.69901118419740425e-03 * DAYS_PER_YEAR 37 | jupiter.vz = -6.90460016972063023e-05 * DAYS_PER_YEAR 38 | jupiter.mass = 9.54791938424326609e-04 * SOLAR_MASS 39 | saturn.x = 8.34336671824457987e+00 40 | saturn.y = 4.12479856412430479e+00 41 | saturn.z = -4.03523417114321381e-01 42 | saturn.vx = -2.76742510726862411e-03 * DAYS_PER_YEAR 43 | saturn.vy = 4.99852801234917238e-03 * DAYS_PER_YEAR 44 | saturn.vz = 2.30417297573763929e-05 * DAYS_PER_YEAR 45 | saturn.mass = 2.85885980666130812e-04 * SOLAR_MASS 46 | uranus.x = 1.28943695621391310e+01 47 | uranus.y = -1.51111514016986312e+01 48 | uranus.z = -2.23307578892655734e-01 49 | uranus.vx = 2.96460137564761618e-03 * DAYS_PER_YEAR 50 | uranus.vy = 2.37847173959480950e-03 * DAYS_PER_YEAR 51 | uranus.vz = -2.96589568540237556e-05 * DAYS_PER_YEAR 52 | uranus.mass = 4.36624404335156298e-05 * SOLAR_MASS 53 | neptune.x = 1.53796971148509165e+01 54 | neptune.y = -2.59193146099879641e+01 55 | neptune.z = 1.79258772950371181e-01 56 | neptune.vx = 2.68067772490389322e-03 * DAYS_PER_YEAR 57 | neptune.vy = 1.62824170038242295e-03 * DAYS_PER_YEAR 58 | neptune.vz = -9.51592254519715870e-05 * DAYS_PER_YEAR 59 | neptune.mass = 5.15138902046611451e-05 * SOLAR_MASS 60 | 61 | bodies = {sun,jupiter,saturn,uranus,neptune} 62 | end 63 | 64 | local function advance(bodies, nbody, dt) 65 | for i=1,nbody do 66 | local bi = bodies[i] 67 | local bix, biy, biz, bimass = bi.x, bi.y, bi.z, bi.mass 68 | local bivx, bivy, bivz = bi.vx, bi.vy, bi.vz 69 | for j=i+1,nbody do 70 | local bj = bodies[j] 71 | local dx, dy, dz = bix-bj.x, biy-bj.y, biz-bj.z 72 | local dist2 = dx*dx + dy*dy + dz*dz 73 | local mag = sqrt(dist2) 74 | mag = dt / (mag * dist2) 75 | local bm = bj.mass*mag 76 | bivx = bivx - (dx * bm) 77 | bivy = bivy - (dy * bm) 78 | bivz = bivz - (dz * bm) 79 | bm = bimass*mag 80 | bj.vx = bj.vx + (dx * bm) 81 | bj.vy = bj.vy + (dy * bm) 82 | bj.vz = bj.vz + (dz * bm) 83 | end 84 | bi.vx = bivx 85 | bi.vy = bivy 86 | bi.vz = bivz 87 | bi.x = bix + dt * bivx 88 | bi.y = biy + dt * bivy 89 | bi.z = biz + dt * bivz 90 | end 91 | end 92 | 93 | local function energy(bodies, nbody) 94 | local e = 0 95 | for i=1,nbody do 96 | local bi = bodies[i] 97 | local vx, vy, vz, bim = bi.vx, bi.vy, bi.vz, bi.mass 98 | e = e + (0.5 * bim * (vx*vx + vy*vy + vz*vz)) 99 | for j=i+1,nbody do 100 | local bj = bodies[j] 101 | local dx, dy, dz = bi.x-bj.x, bi.y-bj.y, bi.z-bj.z 102 | local distance = sqrt(dx*dx + dy*dy + dz*dz) 103 | e = e - ((bim * bj.mass) / distance) 104 | end 105 | end 106 | return e 107 | end 108 | 109 | local function offsetMomentum(b, nbody) 110 | local px, py, pz = 0, 0, 0 111 | for i=1,nbody do 112 | local bi = b[i] 113 | local bim = bi.mass 114 | px = px + (bi.vx * bim) 115 | py = py + (bi.vy * bim) 116 | pz = pz + (bi.vz * bim) 117 | end 118 | b[1].vx = -px / SOLAR_MASS 119 | b[1].vy = -py / SOLAR_MASS 120 | b[1].vz = -pz / SOLAR_MASS 121 | end 122 | 123 | local function inner_iter(N) 124 | local checksum = 0 125 | setup_state() 126 | local nbody = #bodies 127 | 128 | offsetMomentum(bodies, nbody) 129 | checksum = checksum + energy(bodies, nbody) 130 | for i=1,N do advance(bodies, nbody, 0.01) end 131 | checksum = checksum + energy(bodies, 
nbody) 132 | 133 | if math.abs(checksum - EXPECT_CHECKSUM) >= EPSILON then 134 | print("bad checksum: " .. checksum .. " vs " .. EXPECT_CHECKSUM) 135 | os.exit(1) 136 | end 137 | end 138 | 139 | function run_iter(n) 140 | for i=1, n do 141 | inner_iter(N_ADVANCES) 142 | end 143 | end 144 | -------------------------------------------------------------------------------- /examples/benchmarks/nbody/python/bench.py: -------------------------------------------------------------------------------- 1 | # The Computer Language Benchmarks Game 2 | # http://shootout.alioth.debian.org/ 3 | # 4 | # originally by Kevin Carson 5 | # modified by Tupteq, Fredrik Johansson, and Daniel Nanz 6 | # modified by Maciej Fijalkowski 7 | 8 | import sys 9 | 10 | EXPECT_CHECKSUM = -0.3381550232201908645635057837353087961673736572265625 11 | N_ADVANCES = 100000 12 | EPSILON = 0.0000000000001 13 | 14 | 15 | def combinations(l): 16 | result = [] 17 | for x in xrange(len(l) - 1): 18 | ls = l[x+1:] 19 | for y in ls: 20 | result.append((l[x],y)) 21 | return result 22 | 23 | PI = 3.141592653589793 24 | SOLAR_MASS = 4 * PI * PI 25 | DAYS_PER_YEAR = 365.24 26 | 27 | def setup_state(): 28 | 29 | bodies = { 30 | 'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], SOLAR_MASS), 31 | 32 | 'jupiter': ([4.84143144246472090e+00, 33 | -1.16032004402742839e+00, 34 | -1.03622044471123109e-01], 35 | [1.66007664274403694e-03 * DAYS_PER_YEAR, 36 | 7.69901118419740425e-03 * DAYS_PER_YEAR, 37 | -6.90460016972063023e-05 * DAYS_PER_YEAR], 38 | 9.54791938424326609e-04 * SOLAR_MASS), 39 | 40 | 'saturn': ([8.34336671824457987e+00, 41 | 4.12479856412430479e+00, 42 | -4.03523417114321381e-01], 43 | [-2.76742510726862411e-03 * DAYS_PER_YEAR, 44 | 4.99852801234917238e-03 * DAYS_PER_YEAR, 45 | 2.30417297573763929e-05 * DAYS_PER_YEAR], 46 | 2.85885980666130812e-04 * SOLAR_MASS), 47 | 48 | 'uranus': ([1.28943695621391310e+01, 49 | -1.51111514016986312e+01, 50 | -2.23307578892655734e-01], 51 | [2.96460137564761618e-03 * DAYS_PER_YEAR, 52 | 2.37847173959480950e-03 * DAYS_PER_YEAR, 53 | -2.96589568540237556e-05 * DAYS_PER_YEAR], 54 | 4.36624404335156298e-05 * SOLAR_MASS), 55 | 56 | 'neptune': ([1.53796971148509165e+01, 57 | -2.59193146099879641e+01, 58 | 1.79258772950371181e-01], 59 | [2.68067772490389322e-03 * DAYS_PER_YEAR, 60 | 1.62824170038242295e-03 * DAYS_PER_YEAR, 61 | -9.51592254519715870e-05 * DAYS_PER_YEAR], 62 | 5.15138902046611451e-05 * SOLAR_MASS) } 63 | 64 | 65 | system = bodies.values() 66 | pairs = combinations(system) 67 | 68 | return system, bodies, pairs 69 | 70 | 71 | def advance(dt, n, bodies, pairs): 72 | 73 | for i in xrange(n): 74 | for (([x1, y1, z1], v1, m1), 75 | ([x2, y2, z2], v2, m2)) in pairs: 76 | dx = x1 - x2 77 | dy = y1 - y2 78 | dz = z1 - z2 79 | mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5)) 80 | b1m = m1 * mag 81 | b2m = m2 * mag 82 | v1[0] -= dx * b2m 83 | v1[1] -= dy * b2m 84 | v1[2] -= dz * b2m 85 | v2[0] += dx * b1m 86 | v2[1] += dy * b1m 87 | v2[2] += dz * b1m 88 | for (r, [vx, vy, vz], m) in bodies: 89 | r[0] += dt * vx 90 | r[1] += dt * vy 91 | r[2] += dt * vz 92 | 93 | 94 | def report_energy(bodies, pairs, e=0.0): 95 | 96 | for (((x1, y1, z1), v1, m1), 97 | ((x2, y2, z2), v2, m2)) in pairs: 98 | dx = x1 - x2 99 | dy = y1 - y2 100 | dz = z1 - z2 101 | e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz) ** 0.5) 102 | for (r, [vx, vy, vz], m) in bodies: 103 | e += m * (vx * vx + vy * vy + vz * vz) / 2. 
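    # At this point e holds the system's total energy: the kinetic terms
    # (m*v*v/2) added here plus the negative pairwise potential terms
    # accumulated above.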
104 | return e 105 | 106 | def offset_momentum(ref, bodies, px=0.0, py=0.0, pz=0.0): 107 | 108 | for (r, [vx, vy, vz], m) in bodies: 109 | px -= vx * m 110 | py -= vy * m 111 | pz -= vz * m 112 | (r, v, m) = ref 113 | v[0] = px / m 114 | v[1] = py / m 115 | v[2] = pz / m 116 | 117 | def inner_iter(n, ref='sun'): 118 | checksum = 0 119 | system, bodies, pairs = setup_state() 120 | 121 | offset_momentum(bodies[ref], system) 122 | checksum += report_energy(system, pairs) 123 | advance(0.01, n, system, pairs) 124 | checksum += report_energy(system, pairs) 125 | 126 | if abs(checksum - EXPECT_CHECKSUM) >= EPSILON: 127 | print("bad checksum: %f vs %f" % (checksum, EXPECT_CHECKSUM)) 128 | sys.exit(1) 129 | 130 | def run_iter(n): 131 | for i in xrange(n): 132 | inner_iter(N_ADVANCES) 133 | -------------------------------------------------------------------------------- /examples/example.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import (PyPyVMDef, LuaVMDef, NativeCodeVMDef) 3 | from krun import EntryPoint 4 | from krun.util import fatal 5 | from distutils.spawn import find_executable 6 | 7 | # For a real experiment you would certainly use absolute paths 8 | PYPY_BIN = find_executable("pypy") 9 | if PYPY_BIN is None: 10 | fatal("pypy binary not found in path") 11 | 12 | LUAJIT_BIN = find_executable("luajit") 13 | if LUAJIT_BIN is None: 14 | fatal("luajit binary not found in path") 15 | 16 | EXECUTION_TIMEOUT = 60 # time allowance for each process execution in seconds. 17 | 18 | # Who to mail 19 | MAIL_TO = [] 20 | 21 | # Maximum number of error emails to send per-run 22 | #MAX_MAILS = 2 23 | 24 | DIR = os.getcwd() 25 | JKRUNTIME_DIR = os.path.join(DIR, "krun", "libkruntime", "") 26 | 27 | HEAP_LIMIT = 2097152 # KiB 28 | STACK_LIMIT = 8192 # KiB 29 | 30 | # Variant name -> EntryPoint 31 | VARIANTS = { 32 | "default-c": EntryPoint("bench.so", subdir="c"), 33 | "default-python": EntryPoint("bench.py", subdir="python"), 34 | "default-lua": EntryPoint("bench.lua", subdir="lua"), 35 | } 36 | 37 | ITERATIONS_ALL_VMS = 5 # Small number for testing. 38 | 39 | VMS = { 40 | 'C': { 41 | 'vm_def': NativeCodeVMDef(), 42 | 'variants': ['default-c'], 43 | 'n_iterations': ITERATIONS_ALL_VMS, 44 | }, 45 | 'PyPy': { 46 | 'vm_def': PyPyVMDef(PYPY_BIN), 47 | 'variants': ['default-python'], 48 | 'n_iterations': ITERATIONS_ALL_VMS, 49 | }, 50 | 'LuaJIT': { 51 | 'vm_def': LuaVMDef(LUAJIT_BIN), 52 | 'variants': ['default-lua'], 53 | 'n_iterations': ITERATIONS_ALL_VMS, 54 | }, 55 | } 56 | 57 | 58 | BENCHMARKS = { 59 | 'fannkuch_redux': 100, 60 | 'nbody': 15, 61 | } 62 | 63 | # list of "bench:vm:variant" 64 | SKIP = [ 65 | #"*:C:*", 66 | #"*:PyPy:*", 67 | #"*:LuaJIT:*", 68 | ] 69 | 70 | N_EXECUTIONS = 2 # Number of fresh processes. 71 | 72 | # No. of seconds to wait before taking the initial temperature reading. 73 | # You should set this high enough for the system to cool down a bit. 74 | # The default (if omitted) is 60 seconds. 75 | TEMP_READ_PAUSE = 1 76 | 77 | # Commands to run before and after each process execution 78 | # 79 | # Environment available for these commands: 80 | # KRUN_RESULTS_FILE: path to results file. 81 | # KRUN_LOG_FILE: path to log file. 
82 | # KRUN_ETA_DATUM: time the ETA was computed 83 | # KRUN_ETA_VALUE: estimated time of completion 84 | #PRE_EXECUTION_CMDS = ["sudo service cron stop"] 85 | #POST_EXECUTION_CMDS = ["sudo service cron start"] 86 | 87 | # CPU pinning (off by default) 88 | #ENABLE_PINNING = False 89 | 90 | # Lower and upper bound for acceptable APERF/MPERF ratios 91 | AMPERF_RATIO_BOUNDS = 0.995, 1.005 92 | 93 | # Rough rate of change in APERF per second above which a core is considered busy. 94 | # For many machines this is simply the base clock frequency, but generally 95 | # speaking, is undefined, so you need to check on a per-machine basis. 96 | AMPERF_BUSY_THRESHOLD = 3.4 * 1000 * 1000 * 1000 / 1000 # 3.4 GHz / 1000 97 | -------------------------------------------------------------------------------- /examples/ext.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import ExternalSuiteVMDef 3 | 4 | # Who to mail 5 | MAIL_TO = [] 6 | 7 | # Maximum number of error emails to send per-run 8 | #MAX_MAILS = 2 9 | 10 | DIR = os.getcwd() 11 | 12 | HEAP_LIMIT = 2097152 # KiB 13 | STACK_LIMIT = 8192 # KiB 14 | 15 | # Variant name -> EntryPoint 16 | VARIANTS = { 17 | # Normally you'd have an EntryPoint on the right-hand side, but there's no 18 | # such notion for the ExternalSuiteVMDef. Just pass None. 19 | "default-ext": None, 20 | } 21 | 22 | ITERATIONS_ALL_VMS = 5 # Small number for testing. 23 | 24 | VMS = { 25 | 'Ext': { 26 | 'vm_def': ExternalSuiteVMDef(os.path.join(DIR, "ext_script.py")), 27 | 'variants': ['default-ext'], 28 | 'n_iterations': ITERATIONS_ALL_VMS, 29 | }, 30 | } 31 | 32 | BENCHMARKS = { 33 | 'fannkuch_redux': 100, 34 | 'nbody': 15, 35 | } 36 | 37 | # list of "bench:vm:variant" 38 | SKIP = [] 39 | 40 | N_EXECUTIONS = 2 # Number of fresh processes. 41 | 42 | # No. of seconds to wait before taking the initial temperature reading. 43 | # You should set this high enough for the system to cool down a bit. 44 | # The default (if omitted) is 60 seconds. 45 | TEMP_READ_PAUSE = 1 46 | 47 | # Commands to run before and after each process execution 48 | # 49 | # Environment available for these commands: 50 | # KRUN_RESULTS_FILE: path to results file. 51 | # KRUN_LOG_FILE: path to log file. 52 | # KRUN_ETA_DATUM: time the ETA was computed 53 | # KRUN_ETA_VALUE: estimated time of completion 54 | #PRE_EXECUTION_CMDS = ["sudo service cron stop"] 55 | #POST_EXECUTION_CMDS = ["sudo service cron start"] 56 | 57 | # CPU pinning (off by default) 58 | #ENABLE_PINNING = False 59 | -------------------------------------------------------------------------------- /examples/ext_script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2.7 2 | # A dummy external script, demonstrating how the ExternalSuiteVMDef works. 3 | # 4 | # This script is called once for each process execution. 5 | 6 | import sys 7 | import json 8 | 9 | _, benchmark, iters, param, instr = sys.argv 10 | iters = int(iters) 11 | 12 | # 13 | 14 | # Then emit your results to stdout in the following format: 15 | js = { 16 | "wallclock_times": list(range(iters)), # dummy results. 17 | # ExternalSuiteVMDef doesn't support the following fields. 
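    # (Per-core cycle and APERF/MPERF readings normally come from libkruntime,
    # which an external suite does not link against, so empty placeholders are
    # emitted here.)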
18 | "core_cycle_counts": [], 19 | "aperf_counts": [], 20 | "mperf_counts": [], 21 | } 22 | 23 | sys.stdout.write("%s\n" % json.dumps(js)) 24 | -------------------------------------------------------------------------------- /examples/java.krun: -------------------------------------------------------------------------------- 1 | execfile("example.krun", globals()) 2 | 3 | from krun.vm_defs import JavaVMDef 4 | 5 | JAVA_BIN = find_executable("java") 6 | if JAVA_BIN is None: 7 | fatal("Java binary not found in path") 8 | 9 | VARIANTS["default-java"] = EntryPoint("KrunEntry", subdir="java") 10 | 11 | VMS['Java'] = { 12 | 'vm_def': JavaVMDef(JAVA_BIN), 13 | 'variants': ['default-java'], 14 | 'n_iterations': ITERATIONS_ALL_VMS, 15 | } 16 | -------------------------------------------------------------------------------- /examples/travis.krun: -------------------------------------------------------------------------------- 1 | execfile("java.krun", globals()) 2 | 3 | N_EXECUTIONS = 1 # Keep travis-ci runs short 4 | -------------------------------------------------------------------------------- /iterations_runners/Makefile: -------------------------------------------------------------------------------- 1 | C_ITER_RUNNER_CFLAGS = -fPIC -Wall -Wextra -pedantic -std=gnu99 2 | 3 | ifeq ($(shell uname -s),Linux) 4 | C_ITER_RUNNER_LDFLAGS = -ldl -lrt 5 | else 6 | C_ITER_RUNNER_LDFLAGS = 7 | endif 8 | 9 | .PHONY: clean 10 | 11 | all: iterations_runner_c IterationsRunner.class BaseKrunEntry.class 12 | 13 | IterationsRunner.class BaseKrunEntry.class: iterations_runner.java 14 | if [ "${ENABLE_JAVA}" = "1" ]; then \ 15 | ${JAVAC} iterations_runner.java; \ 16 | fi 17 | 18 | iterations_runner_c: iterations_runner.c 19 | echo ${C_ITER_RUNNER_LDFLAGS} 20 | ${CC} ${C_ITER_RUNNER_CFLAGS} ${CFLAGS} -L`pwd`/../libkrun \ 21 | ${CPPFLAGS} iterations_runner.c \ 22 | -o iterations_runner_c -lkruntime ${C_ITER_RUNNER_LDFLAGS} \ 23 | ${LDFLAGS} 24 | 25 | clean: 26 | rm -f BaseKrunEntry.class IterationsRunner.class 27 | rm -f iterations_runner_c 28 | -------------------------------------------------------------------------------- /iterations_runners/iterations_runner.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Iterations runner for C benchmarks. 3 | * 4 | * Code style here is KNF, but with 4 spaces instead of tabs. 
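 * The runner dlopen()s the benchmark .so named on the command line, resolves
 * its run_iter() entry point (BENCH_FUNC_NAME) via dlsym(), and calls it once
 * per in-process iteration between a pair of krun_measure() calls.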
5 | */ 6 | 7 | /* To correctly expose asprintf() on Linux */ 8 | #define _GNU_SOURCE 9 | 10 | #include <stdio.h> 11 | #include <stdlib.h> 12 | #include <string.h> 13 | #include <unistd.h> 14 | #include <errno.h> 15 | #include <limits.h> 16 | #include <dlfcn.h> 17 | #include <err.h> 18 | #include <inttypes.h> 19 | 20 | #include "../libkrun/libkruntime.h" 21 | 22 | #define BENCH_FUNC_NAME "run_iter" 23 | 24 | // Private protos 25 | int convert_str_to_int(char *s); 26 | void emit_per_core_data(char *name, int num_cores, int num_iters, uint64_t **data); 27 | 28 | void 29 | emit_per_core_data(char *name, int num_cores, int num_iters, uint64_t **data) 30 | { 31 | int core, iter_num; 32 | 33 | fprintf(stdout, "\"%s\": [", name); 34 | for (core = 0; core < num_cores; core++) { 35 | fprintf(stdout, "["); 36 | 37 | for (iter_num = 0; iter_num < num_iters; iter_num++) { 38 | fprintf(stdout, "%" PRIu64, data[core][iter_num]); 39 | 40 | if (iter_num < num_iters - 1) { 41 | fprintf(stdout, ", "); 42 | } 43 | } 44 | 45 | fprintf(stdout, "]"); 46 | if (core < num_cores - 1) { 47 | fprintf(stdout, ", "); 48 | } 49 | } 50 | fprintf(stdout, "]"); 51 | } 52 | 53 | int 54 | convert_str_to_int(char *s) 55 | { 56 | char *endptr; 57 | long r; 58 | 59 | errno = 0; /* errno not set to 0 on success */ 60 | r = strtol(s, &endptr, 10); 61 | 62 | if ((errno != 0) || (*endptr != 0)) { 63 | perror("strtol"); 64 | exit(EXIT_FAILURE); 65 | } 66 | 67 | if ((r > INT_MAX) || (r < INT_MIN)) { 68 | fprintf(stderr, "Number would be truncated! %ld\n", r); 69 | exit (EXIT_FAILURE); 70 | } 71 | 72 | return ((int) r); 73 | } 74 | 75 | void 76 | usage() { 77 | printf("usage: iterations_runner_c <benchmark> <# of iterations> " 78 | "<benchmark param> <debug flag>\n [instrumentation dir] " 79 | "[key] [key pexec index]\n\n"); 80 | printf("Arguments in [] are for instrumentation mode only.\n"); 81 | exit(EXIT_FAILURE); 82 | } 83 | 84 | int 85 | main(int argc, char **argv) 86 | { 87 | char *krun_benchmark = 0; 88 | int krun_total_iters = 0, krun_param = 0, krun_iter_num = 0; 89 | int krun_debug = 0, krun_num_cores = 0, krun_core, krun_instrument = 0; 90 | void *krun_dl_handle = 0; 91 | int (*krun_bench_func)(int); /* func ptr to benchmark entry */ 92 | double *krun_wallclock_times = NULL; 93 | uint64_t **krun_cycle_counts = NULL, **krun_aperf_counts = NULL; 94 | uint64_t **krun_mperf_counts = NULL; 95 | 96 | if (argc < 5) { 97 | usage(); 98 | } 99 | 100 | krun_benchmark = argv[1]; 101 | krun_total_iters = convert_str_to_int(argv[2]); 102 | krun_param = convert_str_to_int(argv[3]); 103 | krun_debug = convert_str_to_int(argv[4]); 104 | krun_instrument = argc >= 6; 105 | 106 | if (krun_instrument && (argc != 8)) { 107 | usage(); 108 | } 109 | 110 | krun_init(); 111 | krun_num_cores = krun_get_num_cores(); 112 | 113 | krun_dl_handle = dlopen(krun_benchmark, RTLD_NOW | RTLD_LOCAL); 114 | if (krun_dl_handle == NULL) { 115 | errx(EXIT_FAILURE, "%s", dlerror()); 116 | goto clean; 117 | } 118 | 119 | /* Odd pointer gymnastics are intentional.
See Linux dlopen manual */ 120 | *(void **) (&krun_bench_func) = dlsym(krun_dl_handle, BENCH_FUNC_NAME); 121 | if (krun_bench_func == NULL) { 122 | errx(EXIT_FAILURE, "%s", dlerror()); 123 | goto clean; 124 | } 125 | 126 | /* Allocate arrays */ 127 | krun_wallclock_times = krun_xcalloc(krun_total_iters, sizeof(double)); 128 | krun_cycle_counts = krun_xcalloc(krun_num_cores, sizeof(uint64_t *)); 129 | krun_aperf_counts = krun_xcalloc(krun_num_cores, sizeof(uint64_t *)); 130 | krun_mperf_counts = krun_xcalloc(krun_num_cores, sizeof(uint64_t *)); 131 | for (krun_core = 0; krun_core < krun_num_cores; krun_core++) { 132 | krun_cycle_counts[krun_core] = 133 | krun_xcalloc(krun_total_iters, sizeof(uint64_t)); 134 | krun_aperf_counts[krun_core] = 135 | krun_xcalloc(krun_total_iters, sizeof(uint64_t)); 136 | krun_mperf_counts[krun_core] = 137 | krun_xcalloc(krun_total_iters, sizeof(uint64_t)); 138 | } 139 | 140 | /* Set default values */ 141 | for (krun_iter_num = 0; krun_iter_num < krun_total_iters; 142 | krun_iter_num++) { 143 | for (krun_core = 0; krun_core < krun_num_cores; krun_core++) { 144 | krun_cycle_counts[krun_core][krun_iter_num] = 0; 145 | krun_aperf_counts[krun_core][krun_iter_num] = 0; 146 | krun_mperf_counts[krun_core][krun_iter_num] = 0; 147 | } 148 | krun_wallclock_times[krun_iter_num] = 0; 149 | } 150 | 151 | /* Main loop */ 152 | for (krun_iter_num = 0; krun_iter_num < krun_total_iters; 153 | krun_iter_num++) { 154 | 155 | if (krun_debug > 0) { 156 | fprintf(stderr, "[iterations_runner.c] iteration %d/%d\n", 157 | krun_iter_num + 1, krun_total_iters); 158 | } 159 | 160 | /* Start timed section */ 161 | krun_measure(0); 162 | (void) (*krun_bench_func)(krun_param); 163 | krun_measure(1); 164 | /* End timed section */ 165 | 166 | /* Extract and store wallclock data from libkruntime */ 167 | krun_wallclock_times[krun_iter_num] = 168 | krun_get_wallclock(1) - krun_get_wallclock(0); 169 | 170 | /* Same for per-core measurements */ 171 | for (krun_core = 0; krun_core < krun_num_cores; krun_core++ ) { 172 | krun_cycle_counts[krun_core][krun_iter_num] = 173 | krun_get_core_cycles(1, krun_core) - 174 | krun_get_core_cycles(0, krun_core); 175 | krun_aperf_counts[krun_core][krun_iter_num] = 176 | krun_get_aperf(1, krun_core) - krun_get_aperf(0, krun_core); 177 | krun_mperf_counts[krun_core][krun_iter_num] = 178 | krun_get_mperf(1, krun_core) - krun_get_mperf(0, krun_core); 179 | } 180 | } 181 | 182 | /* Emit results */ 183 | fprintf(stdout, "{ \"wallclock_times\": ["); 184 | for (krun_iter_num = 0; krun_iter_num < krun_total_iters; 185 | krun_iter_num++) { 186 | fprintf(stdout, "%f", krun_wallclock_times[krun_iter_num]); 187 | 188 | if (krun_iter_num < krun_total_iters - 1) { 189 | fprintf(stdout, ", "); 190 | } 191 | } 192 | fprintf(stdout, "], "); 193 | 194 | emit_per_core_data("core_cycle_counts", krun_num_cores, krun_total_iters, 195 | krun_cycle_counts); 196 | fprintf(stdout, ", "); 197 | 198 | emit_per_core_data("aperf_counts", krun_num_cores, krun_total_iters, 199 | krun_aperf_counts); 200 | fprintf(stdout, ", "); 201 | 202 | emit_per_core_data("mperf_counts", krun_num_cores, krun_total_iters, 203 | krun_mperf_counts); 204 | 205 | fprintf(stdout, "}\n"); 206 | 207 | clean: 208 | /* Free up allocations */ 209 | for (krun_core = 0; krun_core < krun_num_cores; krun_core++) { 210 | free(krun_cycle_counts[krun_core]); 211 | free(krun_aperf_counts[krun_core]); 212 | free(krun_mperf_counts[krun_core]); 213 | } 214 | free(krun_wallclock_times); 215 | free(krun_cycle_counts); 216 | 
free(krun_aperf_counts); 217 | free(krun_mperf_counts); 218 | 219 | if (krun_dl_handle != NULL) { 220 | dlclose(krun_dl_handle); 221 | } 222 | 223 | krun_done(); 224 | 225 | return (EXIT_SUCCESS); 226 | } 227 | -------------------------------------------------------------------------------- /iterations_runners/iterations_runner.js: -------------------------------------------------------------------------------- 1 | // NOTE: JS VM will need to be patched to allow access to: 2 | // 3 | // krun_init() 4 | // krun_done() 5 | // krun_measure() 6 | // krun_get_num_cores() 7 | // krun_get_{core_cycles,aperf,mperf}_double() 8 | // krun_get_wallclock() 9 | 10 | function emitPerCoreResults(name, num_cores, ary) { 11 | write('"' + name + '": [') 12 | for (core = 0; core < num_cores; core++) { 13 | write("[") 14 | for (BM_i = 0; BM_i < BM_n_iters; BM_i++) { 15 | write(ary[core][BM_i]); 16 | 17 | if (BM_i < BM_n_iters - 1) { 18 | write(", ") 19 | } 20 | } 21 | write("]") 22 | if (core < num_cores - 1) { 23 | write(", ") 24 | } 25 | } 26 | write("]") 27 | } 28 | 29 | function usage() { 30 | throw "\nusage: iterations_runner.js <benchmark> <# of iterations> " + 31 | "<benchmark param> <debug flag>\n [instrumentation dir] [key] " + 32 | "[key pexec index]\n\nArguments in [] are for " + 33 | "instrumentation mode only.\n"; 34 | } 35 | 36 | if (this.arguments.length < 4) { 37 | usage(); 38 | } 39 | 40 | var BM_entry_point = this.arguments[0]; 41 | var BM_n_iters = parseInt(this.arguments[1]); 42 | var BM_param = parseInt(this.arguments[2]); 43 | var BM_debug = parseInt(this.arguments[3]) > 0; 44 | var BM_instrument = this.arguments.length >= 5; 45 | 46 | if (BM_instrument && (this.arguments.length != 7)) { 47 | usage(); 48 | } 49 | 50 | load(BM_entry_point); 51 | 52 | krun_init(); 53 | var BM_num_cores = krun_get_num_cores(); 54 | 55 | // Pre-allocate and fill arrays. 56 | // We use typed arrays to encourage type stability.
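// Counter deltas are stored as doubles as well: the patched-VM API above only
// exposes krun_get_*_double() variants, since JavaScript numbers cannot
// represent the full 64-bit counter range exactly.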
57 | var BM_wallclock_times = new Float64Array(BM_n_iters); 58 | BM_wallclock_times.fill(-0.0); 59 | 60 | var BM_cycle_counts = new Array(BM_num_cores); 61 | var BM_aperf_counts = new Array(BM_num_cores); 62 | var BM_mperf_counts = new Array(BM_num_cores); 63 | 64 | for (BM_core = 0; BM_core < BM_num_cores; BM_core++) { 65 | BM_cycle_counts[BM_core] = new Float64Array(BM_n_iters); 66 | BM_aperf_counts[BM_core] = new Float64Array(BM_n_iters); 67 | BM_mperf_counts[BM_core] = new Float64Array(BM_n_iters); 68 | 69 | BM_cycle_counts[BM_core].fill(-0.0); 70 | BM_aperf_counts[BM_core].fill(-0.0); 71 | BM_mperf_counts[BM_core].fill(-0.0); 72 | } 73 | 74 | // Main loop 75 | for (BM_i = 0; BM_i < BM_n_iters; BM_i++) { 76 | if (BM_debug) { 77 | printErr("[iterations_runner.js] iteration " + (BM_i + 1) + "/" + BM_n_iters); 78 | } 79 | 80 | // Start timed section 81 | krun_measure(0); 82 | run_iter(BM_param); 83 | krun_measure(1); 84 | // End timed section 85 | 86 | // Compute deltas 87 | BM_wallclock_times[BM_i] = krun_get_wallclock(1) - krun_get_wallclock(0); 88 | 89 | for (BM_core = 0; BM_core < BM_num_cores; BM_core++) { 90 | BM_cycle_counts[BM_core][BM_i] = 91 | krun_get_core_cycles_double(1, BM_core) - 92 | krun_get_core_cycles_double(0, BM_core); 93 | BM_aperf_counts[BM_core][BM_i] = 94 | krun_get_aperf_double(1, BM_core) - 95 | krun_get_aperf_double(0, BM_core); 96 | BM_mperf_counts[BM_core][BM_i] = 97 | krun_get_mperf_double(1, BM_core) - 98 | krun_get_mperf_double(0, BM_core); 99 | } 100 | } 101 | 102 | krun_done(); 103 | 104 | // Emit measurements 105 | write("{") 106 | 107 | write('"wallclock_times": [') 108 | for (BM_i = 0; BM_i < BM_n_iters; BM_i++) { 109 | write(BM_wallclock_times[BM_i]); 110 | 111 | if (BM_i < BM_n_iters - 1) { 112 | write(", ") 113 | } 114 | } 115 | write("], ") 116 | 117 | emitPerCoreResults("core_cycle_counts", BM_num_cores, BM_cycle_counts) 118 | write(", ") 119 | emitPerCoreResults("aperf_counts", BM_num_cores, BM_aperf_counts) 120 | write(", ") 121 | emitPerCoreResults("mperf_counts", BM_num_cores, BM_mperf_counts) 122 | 123 | write("}") 124 | -------------------------------------------------------------------------------- /iterations_runners/iterations_runner.lua: -------------------------------------------------------------------------------- 1 | local ffi = require("ffi") 2 | 3 | function emit_per_core_measurements(name, num_cores, tbl, tbl_len) 4 | io.stdout:write(string.format('"%s": [', name)) 5 | 6 | for BM_core = 1, num_cores, 1 do 7 | io.stdout:write("[") 8 | for BM_i = 1, tbl_len, 1 do 9 | io.stdout:write(tbl[BM_core][BM_i]) 10 | if BM_i < tbl_len then 11 | io.stdout:write(", ") 12 | end 13 | end 14 | io.stdout:write("]") 15 | if BM_core < num_cores then 16 | io.stdout:write(", ") 17 | end 18 | end 19 | io.stdout:write("]") 20 | end 21 | 22 | function usage() 23 | io.stderr:write("usage: iterations_runner.lua <benchmark> " .. 24 | "<# of iterations> <benchmark param> <debug flag>\n " .. 25 | " [instrumentation dir] [key] " .. 
26 | "[key pexec index]\n\n") 27 | io.stderr:write("Arguments in [] are for instrumentation mode only.\n") 28 | os.exit(1) 29 | end 30 | 31 | ffi.cdef[[ 32 | void krun_init(void); 33 | void krun_done(void); 34 | void krun_measure(int); 35 | int krun_get_num_cores(void); 36 | double krun_get_wallclock(int); 37 | double krun_get_core_cycles_double(int, int); 38 | double krun_get_aperf_double(int, int); 39 | double krun_get_mperf_double(int, int); 40 | ]] 41 | local libkruntime = ffi.load("kruntime") 42 | 43 | local krun_init = libkruntime.krun_init 44 | local krun_measure = libkruntime.krun_measure 45 | local krun_get_num_cores = libkruntime.krun_get_num_cores 46 | local krun_get_wallclock = libkruntime.krun_get_wallclock 47 | local krun_get_core_cycles_double = libkruntime.krun_get_core_cycles_double 48 | local krun_get_aperf_double = libkruntime.krun_get_aperf_double 49 | local krun_get_mperf_double = libkruntime.krun_get_mperf_double 50 | 51 | if #arg < 4 then 52 | usage() 53 | end 54 | 55 | local BM_benchmark = arg[1] 56 | local BM_iters = tonumber(arg[2]) 57 | local BM_param = tonumber(arg[3]) 58 | local BM_debug = tonumber(arg[4]) > 0 59 | local BM_instrument = #arg >= 5 60 | 61 | if BM_instrument and #arg ~= 7 then 62 | usage() 63 | end 64 | 65 | dofile(BM_benchmark) 66 | 67 | krun_init() 68 | local BM_num_cores = krun_get_num_cores() 69 | 70 | -- Pre-allocate and fill results tables. 71 | -- There doesn't appear to be a way to allocate the array all at once in Lua. 72 | local BM_wallclock_times = {} 73 | for BM_i = 1, BM_iters, 1 do 74 | BM_wallclock_times[BM_i] = -0.0 75 | end 76 | 77 | local BM_cycle_counts = {} 78 | for BM_core = 1, BM_num_cores, 1 do 79 | BM_cycle_counts[BM_core] = {} 80 | for BM_i = 1, BM_iters, 1 do 81 | BM_cycle_counts[BM_core][BM_i] = -0.0 82 | end 83 | end 84 | 85 | local BM_aperf_counts = {} 86 | for BM_core = 1, BM_num_cores, 1 do 87 | BM_aperf_counts[BM_core] = {} 88 | for BM_i = 1, BM_iters, 1 do 89 | BM_aperf_counts[BM_core][BM_i] = -0.0 90 | end 91 | end 92 | 93 | local BM_mperf_counts = {} 94 | for BM_core = 1, BM_num_cores, 1 do 95 | BM_mperf_counts[BM_core] = {} 96 | for BM_i = 1, BM_iters, 1 do 97 | BM_mperf_counts[BM_core][BM_i] = -0.0 98 | end 99 | end 100 | 101 | -- Main loop 102 | for BM_i = 1, BM_iters, 1 do 103 | if BM_debug then 104 | io.stderr:write(string.format("[iterations_runner.lua] iteration %d/%d\n", BM_i, BM_iters)) 105 | end 106 | 107 | -- Start timed section 108 | krun_measure(0); 109 | run_iter(BM_param) 110 | krun_measure(1); 111 | -- End timed section 112 | 113 | -- Compute deltas 114 | BM_wallclock_times[BM_i] = krun_get_wallclock(1) - krun_get_wallclock(0); 115 | 116 | for BM_core = 1, BM_num_cores, 1 do 117 | BM_cycle_counts[BM_core][BM_i] = 118 | krun_get_core_cycles_double(1, BM_core - 1) - 119 | krun_get_core_cycles_double(0, BM_core - 1) 120 | BM_aperf_counts[BM_core][BM_i] = 121 | krun_get_aperf_double(1, BM_core - 1) - 122 | krun_get_aperf_double(0, BM_core - 1) 123 | BM_mperf_counts[BM_core][BM_i] = 124 | krun_get_mperf_double(1, BM_core - 1) - 125 | krun_get_mperf_double(0, BM_core - 1) 126 | end 127 | end 128 | 129 | -- In LuaJIT, FFI functions are cdata values that are unable to reference any other object owned by 130 | -- the garbage collector. Calling an FFI function which has been cached on the stack (as we've done above) 131 | -- might fail because the parent FFI clib object may have been GC'd . By explicitly accessing krun_done via libkruntime 132 | -- here we guarantee libkruntime to live until this point. 
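-- (For example, caching only 'libkruntime.krun_done' in a local while letting
-- 'libkruntime' itself become unreachable could leave the cached function
-- dangling after a collection.)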
133 | libkruntime.krun_done() 134 | 135 | io.stdout:write("{") 136 | 137 | io.stdout:write('"wallclock_times": [') 138 | for BM_i = 1, BM_iters, 1 do 139 | io.stdout:write(BM_wallclock_times[BM_i]) 140 | if BM_i < BM_iters then 141 | io.stdout:write(", ") 142 | end 143 | end 144 | io.stdout:write("], ") 145 | 146 | emit_per_core_measurements("core_cycle_counts", BM_num_cores, BM_cycle_counts, BM_iters) 147 | io.stdout:write(", ") 148 | emit_per_core_measurements("aperf_counts", BM_num_cores, BM_aperf_counts, BM_iters) 149 | io.stdout:write(", ") 150 | emit_per_core_measurements("mperf_counts", BM_num_cores, BM_mperf_counts, BM_iters) 151 | 152 | io.stdout:write("}\n") 153 | -------------------------------------------------------------------------------- /iterations_runners/iterations_runner.php: -------------------------------------------------------------------------------- 1 | <?php ... function usage() { fwrite(STDERR, "usage: iterations_runner.php <benchmark> <# of iterations> " . 14 | "<benchmark param> <debug flag>\n [instrumentation dir] " . 15 | "[key] [key pexec index]\n\n"); 16 | fwrite(STDERR, "Arguments in [] are for instrumentation mode only.\n"); 17 | exit(1); 18 | } 19 | 20 | if ($argc < 5) { 21 | usage(); 22 | } 23 | 24 | $BM_benchmark = $argv[1]; 25 | $BM_iters = $argv[2]; 26 | $BM_param = (int) $argv[3]; 27 | $BM_debug = ((int) $argv[4]) > 0; 28 | $BM_instrument = $argc >= 6; 29 | 30 | if ($BM_instrument && ($argc != 8)) { 31 | usage(); 32 | } 33 | 34 | if (!file_exists($BM_benchmark)) { 35 | throw new RuntimeException("Can't find $BM_benchmark"); 36 | } 37 | 38 | include($BM_benchmark); 39 | 40 | // Find benchmark entry point 41 | if (!function_exists("run_iter")) { 42 | throw new RuntimeException("Benchmark is missing a 'run_iter' function"); 43 | } 44 | 45 | krun_init(); 46 | $BM_num_cores = krun_get_num_cores(); 47 | 48 | // Pre-allocate results arrays 49 | $BM_wallclock_times = array_fill(0, $BM_iters, -0.0); 50 | $BM_cycle_counts = array_fill(0, $BM_num_cores, array()); 51 | $BM_aperf_counts = array_fill(0, $BM_num_cores, array()); 52 | $BM_mperf_counts = array_fill(0, $BM_num_cores, array()); 53 | for ($BM_core = 0; $BM_core < $BM_num_cores; $BM_core++) { 54 | $BM_cycle_counts[$BM_core] = array_fill(0, $BM_iters, -0.0); 55 | $BM_aperf_counts[$BM_core] = array_fill(0, $BM_iters, -0.0); 56 | $BM_mperf_counts[$BM_core] = array_fill(0, $BM_iters, -0.0); 57 | } 58 | 59 | // Main loop 60 | for ($BM_i = 0; $BM_i < $BM_iters; $BM_i++) { 61 | if ($BM_debug) { 62 | fprintf(STDERR, "[iterations_runner.php] iteration %d/%d\n", $BM_i + 1, $BM_iters); 63 | } 64 | 65 | // Start timed section 66 | krun_measure(0); 67 | run_iter($BM_param); 68 | krun_measure(1); 69 | // End timed section 70 | 71 | // Compute deltas 72 | $BM_wallclock_times[$BM_i] = krun_get_wallclock(1) - krun_get_wallclock(0); 73 | 74 | for ($BM_core = 0; $BM_core < $BM_num_cores; $BM_core++) { 75 | $BM_cycle_counts[$BM_core][$BM_i] = 76 | krun_get_core_cycles_double(1, $BM_core) - 77 | krun_get_core_cycles_double(0, $BM_core); 78 | $BM_aperf_counts[$BM_core][$BM_i] = 79 | krun_get_aperf_double(1, $BM_core) - 80 | krun_get_aperf_double(0, $BM_core); 81 | $BM_mperf_counts[$BM_core][$BM_i] = 82 | krun_get_mperf_double(1, $BM_core) - 83 | krun_get_mperf_double(0, $BM_core); 84 | } 85 | } 86 | 87 | krun_done(); 88 | 89 | $BM_output = array( 90 | "wallclock_times" => $BM_wallclock_times, 91 | "core_cycle_counts" => $BM_cycle_counts, 92 | "aperf_counts" => $BM_aperf_counts, 93 | "mperf_counts" => $BM_mperf_counts 94 | ); 95 | 96 | echo json_encode($BM_output); 97 | 98 | ?> 99 |
-------------------------------------------------------------------------------- /iterations_runners/iterations_runner.py: -------------------------------------------------------------------------------- 1 | """ 2 | Iterations runner for Python VMs. 3 | 4 | Executes a benchmark many times within a single process. 5 | 6 | usage: iterations_runner.py <benchmark> <# of iterations> <benchmark param> 7 | <debug flag> [instrumentation dir] [key] [key pexec index] 8 | 9 | Arguments in [] are for instrumentation mode only.""" 10 | 11 | import array, cffi, sys, imp, os 12 | 13 | 14 | ffi = cffi.FFI() 15 | 16 | ffi.cdef(""" 17 | void krun_init(void); 18 | void krun_done(void); 19 | void krun_measure(int); 20 | uint64_t krun_get_num_cores(void); 21 | double krun_get_wallclock(int); 22 | uint64_t krun_get_core_cycles(int, int); 23 | uint64_t krun_get_aperf(int, int); 24 | uint64_t krun_get_mperf(int, int); 25 | """) 26 | libkruntime = ffi.dlopen("libkruntime.so") 27 | 28 | krun_init = libkruntime.krun_init 29 | krun_done = libkruntime.krun_done 30 | krun_measure = libkruntime.krun_measure 31 | krun_get_num_cores = libkruntime.krun_get_num_cores 32 | krun_get_wallclock = libkruntime.krun_get_wallclock 33 | krun_get_core_cycles = libkruntime.krun_get_core_cycles 34 | krun_get_aperf = libkruntime.krun_get_aperf 35 | krun_get_mperf = libkruntime.krun_get_mperf 36 | 37 | def usage(): 38 | print(__doc__) 39 | sys.exit(1) 40 | 41 | # main 42 | if __name__ == "__main__": 43 | num_args = len(sys.argv) 44 | if num_args < 5: 45 | usage() 46 | 47 | benchmark, iters, param, debug = sys.argv[1:5] 48 | iters, param, debug = int(iters), int(param), int(debug) == 1 49 | instrument = num_args >= 6 50 | 51 | if instrument and num_args != 8: 52 | usage() 53 | 54 | if instrument: 55 | import pypyjit # instrumentation not supported on CPython yet. 56 | 57 | assert benchmark.endswith(".py") 58 | bench_mod_name = os.path.basename(benchmark[:-3]) 59 | bench_mod = imp.load_source(bench_mod_name, benchmark) 60 | 61 | # The benchmark should provide a function called "run_iter" which 62 | # represents one iterations level run of the benchmark. 63 | bench_func = bench_mod.run_iter 64 | 65 | # OK, all is well, let's run. 66 | 67 | krun_init() 68 | num_cores = krun_get_num_cores() 69 | 70 | # Pre-allocate result lists 71 | wallclock_times = array.array("d", [-0.0] * iters) 72 | # Although we can't be sure what size "L" actually is, if we generate ints 73 | # it can't store, an OverflowError results, so there's no chance of silent 74 | # truncation.
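    # An illustrative demonstration of that guarantee (not executed by the
    # runner; the exact message varies by platform, "L" being a C unsigned
    # long):
    #
    #   >>> array.array("L", [2 ** 128])
    #   Traceback (most recent call last):
    #     ...
    #   OverflowError: Python int too large to convert to C unsigned long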
75 | cycle_counts = [array.array("L", [0] * iters) for _ in range(num_cores)] 76 | aperf_counts = [array.array("L", [0] * iters) for _ in range(num_cores)] 77 | mperf_counts = [array.array("L", [0] * iters) for _ in range(num_cores)] 78 | 79 | # Main loop 80 | for i in xrange(iters): 81 | if instrument: 82 | start_snap = pypyjit.get_stats_snapshot() 83 | if debug: 84 | sys.stderr.write( 85 | "[iterations_runner.py] iteration %d/%d\n" % (i + 1, iters)) 86 | 87 | # Start timed section 88 | krun_measure(0) 89 | bench_func(param) 90 | krun_measure(1) 91 | # End timed section 92 | 93 | # Extract/check/store wallclock time 94 | wallclock_times[i] = krun_get_wallclock(1) - krun_get_wallclock(0) 95 | 96 | # Extract/check/store per-core data 97 | for core in xrange(num_cores): 98 | cycle_counts[core][i] = ( 99 | krun_get_core_cycles(1, core) - 100 | krun_get_core_cycles(0, core)) 101 | aperf_counts[core][i] = ( 102 | krun_get_aperf(1, core) - 103 | krun_get_aperf(0, core)) 104 | mperf_counts[core][i] = ( 105 | krun_get_mperf(1, core) - 106 | krun_get_mperf(0, core)) 107 | 108 | # In instrumentation mode, write an iteration separator to stderr. 109 | if instrument: 110 | sys.stderr.write("@@@ END_IN_PROC_ITER: %d\n" % i) 111 | end_snap = pypyjit.get_stats_snapshot() 112 | jit_time = (end_snap.counter_times["TRACING"] - 113 | start_snap.counter_times["TRACING"]) 114 | jit_time += (end_snap.counter_times["BACKEND"] - 115 | start_snap.counter_times["BACKEND"]) 116 | sys.stderr.write("@@@ JIT_TIME: %s\n" % jit_time) 117 | sys.stderr.flush() 118 | 119 | krun_done() 120 | 121 | import json 122 | js = { 123 | "wallclock_times": list(wallclock_times), 124 | # You can't JSON encode a typed array, so convert to lists. 125 | "core_cycle_counts": [list(a) for a in cycle_counts], 126 | "aperf_counts": [list(a) for a in aperf_counts], 127 | "mperf_counts": [list(a) for a in mperf_counts], 128 | } 129 | 130 | sys.stdout.write("%s\n" % json.dumps(js)) 131 | -------------------------------------------------------------------------------- /iterations_runners/iterations_runner.rb: -------------------------------------------------------------------------------- 1 | # The VM needs to be patched to offer up: 2 | # krun_init() 3 | # krun_done() 4 | # krun_measure() 5 | # krun_get_num_cores() 6 | # krun_get_core_cycles() 7 | # krun_get_aperf() 8 | # krun_get_mperf() 9 | 10 | # defined this way so we don't measure the conditional platform check.
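# (A sketch of the alternative this avoids -- branching inside the method
# would put the platform check on the timed path of every call:
#
#   def clock_gettime_monotonic()
#     if /linux/ =~ RUBY_PLATFORM
#       Process.clock_gettime(Process::CLOCK_MONOTONIC_RAW)
#     else
#       Process.clock_gettime(Process::CLOCK_MONOTONIC)
#     end
#   end
# )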
11 | if /linux/ =~ RUBY_PLATFORM then 12 | def clock_gettime_monotonic() 13 | Process.clock_gettime(Process::CLOCK_MONOTONIC_RAW) 14 | end 15 | else 16 | def clock_gettime_monotonic() 17 | Process.clock_gettime(Process::CLOCK_MONOTONIC) 18 | end 19 | end 20 | 21 | def usage() 22 | STDERR.puts "usage: iterations_runner.rb <benchmark> "\ 23 | "<# of iterations> <benchmark param> <debug flag>\n "\ 24 | " [instrumentation dir] [key] [key pexec index]\n" 25 | STDERR.puts "Arguments in [] are supplied for instrumentation mode only.\n" 26 | Kernel.exit(1) 27 | end 28 | 29 | # main 30 | if __FILE__ == $0 31 | if ARGV.length < 4 32 | usage() 33 | end 34 | 35 | benchmark, iters, param, debug = ARGV 36 | iters = Integer(iters) 37 | param = Integer(param) 38 | debug = Integer(debug) > 0 39 | instrument = ARGV.length >= 5 40 | 41 | if instrument and ARGV.length != 7 then 42 | usage() 43 | end 44 | 45 | require("#{benchmark}") 46 | 47 | krun_init(); 48 | num_cores = krun_get_num_cores() 49 | 50 | # Pre-allocate result lists 51 | wallclock_times = [-0.0] * iters 52 | cycle_counts = [] 53 | aperf_counts = [] 54 | mperf_counts = [] 55 | for core in 0..num_cores - 1 do 56 | cycle_counts[core] = [-0.0] * iters 57 | aperf_counts[core] = [-0.0] * iters 58 | mperf_counts[core] = [-0.0] * iters 59 | end 60 | 61 | # Main loop 62 | for iter_num in 0..iters - 1 do 63 | if debug then 64 | STDERR.write "[iterations_runner.rb] iteration #{iter_num + 1}/#{iters}\n" 65 | STDERR.flush 66 | end 67 | 68 | # Start timed section 69 | krun_measure(0) 70 | run_iter(param) 71 | krun_measure(1) 72 | # End timed section 73 | 74 | # Compute deltas 75 | wallclock_times[iter_num] = \ 76 | krun_get_wallclock(1) - \ 77 | krun_get_wallclock(0) 78 | 79 | for core in 0..num_cores - 1 do 80 | cycle_counts[core][iter_num] = \ 81 | krun_get_core_cycles(1, core) - \ 82 | krun_get_core_cycles(0, core) 83 | aperf_counts[core][iter_num] = \ 84 | krun_get_aperf(1, core) - \ 85 | krun_get_aperf(0, core) 86 | mperf_counts[core][iter_num] = \ 87 | krun_get_mperf(1, core) - \ 88 | krun_get_mperf(0, core) 89 | end 90 | end 91 | 92 | krun_done(); 93 | 94 | # Emit measurements 95 | require 'json' 96 | 97 | out_hash = { 98 | 'wallclock_times' => wallclock_times, 99 | 'core_cycle_counts' => cycle_counts, 100 | 'aperf_counts' => aperf_counts, 101 | 'mperf_counts' => mperf_counts, 102 | } 103 | 104 | puts(JSON.generate(out_hash)) 105 | end 106 | -------------------------------------------------------------------------------- /iterations_runners/iterations_runner.som: -------------------------------------------------------------------------------- 1 | iterations_runner = ( 2 | | benchmarkClass numIterations param debug instrument numCores krun | 3 | 4 | run: args = ( 5 | krun := Krun new. 6 | self processArguments: args. 7 | self runBenchmark. 8 | ) 9 | 10 | usage = ( 11 | 'Iterations runner for SOM VMs.\n' println. 12 | 'Executes a benchmark many times within a single process.\n' println. 13 | 'usage: iterations_runner.som <benchmark> <# of iterations> <benchmark param> <debug flag>' println. 14 | ' [instrumentation dir] [key] [key pexec index]\n' println. 15 | 'Arguments in [] are for instrumentation mode only.' println. 16 | system exit. 17 | ) 18 | 19 | processArguments: args = ( 20 | args length < 5 ifTrue: [ ^ self usage ]. 21 | 22 | self loadBenchmarkClass: (args at: 2). "First argument is the Benchmark" 23 | numIterations := (args at: 3) asInteger. 24 | param := (args at: 4) asInteger. 25 | debug := (args at: 5) asInteger. 26 | 27 | instrument := false. 28 | args length >= 6 ifTrue: [ 29 | instrument := true. 30 | ].
31 | 32 | instrument && (args length ~= 8) ifTrue: [ ^ self usage ]. 33 | ) 34 | 35 | loadBenchmarkClass: className = ( 36 | | sym cls | 37 | sym := className asSymbol. 38 | cls := system load: sym. 39 | cls ifNil: [ 40 | self error: 'Failed loading benchmark: ' + className ]. 41 | benchmarkClass := cls. 42 | ) 43 | 44 | runBenchmark = ( 45 | | bench wallclockTimes coreCycleCounts i | 46 | 47 | krun krunInit. 48 | numCores := krun krunGetNumCores. 49 | bench := benchmarkClass new. 50 | wallclockTimes := Array new: numIterations. 51 | coreCycleCounts := Array new: numCores. 52 | coreCycleCounts doIndexes: [:i | coreCycleCounts at: i put: (Array new: numIterations)]. 53 | 54 | i := 0. 55 | [ i < numIterations ] whileTrue: [ 56 | | start end core | 57 | debug > 0 ifTrue: [ 58 | ('[iterations_runner.som] iteration ' + (i + 1) + '/' + numIterations) println. 59 | ]. 60 | 61 | krun krunMeasure: 0. 62 | bench run_iter: param. 63 | krun krunMeasure: 1. 64 | 65 | start := krun krunGetWallclock: 0. 66 | end := krun krunGetWallclock: 1. 67 | 68 | wallclockTimes at: (i + 1) put: (end - start). 69 | 70 | core := 1. 71 | [ core < (numCores + 1) ] whileTrue: [ 72 | | cycle_start cycle_end | 73 | 74 | cycle_end := (krun krunGetCoreCyclesDouble: 1 core: (core - 1)). 75 | cycle_start := (krun krunGetCoreCyclesDouble: 0 core: (core - 1)). 76 | 77 | (coreCycleCounts at: core) at: (i + 1) put: (cycle_end - cycle_start). 78 | core := core + 1. 79 | ]. 80 | 81 | i := i + 1. 82 | ]. 83 | 84 | krun krunDone. 85 | "Emit measurements" 86 | '{' print. 87 | '"wallclock_times": [' print. 88 | wallclockTimes from: 1 to: (wallclockTimes length - 1) do: [:i | ('' + i + ', ') print]. 89 | (wallclockTimes at: wallclockTimes length) print. 90 | '], "core_cycle_counts": ' print. 91 | self emitPerCoreMeasurement: coreCycleCounts. 92 | 93 | "TODO - aperf and mperf counts not yet implemented." 94 | ', "aperf_counts": [] ' print. 95 | ', "mperf_counts": [] ' print. 96 | '}' println. 97 | ) 98 | 99 | emitPerCoreMeasurement: array = ( 100 | | core | 101 | core := 1. 102 | '[' print. 103 | [ core < (numCores + 1) ] whileTrue: [ 104 | | coreMetrics | 105 | '[' print. 106 | 107 | coreMetrics := (array at: core). 108 | coreMetrics from: 1 to: (coreMetrics length - 1) do: [:i | ('' + i + ', ') print]. 109 | (coreMetrics at: coreMetrics length) print. 110 | 111 | ']' print. 112 | 113 | core < numCores ifTrue: [ 114 | ', ' print. 115 | ]. 116 | core := core + 1. 117 | ]. 118 | ']' print. 119 | ) 120 | ) 121 | -------------------------------------------------------------------------------- /krun/__init__.py: -------------------------------------------------------------------------------- 1 | ABS_TIME_FORMAT = "%Y-%m-%d %H:%M:%S" 2 | LOGFILE_FILENAME_TIME_FORMAT = "%Y%m%d_%H%M%S" 3 | UNKNOWN_TIME_DELTA = "?:??:??" 4 | UNKNOWN_ABS_TIME = "????-??-?? ??:??:??"
5 | 6 | class EntryPoint(object): 7 | 8 | def __init__(self, target, subdir=None): 9 | self.target = target 10 | self.subdir = subdir 11 | 12 | def __eq__(self, other): 13 | return (isinstance(other, self.__class__) and 14 | (self.target == other.target) and 15 | (self.subdir == other.subdir)) 16 | -------------------------------------------------------------------------------- /krun/amperf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2.7 2 | 3 | import sys 4 | import os 5 | from logging import debug 6 | 7 | 8 | class AMPerfRatios(object): 9 | """Per-core {A,M}PERF analysis results""" 10 | 11 | def __init__(self, vals, violations, busy_iters): 12 | self.vals = vals # list of ratios 13 | self.violations = violations # dict: type_string -> [iter_idxs] 14 | self.busy_iters = busy_iters # list of bool 15 | 16 | def ok(self): 17 | for iters in self.violations.itervalues(): 18 | if len(iters) > 0: 19 | return False 20 | return True 21 | 22 | 23 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname( 24 | os.path.abspath(__file__))))) 25 | 26 | 27 | def check_amperf_ratios(aperfs, mperfs, wc_times, busy_threshold, ratio_bounds): 28 | results = [] # one AMPerfRatios instance for each core 29 | 30 | for core_idx in xrange(len(aperfs)): 31 | core_res = check_core_amperf_ratios(core_idx, aperfs[core_idx], 32 | mperfs[core_idx], wc_times, 33 | busy_threshold, ratio_bounds) 34 | results.append(core_res) 35 | return results 36 | 37 | 38 | def check_core_amperf_ratios(core_idx, aperfs, mperfs, wc_times, busy_threshold, 39 | ratio_bounds): 40 | assert len(aperfs) == len(mperfs) == len(wc_times) 41 | ratios = [] 42 | busy_iters = [] 43 | violations = { 44 | "throttle": [], 45 | "turbo": [], 46 | } 47 | 48 | itr = zip(xrange(len(aperfs)), aperfs, mperfs, wc_times) 49 | for iter_idx, aval, mval, wctval in itr: 50 | # normalise the counts to per-second readings 51 | norm_aval = float(aval) / wctval 52 | norm_mval = float(mval) / wctval 53 | ratio = norm_aval / norm_mval 54 | ratios.append(ratio) 55 | 56 | if norm_aval > busy_threshold: 57 | # Busy core 58 | busy_iters.append(True) 59 | if ratio < ratio_bounds[0]: 60 | violations["throttle"].append(iter_idx) 61 | elif ratio > ratio_bounds[1]: 62 | violations["turbo"].append(iter_idx) 63 | else: 64 | busy_iters.append(False) 65 | return AMPerfRatios(ratios, violations, busy_iters) 66 | -------------------------------------------------------------------------------- /krun/audit.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | 4 | class Audit(object): 5 | def __init__(self, audit_dict): 6 | assert isinstance(audit_dict, dict) 7 | self._audit = audit_dict 8 | for key, value in audit_dict.iteritems(): 9 | if type(value) is str: 10 | audit_dict[key] = value.decode("utf-8") 11 | 12 | def __contains__(self, key): 13 | return key in self._audit 14 | 15 | def __getitem__(self, key): 16 | return self._audit[key] 17 | 18 | def __setitem__(self, key, value): 19 | self._audit[key] = value 20 | 21 | def __unicode__(self): 22 | s = "" 23 | # important that the sections are sorted, for diffing 24 | for key, text in OrderedDict(sorted(self._audit.iteritems())).iteritems(): 25 | s += "Audit Section: %s" % key + "\n" 26 | s += "#" * 78 + "\n\n" 27 | s += unicode(text) + "\n\n" 28 | return s 29 | 30 | def __len__(self): 31 | return len(self._audit) 32 | 33 | @property 34 | def audit(self): 35 | return self._audit 36 | 37 | 
@audit.setter 38 | def audit(self, audit_dict): 39 | self._audit = audit_dict 40 | 41 | def __ne__(self, other): 42 | return not self == other 43 | 44 | def __eq__(self, other): 45 | if ((not isinstance(other, self.__class__)) or 46 | (not len(self) == len(other))): 47 | return False 48 | if "uname" in other: 49 | return self["uname"] == other["uname"] 50 | return True 51 | -------------------------------------------------------------------------------- /krun/config.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import time 3 | import sys 4 | import traceback 5 | 6 | from krun import LOGFILE_FILENAME_TIME_FORMAT 7 | from krun.util import fatal 8 | 9 | # XXX Add the rest of the required fields 10 | CHECK_FIELDS = ["HEAP_LIMIT", "STACK_LIMIT"] 11 | 12 | class Config(object): 13 | """All configuration for a Krun benchmark. 14 | Includes CLI args as well as configuration from .krun files. 15 | """ 16 | 17 | def __init__(self, config_file=None): 18 | # config defaults (variables) 19 | self.MAIL_TO = list() 20 | self.MAX_MAILS = 5 21 | self.VMS = dict() 22 | self.VARIANTS = dict() 23 | self.BENCHMARKS = dict() 24 | self.SKIP = list() 25 | self.N_EXECUTIONS = 1 26 | self.filename = config_file 27 | self.HEAP_LIMIT = None 28 | self.STACK_LIMIT = None 29 | self.TEMP_READ_PAUSE = 60 30 | self.ENABLE_PINNING = False 31 | self.AMPERF_BUSY_THRESHOLD = None 32 | self.AMPERF_RATIO_BOUNDS = None 33 | self.PRE_EXECUTION_CMDS = [] 34 | self.POST_EXECUTION_CMDS = [] 35 | self.EXECUTION_TIMEOUT = None 36 | 37 | # config defaults (callbacks) 38 | self.custom_dmesg_whitelist = None 39 | 40 | if config_file is not None: 41 | self.read_from_file(config_file) 42 | 43 | def _fatal_exception_execing_config(self, exc_info): 44 | lines = ["error importing config file: %s\n" % str(exc_info[1])] 45 | for frame in traceback.format_tb(exc_info[2]): 46 | lines.append(frame) 47 | fatal("".join(lines)) 48 | 49 | def check_config_consistency(self, config_str, filename): 50 | import difflib 51 | if self.text != config_str: 52 | diff = "".join(difflib.unified_diff( 53 | self.text.splitlines(True), config_str.splitlines(True), 54 | self.filename, "<cached in %s>" % filename)) 55 | fatal("The experiment is in an inconsistent state as the config " 56 | "file %s has changed since it was initially cached in %s" 57 | "\n%s" % ( 58 | self.filename, filename, diff)) 59 | 60 | def read_from_file(self, config_file): 61 | assert config_file.endswith(".krun") 62 | config_dict = {} 63 | try: 64 | execfile(config_file, config_dict) 65 | except Exception: 66 | self._fatal_exception_execing_config(sys.exc_info()) 67 | 68 | for key in CHECK_FIELDS: 69 | if key not in config_dict: 70 | fatal("Config file is missing a %s" % key) 71 | 72 | for vm_name in config_dict["VMS"]: 73 | if " " in vm_name: 74 | fatal("VM names must not contain spaces") 75 | 76 | for bench_name in config_dict["BENCHMARKS"]: 77 | if " " in bench_name: 78 | fatal("Benchmark names must not contain spaces") 79 | 80 | for variant_name in config_dict["VARIANTS"]: 81 | if " " in variant_name: 82 | fatal("Variant names must not contain spaces") 83 | 84 | self.__dict__.update(config_dict) 85 | self.filename = config_file 86 | with open(config_file, "r") as fp: 87 | self.text = fp.read() 88 | 89 | if self.AMPERF_RATIO_BOUNDS and not self.AMPERF_BUSY_THRESHOLD or \ 90 | not self.AMPERF_RATIO_BOUNDS and self.AMPERF_BUSY_THRESHOLD: 91 | fatal("AMPERF_RATIO_BOUNDS and AMPERF_BUSY_THRESHOLD must either " 92 | "both be defined in the config file, or 
neither") 93 | 94 | def log_filename(self, resume=False): 95 | assert self.filename.endswith(".krun") 96 | return self.filename[:-5] + ".log" 97 | 98 | def results_filename(self): # FIXME: was called output_name in util 99 | """Makes a result file name based upon the config file name.""" 100 | assert self.filename.endswith(".krun") 101 | return self.filename[:-5] + "_results.json.bz2" 102 | 103 | def should_skip(self, this_key): 104 | """Decides if 'this_key' is a benchmark key that will be skipped""" 105 | 106 | this_elems = this_key.split(":") 107 | if len(this_elems) != 3: 108 | raise ValueError("bad benchmark key: %s" % this_key) 109 | 110 | for skip_key in self.SKIP: 111 | skip_elems = skip_key.split(":") 112 | 113 | # Should be triples of: bench * vm * variant 114 | assert len(skip_elems) == 3 and len(this_elems) == 3 115 | 116 | # Don't mutate this_elems directly, as we need it 117 | # fresh for future iterations. 118 | this_elems_copy = this_elems[:] 119 | for i in range(3): 120 | if skip_elems[i] == "*": 121 | this_elems_copy[i] = "*" 122 | 123 | if skip_elems == this_elems_copy: 124 | return True # skip 125 | 126 | return False 127 | 128 | def __str__(self): 129 | return self.text 130 | 131 | def __eq__(self, other): 132 | # Equality should ignore filename. 133 | return (isinstance(other, self.__class__) and 134 | (self.text == other.text) and 135 | (self.MAIL_TO == other.MAIL_TO) and 136 | (self.MAX_MAILS == other.MAX_MAILS) and 137 | (self.VMS == other.VMS) and 138 | (self.VARIANTS == other.VARIANTS) and 139 | (self.BENCHMARKS == other.BENCHMARKS) and 140 | (self.SKIP == other.SKIP) and 141 | (self.N_EXECUTIONS == other.N_EXECUTIONS) and 142 | (self.PRE_EXECUTION_CMDS == other.PRE_EXECUTION_CMDS) and 143 | (self.POST_EXECUTION_CMDS == other.POST_EXECUTION_CMDS) and 144 | (self.EXECUTION_TIMEOUT == other.EXECUTION_TIMEOUT)) 145 | -------------------------------------------------------------------------------- /krun/env.py: -------------------------------------------------------------------------------- 1 | # Tools to deal with setting and updating an environment dict. 2 | 3 | from abc import ABCMeta, abstractmethod 4 | from krun.util import fatal 5 | import os 6 | 7 | 8 | class EnvChange(object): 9 | __metaclass__ = ABCMeta 10 | 11 | def __init__(self, var, val): 12 | self.var, self.val = var, val 13 | 14 | @staticmethod 15 | def apply_all(changes, env): 16 | """Apply a collection of changes""" 17 | for change in changes: 18 | change.apply(env) 19 | 20 | @abstractmethod 21 | def apply(self, env): 22 | pass 23 | 24 | 25 | class EnvChangeSet(EnvChange): 26 | def apply(self, env): 27 | cur_val = env.get(self.var, None) 28 | if cur_val is not None: 29 | fatal("Environment %s is already defined" % self.var) 30 | else: 31 | env[self.var] = self.val 32 | 33 | 34 | class EnvChangeAppend(EnvChange): 35 | def apply(self, env): 36 | cur_val = env.get(self.var, None) 37 | if cur_val is None: 38 | env[self.var] = self.val 39 | else: 40 | env[self.var] = "%s%s%s" % (cur_val, os.pathsep, self.val) 41 | -------------------------------------------------------------------------------- /krun/mail.py: -------------------------------------------------------------------------------- 1 | from email.mime.text import MIMEText 2 | import socket 3 | import textwrap 4 | import logging 5 | import subprocess32 6 | 7 | 8 | FROM_USER = "noreply" 9 | SMTP_HOST = "localhost" 10 | WRAP_THRESHOLD = 72 11 | 12 | QUOTA_THRESHOLD_TEMPLATE = ( 13 | "Note: krun is configured to send no more than %d mails per-run. 
" 14 | "This is the last email krun will send for this run. Please check " 15 | "the log file on the benchmark system for subsequent errors.") 16 | 17 | SENDMAIL = "/usr/sbin/sendmail" 18 | 19 | 20 | class Mailer(object): 21 | def __init__(self, recipients=None, max_mails=5): 22 | if recipients is not None: 23 | self.recipients = recipients 24 | else: 25 | self.recipients = [] 26 | self.hostname = socket.gethostname() 27 | self.short_hostname = self.hostname.split(".")[0] 28 | 29 | # After sending the maximum number of emails, we stop sending more so 30 | # as not to spam. Some emails however, you will always want to send. 31 | # For these use send(..., bypass_limiter=True). 32 | self.max_mails = max_mails 33 | 34 | def set_recipients(self, recipients): 35 | self.recipients = recipients 36 | 37 | def _wrap_para(self, txt): 38 | return "\n".join(textwrap.wrap(txt, WRAP_THRESHOLD)) 39 | 40 | def send(self, append_subject, inner_body, bypass_limiter=False, 41 | manifest=None): 42 | if manifest is not None: 43 | num_mails_sent = manifest.num_mails_sent 44 | else: 45 | # It's OK to call this without a manifest (e.g. outside the 46 | # scheduler loop, where there is no manifest to speak of), but 47 | # without a manifest we can't know how many emails have been sent. 48 | # So, the only time this is OK is if we are skipping the limiter 49 | # anyway. 50 | assert bypass_limiter # Krun can't know how many mails were sent 51 | num_mails_sent = 0 52 | 53 | if not self.recipients: 54 | # Don't bother mailing if there are no recipients 55 | return 56 | 57 | if bypass_limiter or num_mails_sent < self.max_mails: 58 | body = "Message from krun running on %s:\n\n" % self.hostname 59 | body += inner_body + "\n" 60 | 61 | if not bypass_limiter and num_mails_sent == self.max_mails - 1: 62 | body += "\n\n%s" % self._wrap_para( 63 | QUOTA_THRESHOLD_TEMPLATE % self.max_mails) 64 | logging.warn("Mail quota reached.") 65 | 66 | msg = MIMEText(body) # text/plain 67 | msg['Subject'] = '[krun:%s] %s' % \ 68 | (self.short_hostname, append_subject) 69 | msg['From'] = "%s@%s" % (FROM_USER, self.hostname) 70 | msg['To'] = ", ".join(self.recipients) 71 | self._sendmail(msg) 72 | 73 | if not bypass_limiter: 74 | manifest.update_num_mails_sent() 75 | else: 76 | pass # as we have already sent our quota of mails 77 | 78 | def _sendmail(self, msg): 79 | logging.debug("Sending email to '%s' subject line '%s'" % 80 | (msg['To'], msg['Subject'])) 81 | 82 | pipe = subprocess32.Popen([SENDMAIL, "-t", "-oi"], 83 | stdin=subprocess32.PIPE) 84 | pipe.communicate(msg.as_string()) 85 | 86 | rc = pipe.returncode 87 | if rc != 0: 88 | logging.warning("Sendmail process returned %d" % rc) 89 | -------------------------------------------------------------------------------- /krun/results.py: -------------------------------------------------------------------------------- 1 | from krun.audit import Audit 2 | from logging import debug 3 | from krun.util import fatal, format_raw_exec_results 4 | 5 | import bz2 # decent enough compression with Python 2.7 compatibility. 6 | import json 7 | 8 | 9 | class Results(object): 10 | """Results of a Krun benchmarking session. 11 | Can be serialised to disk. 12 | """ 13 | 14 | # We use this to detect times where a results instance is loaded prior to a 15 | # process execution. This wouuld be bad, as the results can be big and 16 | # cause memory to fragment. 
17 | ok_to_instantiate = False 18 | 19 | def __init__(self, config, platform, results_file=None): 20 | self.instantiation_check() 21 | 22 | self.config = config 23 | self.platform = platform 24 | 25 | # "bmark:vm:variant" -> [[e0i0, e0i1, ...], [e1i0, e1i1, ...], ...] 26 | self.wallclock_times = dict() # wall-clock times 27 | 28 | # Secondary, per-core measurements 29 | # Structure as above, but lifted for N processor cores. 30 | # i.e. aperf_counts[core#][proc_exec#][in_proc_iter#] 31 | self.core_cycle_counts = dict() 32 | self.aperf_counts = dict() 33 | self.mperf_counts = dict() 34 | 35 | # Record the flag for each process execution. 36 | self.pexec_flags = dict() 37 | 38 | # Record how long execs are taking so we can give the user a rough ETA. 39 | # Maps "bmark:vm:variant" -> [t_0, t_1, ...] 40 | self.eta_estimates = dict() 41 | 42 | # error_flag is flipped when a (non-fatal) error or warning occurs. 43 | # When Krun finishes and this flag is true, a message is printed, 44 | # thus prompting the user to investigate. 45 | self.error_flag = False 46 | 47 | # Fill in attributes from the config, platform and prior results. 48 | if self.config is not None: 49 | self.filename = self.config.results_filename() 50 | self.init_from_config() 51 | self.config_text = self.config.text 52 | if platform is not None: 53 | self._audit = Audit(platform.audit) 54 | else: 55 | self.audit = dict() 56 | 57 | # Import data from a Results object serialised on disk. 58 | if results_file is not None: 59 | self.read_from_file(results_file) 60 | 61 | def instantiation_check(self): 62 | if not Results.ok_to_instantiate: 63 | fatal("Results instance loaded prior to a process execution") 64 | 65 | @property 66 | def audit(self): 67 | return self._audit 68 | 69 | @audit.setter 70 | def audit(self, audit_dict): 71 | self._audit = Audit(audit_dict) 72 | 73 | def init_from_config(self): 74 | """Scaffold dictionaries based on a given configuration. 75 | """ 76 | # Initialise dictionaries based on config information. 77 | for vm_name, vm_info in self.config.VMS.items(): 78 | for bmark, _ in self.config.BENCHMARKS.items(): 79 | for variant in vm_info["variants"]: 80 | key = ":".join((bmark, vm_name, variant)) 81 | self.wallclock_times[key] = [] 82 | self.core_cycle_counts[key] = [] 83 | self.aperf_counts[key] = [] 84 | self.mperf_counts[key] = [] 85 | self.pexec_flags[key] = [] 86 | self.eta_estimates[key] = [] 87 | 88 | def read_from_file(self, results_file): 89 | """Initialise object from serialised file on disk. 90 | """ 91 | with bz2.BZ2File(results_file, "rb") as f: 92 | results = json.loads(f.read()) 93 | config = results.pop("config") 94 | self.__dict__.update(results) 95 | # Ensure that self.audit and self.config have correct types. 
96 | self.config_text = config 97 | if self.config is not None: 98 | self.config.check_config_consistency(config, results_file) 99 | self.audit = results["audit"] 100 | 101 | def integrity_check(self): 102 | """Check the results make sense""" 103 | 104 | num_cores = self.platform.num_per_core_measurements 105 | for key in self.wallclock_times.iterkeys(): 106 | wct_len = len(self.wallclock_times[key]) 107 | eta_len = len(self.eta_estimates[key]) 108 | cycles_len = len(self.core_cycle_counts[key]) 109 | aperf_len = len(self.aperf_counts[key]) 110 | mperf_len = len(self.mperf_counts[key]) 111 | pexec_flags_len = len(self.pexec_flags[key]) 112 | 113 | if eta_len != wct_len: 114 | fatal("inconsistent etas length: %s: %d vs %d" % (key, eta_len, wct_len)) 115 | 116 | if cycles_len != wct_len: 117 | fatal("inconsistent cycles length: %s: %d vs %d" % (key, cycles_len, wct_len)) 118 | 119 | if aperf_len != wct_len: 120 | fatal("inconsistent aperf length: %s: %d vs %d" % (key, aperf_len, wct_len)) 121 | 122 | if mperf_len != wct_len: 123 | fatal("inconsistent mperf length: %s: %d vs %d" % (key, mperf_len, wct_len)) 124 | 125 | if pexec_flags_len != wct_len: 126 | fatal("inconsistent pexec flags length: %s: %d vs %d" % (key, pexec_flags_len, wct_len)) 127 | 128 | # Check the length of the different measurements match and that the 129 | # number of per-core measurements is consistent. 130 | for exec_idx in xrange(len(self.wallclock_times[key])): 131 | expect_num_iters = len(self.wallclock_times[key][exec_idx]) 132 | 133 | cycles_num_cores = len(self.core_cycle_counts[key][exec_idx]) 134 | if cycles_num_cores != self.platform.num_per_core_measurements: 135 | fatal("wrong #cores in core_cycle_counts: %s[%d]: %d vs %d" % 136 | (key, exec_idx, num_cores, cycles_num_cores)) 137 | for core_idx, core in enumerate(self.core_cycle_counts[key][exec_idx]): 138 | core_len = len(core) 139 | if core_len != expect_num_iters: 140 | fatal("inconsistent #iters in core_cycle_counts: " 141 | "%s[%d][%d]. %d vs %d" % 142 | (key, exec_idx, core_idx, core_len, expect_num_iters)) 143 | 144 | aperf_num_cores = len(self.aperf_counts[key][exec_idx]) 145 | if aperf_num_cores != self.platform.num_per_core_measurements: 146 | fatal("wrong #cores in aperf_counts: %s[%d]: %d vs %d" % 147 | (key, exec_idx, num_cores, aperf_num_cores)) 148 | for core_idx, core in enumerate(self.aperf_counts[key][exec_idx]): 149 | core_len = len(core) 150 | if core_len != expect_num_iters: 151 | fatal("inconsistent #iters in aperf_counts: " 152 | "%s[%d][%d]. %d vs %d" % 153 | (key, exec_idx, core_idx, core_len, expect_num_iters)) 154 | 155 | mperf_num_cores = len(self.mperf_counts[key][exec_idx]) 156 | if mperf_num_cores != self.platform.num_per_core_measurements: 157 | fatal("wrong #cores in mperf_counts: %s[%d]: %d vs %d" % 158 | (key, exec_idx, num_cores, mperf_num_cores)) 159 | for core_idx, core in enumerate(self.mperf_counts[key][exec_idx]): 160 | core_len = len(core) 161 | if core_len != expect_num_iters: 162 | fatal("inconsistent #iters in mperf_counts: " 163 | "%s[%d][%d]. 
%d vs %d" % 164 | (key, exec_idx, core_idx, core_len, expect_num_iters)) 165 | 166 | def write_to_file(self): 167 | """Serialise object on disk.""" 168 | 169 | debug("Writing results out to: %s" % self.filename) 170 | self.integrity_check() 171 | 172 | to_write = { 173 | "config": self.config.text, 174 | "wallclock_times": self.wallclock_times, 175 | "core_cycle_counts": self.core_cycle_counts, 176 | "aperf_counts": self.aperf_counts, 177 | "mperf_counts": self.mperf_counts, 178 | "pexec_flags": self.pexec_flags, 179 | "audit": self.audit.audit, 180 | "eta_estimates": self.eta_estimates, 181 | "error_flag": self.error_flag, 182 | } 183 | with bz2.BZ2File(self.filename, "w") as f: 184 | f.write(json.dumps(to_write, 185 | indent=1, sort_keys=True, encoding='utf-8')) 186 | 187 | def jobs_completed(self, key): 188 | """Return number of executions for which we have data for a given 189 | benchmark / vm / variant triplet. 190 | """ 191 | return len(self.wallclock_times[key]) 192 | 193 | def __eq__(self, other): 194 | if not isinstance(other, self.__class__): 195 | return False 196 | return (self.config == other.config and 197 | self.wallclock_times == other.wallclock_times and 198 | self.core_cycle_counts == other.core_cycle_counts and 199 | self.aperf_counts == other.aperf_counts and 200 | self.mperf_counts == other.mperf_counts and 201 | self.pexec_flags == other.pexec_flags and 202 | self.audit == other.audit and 203 | self.eta_estimates == other.eta_estimates and 204 | self.error_flag == other.error_flag) 205 | 206 | def append_exec_measurements(self, key, measurements, flag): 207 | """Unpacks a measurements dict into the Results instance""" 208 | 209 | # Only a subset of flags can arise at this time. 210 | assert flag in ("C", "E", "T") 211 | 212 | # Consistently format monotonic time doubles 213 | wallclock_times = format_raw_exec_results( 214 | measurements["wallclock_times"]) 215 | 216 | self.pexec_flags[key].append(flag) 217 | self.wallclock_times[key].append(wallclock_times) 218 | self.core_cycle_counts[key].append(measurements["core_cycle_counts"]) 219 | self.aperf_counts[key].append(measurements["aperf_counts"]) 220 | self.mperf_counts[key].append(measurements["mperf_counts"]) 221 | 222 | def dump(self, what): 223 | if what == "config": 224 | return unicode(self.config_text) 225 | if what == "audit": 226 | return unicode(self.audit) 227 | return json.dumps(getattr(self, what), 228 | sort_keys=True, indent=2) 229 | -------------------------------------------------------------------------------- /krun/tests/__init__.py: -------------------------------------------------------------------------------- 1 | from krun.tests.mocks import MockPlatform, MockMailer 2 | from abc import ABCMeta 3 | from krun.platform import detect_platform 4 | from krun.config import Config 5 | import pytest 6 | import os 7 | 8 | 9 | TEST_DIR = os.path.abspath(os.path.dirname(__file__)) 10 | 11 | 12 | @pytest.fixture(autouse=True) 13 | def no_sleep(monkeypatch): 14 | monkeypatch.setattr("time.sleep", lambda x: None) 15 | 16 | 17 | def subst_env_arg(lst, var): 18 | """Returns a copy of the list with elements starting with 'var=' changed to 19 | literally 'var='. 
Used in tests where an environment variable argument to 20 | env(1) contains a system-specific path.""" 21 | 22 | find = var + "=" 23 | new = [] 24 | for i in lst: 25 | if i.startswith(find): 26 | i = find 27 | new.append(i) 28 | return new 29 | 30 | 31 | class BaseKrunTest(object): 32 | """Abstract class defining common functionality for Krun tests.""" 33 | 34 | __metaclass__ = ABCMeta 35 | 36 | @pytest.fixture 37 | def mock_platform(self): 38 | return MockPlatform(MockMailer(), Config()) 39 | 40 | @pytest.fixture 41 | def platform(self): 42 | return detect_platform(MockMailer(), Config()) 43 | -------------------------------------------------------------------------------- /krun/tests/broken_etas_results.json.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/softdevteam/krun/a0c8e5bfb91d192695df63a500be96b7e6764491/krun/tests/broken_etas_results.json.bz2 -------------------------------------------------------------------------------- /krun/tests/corrupt.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import (PythonVMDef, JavaVMDef) 3 | from krun import EntryPoint 4 | 5 | # Who to mail 6 | MAIL_TO = [[ 7 | 8 | # Maximum number of error emails to send per-run 9 | MAX_MAILS = 2 10 | -------------------------------------------------------------------------------- /krun/tests/custom_dmesg_whitelist0001.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import PythonVMDef 3 | from krun import EntryPoint 4 | 5 | VARIANTS = { 6 | "default-python": EntryPoint("bench.py", subdir="python"), 7 | } 8 | 9 | N_EXECUTIONS = 2 10 | 11 | VMS = { 12 | 'CPython': { 13 | 'vm_def': PythonVMDef('/usr/bin/python2'), 14 | 'variants': ['default-python'], 15 | 'n_iterations': 10, 16 | } 17 | } 18 | 19 | BENCHMARKS = { 20 | 'nbody': 1000, 21 | } 22 | 23 | def custom_dmesg_whitelist(default_whitelist): 24 | return default_whitelist + ["^custom1*", "^.custom2$"] 25 | 26 | HEAP_LIMIT = 2097152 27 | STACK_LIMIT = 8192 28 | -------------------------------------------------------------------------------- /krun/tests/custom_dmesg_whitelist0002.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import PythonVMDef 3 | from krun import EntryPoint 4 | 5 | VARIANTS = { 6 | "default-python": EntryPoint("bench.py", subdir="python"), 7 | } 8 | 9 | N_EXECUTIONS = 2 10 | 11 | VMS = { 12 | 'CPython': { 13 | 'vm_def': PythonVMDef('/usr/bin/python2'), 14 | 'variants': ['default-python'], 15 | 'n_iterations': 10, 16 | } 17 | } 18 | 19 | BENCHMARKS = { 20 | 'nbody': 1000, 21 | } 22 | 23 | def custom_dmesg_whitelist(default_whitelist): 24 | return ["^.no+", "^defaults", "^here+"] 25 | 26 | HEAP_LIMIT = 2097152 27 | STACK_LIMIT = 8192 28 | -------------------------------------------------------------------------------- /krun/tests/env.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import PythonVMDef, PyPyVMDef 3 | from krun import EntryPoint 4 | 5 | MAIL_TO = [] 6 | 7 | HEAP_LIMIT = 2097152 8 | STACK_LIMIT = 8192 9 | 10 | PY_ENV = { 11 | "LD_LIBRARY_PATH": "/wibble/lib", 12 | "ANOTHER_ENV": "arbitrary_user_val", 13 | } 14 | 15 | VARIANTS = { 16 | "default-python": EntryPoint("bench.py", subdir="python"), 17 | } 18 | 19 | VMS = { 20 | 'CPython': { 21 | 'vm_def': 
PythonVMDef('/usr/bin/python2', env=PY_ENV), 22 | 'variants': ['default-python'], 23 | 'n_iterations': 1, 24 | }, 25 | 'PyPy': { 26 | 'vm_def': PyPyVMDef('/opt/pypy/pypy/goal/pypy-c', env=PY_ENV), 27 | 'variants': ['default-python'], 28 | 'n_iterations': 1, 29 | } 30 | } 31 | 32 | 33 | BENCHMARKS = { 34 | 'dummy': 1000, 35 | } 36 | 37 | SKIP=[] 38 | 39 | N_EXECUTIONS = 2 40 | -------------------------------------------------------------------------------- /krun/tests/example.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import (PythonVMDef, JavaVMDef) 3 | from krun import EntryPoint 4 | 5 | # Who to mail 6 | MAIL_TO = [] 7 | 8 | # Maximum number of error emails to send per-run 9 | #MAX_MAILS = 2 10 | 11 | DIR = os.getcwd() 12 | JKRUNTIME_DIR = os.path.join(DIR, "krun", "libkruntime", "") 13 | 14 | HEAP_LIMIT = 2097152 15 | STACK_LIMIT = 8192 16 | 17 | # Variant name -> EntryPoint 18 | VARIANTS = { 19 | "default-java": EntryPoint("KrunEntry", subdir="java"), 20 | "default-python": EntryPoint("bench.py", subdir="python"), 21 | } 22 | 23 | ITERATIONS_ALL_VMS = 5 # Small number for testing. 24 | 25 | VMS = { 26 | 'Java': { 27 | 'vm_def': JavaVMDef('/usr/bin/java'), 28 | 'variants': ['default-java'], 29 | 'n_iterations': ITERATIONS_ALL_VMS, 30 | }, 31 | 'CPython': { 32 | 'vm_def': PythonVMDef('/usr/bin/python2'), 33 | 'variants': ['default-python'], 34 | 'n_iterations': ITERATIONS_ALL_VMS, 35 | } 36 | } 37 | 38 | 39 | BENCHMARKS = { 40 | 'dummy': 1000, 41 | 'nbody': 1000, 42 | } 43 | 44 | # list of "bench:vm:variant" 45 | SKIP=[ 46 | #"*:CPython:*", 47 | #"*:Java:*", 48 | ] 49 | 50 | N_EXECUTIONS = 2 # Number of fresh processes. 51 | 52 | TEMP_READ_PAUSE = 1 53 | -------------------------------------------------------------------------------- /krun/tests/example_all_skip.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import (PythonVMDef, JavaVMDef) 3 | from krun import EntryPoint 4 | 5 | DIR = os.getcwd() 6 | JKRUNTIME_DIR = os.path.join(DIR, "krun", "libkruntime", "") 7 | 8 | HEAP_LIMIT = 2097152 9 | STACK_LIMIT = 8192 10 | 11 | VARIANTS = { 12 | "default-java": EntryPoint("KrunEntry", subdir="java"), 13 | "default-python": EntryPoint("bench.py", subdir="python"), 14 | } 15 | 16 | ITERATIONS_ALL_VMS = 5 17 | 18 | VMS = { 19 | 'Java': { 20 | 'vm_def': JavaVMDef('/usr/bin/java'), 21 | 'variants': ['default-java'], 22 | 'n_iterations': ITERATIONS_ALL_VMS, 23 | }, 24 | 'CPython': { 25 | 'vm_def': PythonVMDef('/usr/bin/python2'), 26 | 'variants': ['default-python'], 27 | 'n_iterations': ITERATIONS_ALL_VMS, 28 | } 29 | } 30 | 31 | 32 | BENCHMARKS = { 33 | 'dummy': 1000, 34 | 'nbody': 1000, 35 | } 36 | 37 | SKIP= [ 38 | "*:CPython:*", 39 | "*:Java:*", 40 | ] 41 | 42 | N_EXECUTIONS = 2 43 | 44 | TEMP_READ_PAUSE = 1 45 | -------------------------------------------------------------------------------- /krun/tests/example_skip_1vm.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import (PythonVMDef, JavaVMDef) 3 | from krun import EntryPoint 4 | 5 | DIR = os.getcwd() 6 | JKRUNTIME_DIR = os.path.join(DIR, "krun", "libkruntime", "") 7 | 8 | HEAP_LIMIT = 2097152 9 | STACK_LIMIT = 8192 10 | 11 | VARIANTS = { 12 | "default-java": EntryPoint("KrunEntry", subdir="java"), 13 | "default-python": EntryPoint("bench.py", subdir="python"), 14 | } 15 | 16 | ITERATIONS_ALL_VMS = 5 17 | 18 | VMS = 
{ 19 | 'Java': { 20 | 'vm_def': JavaVMDef('/usr/bin/java'), 21 | 'variants': ['default-java'], 22 | 'n_iterations': ITERATIONS_ALL_VMS, 23 | }, 24 | 'CPython': { 25 | 'vm_def': PythonVMDef('/usr/bin/python2'), 26 | 'variants': ['default-python'], 27 | 'n_iterations': ITERATIONS_ALL_VMS, 28 | } 29 | } 30 | 31 | 32 | BENCHMARKS = { 33 | 'dummy': 1000, 34 | 'nbody': 1000, 35 | } 36 | 37 | SKIP= ["*:CPython:*"] 38 | 39 | N_EXECUTIONS = 2 40 | 41 | TEMP_READ_PAUSE = 1 42 | -------------------------------------------------------------------------------- /krun/tests/mocks.py: -------------------------------------------------------------------------------- 1 | from krun.platform import BasePlatform 2 | from krun.config import Config 3 | from krun.mail import Mailer 4 | import pytest 5 | 6 | 7 | class MockMailer(Mailer): 8 | def __init__(self, recipients=None, max_mails=5): 9 | Mailer.__init__(self, recipients, max_mails) 10 | self.sent = [] # cache here instead of sending for real 11 | self.hostname = "tests.suite" 12 | self.short_hostname = self.hostname.split(".")[0] 13 | 14 | def _sendmail(self, msg): 15 | self.sent.append(msg) 16 | 17 | 18 | @pytest.fixture 19 | def mock_mailer(): 20 | return MockMailer() 21 | 22 | 23 | class MockPlatform(BasePlatform): 24 | """Pretends to be a Platform instance.""" 25 | 26 | CHANGE_USER_CMD = "" 27 | 28 | def __init__(self, mailer, config): 29 | BasePlatform.__init__(self, mailer, config) 30 | self.mailer = mailer 31 | self.audit = dict() 32 | self.num_cpus = 0 33 | self.num_per_core_measurements = 0 34 | self.no_user_change = True 35 | self.temp_sensors = [] 36 | 37 | def default_dmesg_whitelist(self): 38 | return [] 39 | 40 | def pin_process_args(self): 41 | return [] 42 | 43 | def change_scheduler_args(self): 44 | return [] 45 | 46 | def check_dmesg_for_changes(self, manifest): 47 | pass 48 | 49 | def CHANGE_USER_CMD(self): 50 | pass 51 | 52 | def take_temperature_readings(self): 53 | return {} 54 | 55 | def check_preliminaries(self): 56 | pass 57 | 58 | def unbuffer_fd(self, fd): 59 | pass 60 | 61 | def adjust_env_cmd(self, env_dct): 62 | return [] 63 | 64 | def FORCE_LIBRARY_PATH_ENV_NAME(self): 65 | pass 66 | 67 | def collect_audit(self): 68 | self.audit["uname"] = "MockPlatform" 69 | 70 | def bench_cmdline_adjust(self, args, env_dct): 71 | return args 72 | 73 | def change_user_args(self, user="root"): 74 | return ["sudo"] 75 | 76 | def process_priority_args(self): 77 | return [] 78 | 79 | def get_reboot_cmd(self): 80 | assert False # tests should never try to reboot 81 | 82 | def _change_user_args(self): 83 | return [] 84 | 85 | def _save_power(self): 86 | pass 87 | 88 | def _collect_dmesg_lines(self): 89 | return [] 90 | 91 | def bench_env_changes(args, env_dct): 92 | return [] 93 | 94 | def sanity_checks(self): 95 | pass 96 | 97 | def sync_disks(self): 98 | pass 99 | 100 | def find_temperature_sensors(self): 101 | return [] 102 | 103 | def is_virtual(self): 104 | return False 105 | 106 | def make_fresh_krun_user(self): 107 | pass 108 | 109 | def get_num_temperature_sensors(self): 110 | return 1 111 | 112 | def _read_throttle_counts(self): 113 | return {} 114 | 115 | 116 | @pytest.fixture 117 | def mock_platform(): 118 | return MockPlatform(MockMailer(), Config()) 119 | 120 | 121 | class MockManifestManager(object): 122 | """For tests which need a manifest, but you don't want a file on-disk or a 123 | config instance""" 124 | 125 | def __init__(self): 126 | self.num_mails_sent = 0 127 | 128 | @pytest.fixture 129 | def mock_manifest(): 130 | return 
MockManifestManager() 131 | -------------------------------------------------------------------------------- /krun/tests/more_complicated.krun: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from krun.vm_defs import (PythonVMDef, LuaVMDef, JavaVMDef, GraalVMDef, 4 | PHPVMDef, TruffleRubyVMDef, V8VMDef, NativeCodeVMDef, 5 | find_internal_jvmci_java_bin, PyPyVMDef) 6 | from krun import EntryPoint 7 | 8 | MAIL_TO = [] 9 | MAX_MAILS = 2 10 | 11 | JDK8_HOME = "dummy" 12 | JDK8_BIN = os.path.join(JDK8_HOME, "bin", "java") 13 | 14 | HEAP_LIMIT = 2097152 15 | STACK_LIMIT = 8192 16 | 17 | VARIANTS = { 18 | "default-c": EntryPoint("bench.so", subdir="c"), 19 | "default-java": EntryPoint("KrunEntry", subdir="java"), 20 | "default-lua": EntryPoint("bench.lua", subdir="lua"), 21 | "default-python": EntryPoint("bench.py", subdir="python"), 22 | "default-php": EntryPoint("bench.php", subdir="php"), 23 | "default-ruby": EntryPoint("bench.rb", subdir="ruby"), 24 | "default-javascript": EntryPoint("bench.js", subdir="javascript"), 25 | } 26 | 27 | ITERATIONS_ALL_VMS = 2000 28 | 29 | VMS = { 30 | 'C': { 31 | 'vm_def': NativeCodeVMDef(), 32 | 'variants': ['default-c'], 33 | 'n_iterations': ITERATIONS_ALL_VMS, 34 | 35 | }, 36 | 'PyPy': { 37 | 'vm_def': PyPyVMDef('work/pypy/pypy/goal/pypy-c'), 38 | 'variants': ['default-python'], 39 | 'n_iterations': ITERATIONS_ALL_VMS, 40 | }, 41 | 'Hotspot': { 42 | 'vm_def': JavaVMDef(JDK8_BIN), 43 | 'variants': ['default-java'], 44 | 'n_iterations': ITERATIONS_ALL_VMS, 45 | }, 46 | 'LuaJIT': { 47 | 'vm_def': LuaVMDef('work/luajit/src/luajit'), 48 | 'variants': ['default-lua'], 49 | 'n_iterations': ITERATIONS_ALL_VMS, 50 | }, 51 | 'V8': { 52 | 'vm_def': V8VMDef('work/v8/out/native/d8'), 53 | 'variants': ['default-javascript'], 54 | 'n_iterations': ITERATIONS_ALL_VMS, 55 | }, 56 | 'CPython': { 57 | 'vm_def': PythonVMDef('work/cpython-inst/bin/python'), 58 | 'variants': ['default-python'], 59 | 'n_iterations': ITERATIONS_ALL_VMS, 60 | }, 61 | 'Graal': { 62 | 'vm_def': "dummy", 63 | 'variants': ['default-java'], 64 | 'n_iterations': ITERATIONS_ALL_VMS, 65 | }, 66 | 'HHVM': { 67 | 'vm_def': PHPVMDef('work/hhvm/hphp/hhvm/php'), 68 | 'variants': ['default-php'], 69 | 'n_iterations': ITERATIONS_ALL_VMS, 70 | }, 71 | 'TruffleRuby' : { 72 | 'vm_def': TruffleRubyVMDef(truffleruby_dir='work/truffleruby', 73 | graal_home='work/graal', 74 | mx_dir='work/mx', 75 | jvmci_home='work/graal-jvmci-8/jdk1.8.0/product'), 76 | 'variants': ['default-ruby'], 77 | 'n_iterations': ITERATIONS_ALL_VMS, 78 | }, 79 | } 80 | 81 | 82 | BENCHMARKS = { 83 | 'binarytrees': 25, 84 | 'richards': 500, 85 | 'spectralnorm': 3, 86 | 'nbody': 15, 87 | 'fasta': 100, 88 | 'fannkuch_redux': 200, 89 | } 90 | 91 | SKIP= [ 92 | "fasta:TruffleRuby:default-ruby", 93 | "richards:HHVM:default-php", 94 | "spectralnorm:TruffleRuby:default-ruby", 95 | "*:CPython:*", 96 | ] 97 | 98 | N_EXECUTIONS = 2 99 | -------------------------------------------------------------------------------- /krun/tests/one_exec.krun: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from krun.vm_defs import (PythonVMDef, JavaVMDef) 4 | from krun import EntryPoint 5 | 6 | HEAP_LIMIT = 2097152 7 | STACK_LIMIT = 8192 8 | 9 | VARIANTS = {"default-python": EntryPoint("bench.py", subdir="python")} 10 | 11 | VMS = { 12 | 'CPython': { 13 | 'vm_def': PythonVMDef(sys.executable), 14 | 'variants': ['default-python'], 15 | 
'n_iterations': 3, 16 | } 17 | } 18 | 19 | BENCHMARKS = { 20 | 'dummy': 1000, 21 | } 22 | 23 | N_EXECUTIONS = 1 24 | TEMP_READ_PAUSE = 1 25 | -------------------------------------------------------------------------------- /krun/tests/quick.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import (PythonVMDef, JavaVMDef) 3 | from krun import EntryPoint 4 | 5 | # Who to mail 6 | MAIL_TO = [] 7 | 8 | # Maximum number of error emails to send per-run 9 | #MAX_MAILS = 2 10 | 11 | DIR = os.getcwd() 12 | JKRUNTIME_DIR = os.path.join(DIR, "krun", "libkruntime", "") 13 | 14 | HEAP_LIMIT = 2097152 15 | STACK_LIMIT = 8192 16 | 17 | # Variant name -> EntryPoint 18 | VARIANTS = { 19 | "default-java": EntryPoint("KrunEntry", subdir="java"), 20 | "default-python": EntryPoint("bench.py", subdir="python"), 21 | } 22 | 23 | ITERATIONS_ALL_VMS = 1 # Small number for testing. 24 | 25 | VMS = { 26 | 'Java': { 27 | 'vm_def': JavaVMDef('/usr/bin/java'), 28 | 'variants': ['default-java'], 29 | 'n_iterations': ITERATIONS_ALL_VMS, 30 | }, 31 | 'CPython': { 32 | 'vm_def': PythonVMDef('/usr/bin/python2'), 33 | 'variants': ['default-python'], 34 | 'n_iterations': ITERATIONS_ALL_VMS, 35 | } 36 | } 37 | 38 | 39 | BENCHMARKS = { 40 | 'dummy': 1000, 41 | 'nbody': 1000, 42 | } 43 | 44 | # list of "bench:vm:variant" 45 | SKIP=[ 46 | #"*:CPython:*", 47 | #"*:Java:*", 48 | ] 49 | 50 | N_EXECUTIONS = 1 # Number of fresh processes. 51 | -------------------------------------------------------------------------------- /krun/tests/quick_results.json.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/softdevteam/krun/a0c8e5bfb91d192695df63a500be96b7e6764491/krun/tests/quick_results.json.bz2 -------------------------------------------------------------------------------- /krun/tests/skips.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import (PythonVMDef, JavaVMDef) 3 | from krun import EntryPoint 4 | 5 | # Who to mail 6 | MAIL_TO = [] 7 | 8 | # Maximum number of error emails to send per-run 9 | #MAX_MAILS = 2 10 | 11 | DIR = os.getcwd() 12 | JKRUNTIME_DIR = os.path.join(DIR, "krun", "libkruntime", "") 13 | 14 | HEAP_LIMIT = 2097152 15 | STACK_LIMIT = 8192 16 | 17 | # Variant name -> EntryPoint 18 | VARIANTS = { 19 | "default-java": EntryPoint("KrunEntry", subdir="java"), 20 | "default-python": EntryPoint("bench.py", subdir="python"), 21 | } 22 | 23 | ITERATIONS_ALL_VMS = 1 # Small number for testing. 24 | 25 | VMS = { 26 | 'Java': { 27 | 'vm_def': JavaVMDef('/usr/bin/java'), 28 | 'variants': ['default-java'], 29 | 'n_iterations': ITERATIONS_ALL_VMS, 30 | }, 31 | 'CPython': { 32 | 'vm_def': PythonVMDef('/usr/bin/python2'), 33 | 'variants': ['default-python'], 34 | 'n_iterations': ITERATIONS_ALL_VMS, 35 | } 36 | } 37 | 38 | 39 | BENCHMARKS = { 40 | 'dummy': 1000, 41 | 'nbody': 1000, 42 | } 43 | 44 | # list of "bench:vm:variant" 45 | SKIP=[ 46 | "*:PyPy:*", 47 | "*:CPython:*", 48 | "*:Hotspot:*", 49 | "*:Graal:*", 50 | "*:LuaJIT:*", 51 | "*:HHVM:*", 52 | "*:TruffleRuby:*", 53 | "*:V8:*", 54 | ] 55 | 56 | N_EXECUTIONS = 1 # Number of fresh processes. 
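# Illustration (a comment only -- not read by Krun): Config.should_skip()
# treats "*" as a wildcard for one whole field of a "bench:vm:variant" key,
# so e.g. "*:CPython:*" matches both "nbody:CPython:default-python" and
# "dummy:CPython:default-python".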
57 | -------------------------------------------------------------------------------- /krun/tests/space_in_benchmark_name.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import PythonVMDef 3 | from krun import EntryPoint 4 | 5 | VARIANTS = { 6 | "default-python": EntryPoint("bench.py", subdir="python"), 7 | } 8 | 9 | ITERATIONS_ALL_VMS = 5 10 | 11 | VMS = { 12 | 'CPython': { 13 | 'vm_def': PythonVMDef('/usr/bin/python2'), 14 | 'variants': ['default-python'], 15 | 'n_iterations': ITERATIONS_ALL_VMS, 16 | } 17 | } 18 | 19 | 20 | BENCHMARKS = { 21 | 'dummy benchmark': 1000, 22 | } 23 | 24 | N_EXECUTIONS = 2 25 | 26 | HEAP_LIMIT = 2097152 27 | STACK_LIMIT = 8192 28 | -------------------------------------------------------------------------------- /krun/tests/space_in_variant_name.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import PythonVMDef 3 | from krun import EntryPoint 4 | 5 | VARIANTS = { 6 | "default-python 123": EntryPoint("bench.py", subdir="python"), 7 | } 8 | 9 | ITERATIONS_ALL_VMS = 5 10 | 11 | VMS = { 12 | 'CPython': { 13 | 'vm_def': PythonVMDef('/usr/bin/python2'), 14 | 'variants': ['default-python 123'], 15 | 'n_iterations': ITERATIONS_ALL_VMS, 16 | } 17 | } 18 | 19 | 20 | BENCHMARKS = { 21 | 'dummy': 1000, 22 | } 23 | 24 | N_EXECUTIONS = 2 25 | 26 | HEAP_LIMIT = 2097152 27 | STACK_LIMIT = 8192 28 | -------------------------------------------------------------------------------- /krun/tests/space_in_vm_name.krun: -------------------------------------------------------------------------------- 1 | import os 2 | from krun.vm_defs import PythonVMDef 3 | from krun import EntryPoint 4 | 5 | VARIANTS = { 6 | "default-python": EntryPoint("bench.py", subdir="python"), 7 | } 8 | 9 | ITERATIONS_ALL_VMS = 5 10 | 11 | VMS = { 12 | 'C Python': { 13 | 'vm_def': PythonVMDef('/usr/bin/python2'), 14 | 'variants': ['default-python'], 15 | 'n_iterations': ITERATIONS_ALL_VMS, 16 | } 17 | } 18 | 19 | 20 | BENCHMARKS = { 21 | 'dummy': 1000, 22 | } 23 | 24 | N_EXECUTIONS = 2 25 | 26 | HEAP_LIMIT = 2097152 27 | STACK_LIMIT = 8192 28 | -------------------------------------------------------------------------------- /krun/tests/test_amperf.py: -------------------------------------------------------------------------------- 1 | from krun.amperf import check_core_amperf_ratios 2 | 3 | 4 | GHZ_3_6 = 3.6 * 1024 * 1024 * 1024 # 3.6 GHz 5 | 6 | 7 | def test_ok_ratios0001(): 8 | """Check ratios which are all 1.0 are OK""" 9 | 10 | busy_threshold = GHZ_3_6 / 1000 11 | ratio_bounds = 0.999, 1.001 12 | 13 | aperfs = mperfs = [GHZ_3_6 for x in xrange(2000)] 14 | wcts = [1.0 for x in xrange(2000)] 15 | 16 | ratios = check_core_amperf_ratios( 17 | 0, aperfs, mperfs, wcts, busy_threshold, ratio_bounds) 18 | 19 | assert all([r == 1.0 for r in ratios.vals]) 20 | assert ratios.ok() 21 | 22 | 23 | def test_ok_ratios0002(): 24 | """Check normalisation by time is working""" 25 | 26 | busy_threshold = GHZ_3_6 / 1000 27 | ratio_bounds = 0.999, 1.001 28 | 29 | aperfs = mperfs = [GHZ_3_6 / 2.0 for x in xrange(2000)] 30 | wcts = [0.5 for x in xrange(2000)] 31 | 32 | ratios = check_core_amperf_ratios( 33 | 0, aperfs, mperfs, wcts, busy_threshold, ratio_bounds) 34 | 35 | assert all([r == 1.0 for r in ratios.vals]) 36 | assert ratios.ok() 37 | 38 | 39 | def test_bad_ratios0001(): 40 | """Check throttle problems are detected""" 41 | 42 | busy_threshold = GHZ_3_6 / 1000 43 | 
ratio_bounds = 0.9, 1.1 44 | 45 | aperfs = [GHZ_3_6 for x in xrange(2000)] 46 | mperfs = aperfs[:] 47 | wcts = [1.0 for x in xrange(2000)] 48 | aperfs[501] = GHZ_3_6 / 4 49 | 50 | ratios = check_core_amperf_ratios( 51 | 0, aperfs, mperfs, wcts, busy_threshold, ratio_bounds) 52 | 53 | assert not all([r == 1.0 for r in ratios.vals]) 54 | assert not ratios.ok() 55 | assert ratios.violations["throttle"] == [501] 56 | 57 | 58 | def test_bad_ratios0002(): 59 | """Check turbo problems are detected""" 60 | 61 | busy_threshold = GHZ_3_6 / 1000 62 | ratio_bounds = 0.9, 1.1 63 | 64 | aperfs = [GHZ_3_6 for x in xrange(2000)] 65 | mperfs = aperfs[:] 66 | wcts = [1.0 for x in xrange(2000)] 67 | aperfs[666] = GHZ_3_6 * 1.25 68 | 69 | ratios= check_core_amperf_ratios( 70 | 0, aperfs, mperfs, wcts, busy_threshold, ratio_bounds) 71 | 72 | assert not all([r == 1.0 for r in ratios.vals]) 73 | assert not ratios.ok() 74 | assert ratios.violations["turbo"] == [666] 75 | 76 | 77 | def test_bad_ratios0003(): 78 | """Check a mix of problems are detected""" 79 | 80 | busy_threshold = GHZ_3_6 / 1000 81 | ratio_bounds = 0.9, 1.1 82 | 83 | aperfs = [GHZ_3_6 for x in xrange(2000)] 84 | mperfs = aperfs[:] 85 | wcts = [1.0 for x in xrange(2000)] 86 | 87 | # Mixed bag of problems here 88 | aperfs[14] = GHZ_3_6 * 0.77 # throttle 89 | mperfs[307] = GHZ_3_6 * 0.8 # turbo 90 | aperfs[788] = GHZ_3_6 * 1.15 # turbo 91 | aperfs[1027] = GHZ_3_6 * 0.62 # throttle 92 | mperfs[1027] = GHZ_3_6 * 0.84 # ^^^^^^^^ 93 | 94 | ratios = check_core_amperf_ratios( 95 | 0, aperfs, mperfs, wcts, busy_threshold, ratio_bounds) 96 | 97 | assert not all([r == 1.0 for r in ratios.vals]) 98 | assert not ratios.ok() 99 | assert ratios.violations["turbo"] == [307, 788] 100 | assert ratios.violations["throttle"] == [14, 1027] 101 | -------------------------------------------------------------------------------- /krun/tests/test_audit.py: -------------------------------------------------------------------------------- 1 | from krun.audit import Audit 2 | 3 | 4 | def test_eq(): 5 | audit = Audit({"a": 100, "b": 200}) 6 | empty = Audit(dict()) 7 | assert audit == audit 8 | assert empty == empty 9 | assert not empty == audit 10 | assert not list() == audit 11 | a0 = {"uname": "Linux"} 12 | a1 = {"uname": "BSD"} 13 | assert not a0 == a1 14 | assert a0 == a0 15 | assert a1 == a1 16 | 17 | 18 | def test_get_set_item(): 19 | audit = Audit({"a": 100, "b": 200}) 20 | empty = Audit(dict()) 21 | assert audit["a"] == 100 22 | assert audit["b"] == 200 23 | empty["a"] = 100 24 | empty["b"] = 200 25 | assert audit == empty 26 | 27 | 28 | def test_contains(): 29 | audit = Audit({"a": 100, "b": 200}) 30 | assert "a" in audit 31 | assert not "c" in audit 32 | 33 | 34 | def test_property(): 35 | audit = Audit({"a": 100, "b": 200}) 36 | assert audit.audit == {"a": 100, "b": 200} 37 | empty = Audit(dict()) 38 | empty.audit = {"a": 100, "b": 200} 39 | assert empty == audit 40 | 41 | 42 | def test_unicode(): 43 | audit = Audit({"a": 100, "b": 200}) 44 | spacer = "#" * 78 45 | expected = "Audit Section: a\n" 46 | expected += spacer + u"\n\n" 47 | expected += "100\n\n" 48 | expected += "Audit Section: b\n" 49 | expected += spacer + "\n\n" 50 | expected += "200\n\n" 51 | assert unicode(expected) == unicode(audit) 52 | -------------------------------------------------------------------------------- /krun/tests/test_config.py: -------------------------------------------------------------------------------- 1 | from krun import LOGFILE_FILENAME_TIME_FORMAT 2 | from krun.config import 
Config 3 | from krun.env import EnvChangeAppend 4 | from krun.util import FatalKrunError 5 | 6 | import os 7 | import pytest 8 | import time 9 | import sys 10 | import krun.platform 11 | from distutils.spawn import find_executable 12 | 13 | 14 | JAVA = find_executable("java") 15 | from krun.tests import TEST_DIR 16 | 17 | def touch(fname): 18 | with open(fname, 'a'): 19 | os.utime(fname, None) 20 | 21 | 22 | def test_str(): 23 | path = os.path.join(TEST_DIR, "example.krun") 24 | config = Config(path) 25 | assert config.text == str(config) 26 | 27 | def test_eq(): 28 | path = os.path.join(TEST_DIR, "example.krun") 29 | example_config = Config(path) 30 | assert example_config == example_config 31 | assert not example_config == None 32 | assert not example_config == Config("krun/tests/quick.krun") 33 | 34 | 35 | def test_log_filename0001(): 36 | path = os.path.join(TEST_DIR, "example.krun") 37 | example_config = Config(path) 38 | expect_path = os.path.join(TEST_DIR, "example.log") 39 | assert example_config.log_filename(False) == expect_path 40 | 41 | 42 | def test_read_config_from_file(): 43 | path = os.path.join(TEST_DIR, "example.krun") 44 | config0 = Config(path) 45 | config1 = Config(None) 46 | config1.read_from_file(path) 47 | assert config0 == config1 48 | 49 | 50 | def test_check_config_consistency(): 51 | path = os.path.join(TEST_DIR, "example.krun") 52 | config = Config(path) 53 | with open(path) as fp: 54 | config_string = fp.read() 55 | config.check_config_consistency(config_string, "fakefilename") 56 | 57 | def test_check_config_consistency_fails(): 58 | path = os.path.join(TEST_DIR, "example.krun") 59 | config = Config(path) 60 | with open(path) as fp: 61 | config_string = fp.read() 62 | with pytest.raises(Exception) as excinfo: 63 | config.check_config_consistency(config_string + "\n# different config!", 64 | "fakefilename") 65 | assert "+# different config!" 
in excinfo.value.args[0] 66 | 67 | @pytest.mark.skipif(JAVA is None, reason="No Java found") 68 | def test_config_init(): 69 | path = os.path.join(TEST_DIR, "example.krun") 70 | config = Config(path) 71 | assert config is not None 72 | assert config.BENCHMARKS == {"dummy": 1000, "nbody": 1000} 73 | assert config.N_EXECUTIONS == 2 74 | assert config.SKIP == [] 75 | assert config.MAIL_TO == [] 76 | assert config.ITERATIONS_ALL_VMS == 5 77 | assert config.HEAP_LIMIT == 2097152 78 | 79 | 80 | def test_read_corrupt_config(): 81 | path = os.path.join(TEST_DIR, "corrupt.krun") 82 | with pytest.raises(Exception): 83 | Config(path) 84 | 85 | 86 | def test_results_filename(): 87 | example = os.path.join(TEST_DIR, "example.krun") 88 | touch(example) 89 | example_config = Config(example) 90 | # not exact match due to absolute path 91 | assert example_config.results_filename().endswith("example_results.json.bz2") 92 | 93 | 94 | def test_skip0001(): 95 | path = os.path.join(TEST_DIR, "skips.krun") 96 | config = Config(path) 97 | expected = ["*:PyPy:*", 98 | "*:CPython:*", 99 | "*:Hotspot:*", 100 | "*:Graal:*", 101 | "*:LuaJIT:*", 102 | "*:HHVM:*", 103 | "*:TruffleRuby:*", 104 | "*:V8:*", 105 | ] 106 | for triplet in expected: 107 | assert config.should_skip(triplet) 108 | assert config.should_skip("nbody:HHVM:default-php") 109 | assert not config.should_skip("nbody:MYVM:default-php") 110 | 111 | 112 | def test_skip0002(): 113 | config = Config() 114 | config.SKIP = ["mybench:CPython:default-python"] 115 | 116 | assert config.should_skip("mybench:CPython:default-python") 117 | assert not config.should_skip("myotherbench:CPython:default-python") 118 | assert not config.should_skip("mybench:PyPy:default-python") 119 | assert not config.should_skip("mybench:CPython:special-python") 120 | 121 | 122 | def test_skip0003(): 123 | config = Config() 124 | config.SKIP = ["*:CPython:default-python"] 125 | 126 | assert config.should_skip("mybench:CPython:default-python") 127 | assert config.should_skip("myotherbench:CPython:default-python") 128 | assert not config.should_skip("mybench:PyPy:default-python") 129 | assert not config.should_skip("mybench:CPython:special-python") 130 | 131 | 132 | def test_skip0004(): 133 | config = Config() 134 | config.SKIP = ["mybench:*:default-python"] 135 | 136 | assert config.should_skip("mybench:CPython:default-python") 137 | assert not config.should_skip("myotherbench:CPython:default-python") 138 | assert config.should_skip("mybench:PyPy:default-python") 139 | assert not config.should_skip("mybench:CPython:special-python") 140 | 141 | def test_skip0005(): 142 | config = Config() 143 | config.SKIP = ["mybench:CPython:*"] 144 | 145 | assert config.should_skip("mybench:CPython:default-python") 146 | assert not config.should_skip("myotherbench:CPython:default-python") 147 | assert not config.should_skip("mybench:PyPy:default-python") 148 | assert config.should_skip("mybench:CPython:special-python") 149 | 150 | 151 | def test_skip0006(): 152 | config = Config() 153 | config.SKIP = ["*:*:*"] 154 | 155 | assert config.should_skip("mybench:CPython:default-python") 156 | assert config.should_skip("myotherbench:CPython:default-python") 157 | assert config.should_skip("mybench:PyPy:default-python") 158 | assert config.should_skip("mybench:CPython:special-python") 159 | 160 | 161 | def test_skip0007(): 162 | config = Config() 163 | 164 | with pytest.raises(ValueError) as e: 165 | config.should_skip("wobble") 166 | 167 | assert e.value.args[0] == "bad benchmark key: wobble" 168 | 169 | def 
test_skip0008(): 170 | config = Config() 171 | config.SKIP = ["*:SomeVM:*", "fasta:TruffleRuby:default-ruby"] 172 | 173 | assert config.should_skip("fasta:TruffleRuby:default-ruby") 174 | 175 | def test_skip0009(): 176 | config = Config() 177 | config.SKIP = ["*:SomeVM:*", 178 | "fasta:TruffleRuby:default-ruby", 179 | "bench:*:*", 180 | "bench:vm:skipvariant", 181 | "*:*:skipvariant", 182 | ] 183 | 184 | assert config.should_skip("fasta:TruffleRuby:default-ruby") 185 | assert not config.should_skip("fasta:TruffleRuby:default-ruby2") 186 | assert config.should_skip("bench:lala:hihi") 187 | assert config.should_skip("bench:lala:hihi2") 188 | assert not config.should_skip("bench1:lala:hihi") 189 | assert config.should_skip("bench1:lala:skipvariant") 190 | assert config.should_skip("bench1:lala2:skipvariant") 191 | 192 | def test_skip0010(): 193 | config = Config() 194 | config.SKIP = ["*:SomeVM:*", 195 | "fasta:TruffleRuby:default-ruby", 196 | "bench:*:*", 197 | "bench:vm:skipvariant", 198 | "*:*:skipvariant", 199 | "*:*:*", # everything should be skipped due to this 200 | ] 201 | 202 | import uuid 203 | def rand_str(): 204 | return uuid.uuid4().hex 205 | 206 | for i in xrange(25): 207 | key = "%s:%s:%s" % tuple([rand_str() for x in xrange(3)]) 208 | assert config.should_skip(key) 209 | 210 | def test_temp_read_pause0001(): 211 | config = Config() 212 | assert config.TEMP_READ_PAUSE == 60 # default 213 | 214 | def test_temp_read_pause0002(): 215 | config = Config(os.path.join(TEST_DIR, "example.krun")) 216 | assert config.TEMP_READ_PAUSE == 1 217 | 218 | 219 | def test_user_env0001(): 220 | config = Config(os.path.join(TEST_DIR, "env.krun")) 221 | vm_def = config.VMS["CPython"]["vm_def"] 222 | 223 | env = {} 224 | vm_def.apply_env_changes([], env) 225 | assert env == { 226 | 'ANOTHER_ENV': 'arbitrary_user_val', 227 | 'LD_LIBRARY_PATH': '/wibble/lib', 228 | } 229 | 230 | 231 | def test_user_env0002(): 232 | config = Config(os.path.join(TEST_DIR, "env.krun")) 233 | vm_def = config.VMS["CPython"]["vm_def"] 234 | 235 | env = {"LD_LIBRARY_PATH": "zzz"} 236 | vm_def.apply_env_changes([], env) 237 | assert env == { 238 | 'ANOTHER_ENV': 'arbitrary_user_val', 239 | 'LD_LIBRARY_PATH': 'zzz:/wibble/lib', 240 | } 241 | 242 | 243 | def test_user_env0003(): 244 | config = Config(os.path.join(TEST_DIR, "env.krun")) 245 | vm_def = config.VMS["CPython"]["vm_def"] 246 | 247 | env = {"LD_LIBRARY_PATH": "zzz"} 248 | 249 | bench_env_changes = [EnvChangeAppend("LD_LIBRARY_PATH", "abc")] 250 | vm_def.apply_env_changes(bench_env_changes, env) 251 | assert env == { 252 | 'ANOTHER_ENV': 'arbitrary_user_val', 253 | 'LD_LIBRARY_PATH': 'zzz:/wibble/lib:abc', 254 | } 255 | 256 | def test_user_env0004(): 257 | """Interesting case as PyPy forces a lib path at the VM level""" 258 | 259 | config = Config(os.path.join(TEST_DIR, "env.krun")) 260 | vm_def = config.VMS["PyPy"]["vm_def"] 261 | 262 | env = {} 263 | 264 | vm_def.apply_env_changes([], env) 265 | # Expect the user's env to come first 266 | assert env == { 267 | 'ANOTHER_ENV': 'arbitrary_user_val', 268 | 'LD_LIBRARY_PATH': '/wibble/lib:/opt/pypy/pypy/goal', 269 | } 270 | 271 | def test_space_in_vm_name0001(): 272 | path = os.path.join(TEST_DIR, "space_in_vm_name.krun") 273 | with pytest.raises(FatalKrunError) as e: 274 | config = Config(path) 275 | assert "VM names must not contain spaces" in str(e) 276 | 277 | 278 | def test_space_in_benchmark_name0001(): 279 | path = os.path.join(TEST_DIR, "space_in_benchmark_name.krun") 280 | with pytest.raises(FatalKrunError) as e: 
281 | config = Config(path) 282 | assert "Benchmark names must not contain spaces" in str(e) 283 | 284 | 285 | def test_space_in_variant_name0001(): 286 | path = os.path.join(TEST_DIR, "space_in_variant_name.krun") 287 | with pytest.raises(FatalKrunError) as e: 288 | config = Config(path) 289 | assert "Variant names must not contain spaces" in str(e) 290 | 291 | 292 | def test_custom_dmesg_whitelist0001(monkeypatch): 293 | """Test a config file that appends two patterns to the default whitelist""" 294 | 295 | path = os.path.join(TEST_DIR, "custom_dmesg_whitelist0001.krun") 296 | config = Config(path) 297 | platform = krun.platform.detect_platform(None, config) 298 | patterns = [p.pattern for p in platform.get_dmesg_whitelist()] 299 | assert patterns == platform.default_dmesg_whitelist() + \ 300 | ["^custom1*", "^.custom2$"] 301 | 302 | 303 | def test_custom_dmesg_whitelist0002(monkeypatch): 304 | """Test a config file that replaces entirely the dmesg whitelist""" 305 | 306 | path = os.path.join(TEST_DIR, "custom_dmesg_whitelist0002.krun") 307 | config = Config(path) 308 | platform = krun.platform.detect_platform(None, config) 309 | patterns = [p.pattern for p in platform.get_dmesg_whitelist()] 310 | assert patterns == ["^.no+", "^defaults", "^here+"] 311 | 312 | 313 | def test_custom_dmesg_whitelist0003(monkeypatch): 314 | """Test a config file that uses no custom whitelist""" 315 | 316 | path = os.path.join(TEST_DIR, "example.krun") 317 | config = Config(path) 318 | platform = krun.platform.detect_platform(None, config) 319 | patterns = [p.pattern for p in platform.get_dmesg_whitelist()] 320 | assert patterns == platform.default_dmesg_whitelist() 321 | -------------------------------------------------------------------------------- /krun/tests/test_entry_point.py: -------------------------------------------------------------------------------- 1 | from krun import EntryPoint 2 | 3 | 4 | def test_eq(): 5 | ep0 = EntryPoint("test0", subdir="/root/") 6 | ep1 = EntryPoint("test1", subdir="/home/krun/") 7 | assert ep0 == ep0 8 | assert ep1 == ep1 9 | assert not ep0 == ep1 10 | assert not ep0 == list() 11 | -------------------------------------------------------------------------------- /krun/tests/test_env.py: -------------------------------------------------------------------------------- 1 | from krun.env import EnvChangeSet, EnvChangeAppend 2 | from krun.util import FatalKrunError 3 | 4 | import os 5 | import pytest 6 | 7 | 8 | def test_env_change_set(monkeypatch, caplog): 9 | env = EnvChangeSet("bach", 1685) 10 | assert env.var == "bach" 11 | assert env.val == 1685 12 | with pytest.raises(FatalKrunError): 13 | env.apply({"bach": 1695}) 14 | assert "Environment bach is already defined" in caplog.text 15 | 16 | 17 | def test_env_change_set_apply(): 18 | env = EnvChangeSet("bach", 1685) 19 | my_dict = {"handel": 1685} 20 | env.apply(my_dict) 21 | assert my_dict["bach"] == 1685 22 | assert my_dict["handel"] == 1685 23 | 24 | 25 | def test_env_change_append(): 26 | env = EnvChangeAppend("bach", 1685) 27 | assert env.var == "bach" 28 | assert env.val == 1685 29 | my_dict0 = {"handel": 1685} 30 | env.apply(my_dict0) 31 | assert my_dict0["bach"] == 1685 32 | assert my_dict0["handel"] == 1685 33 | my_dict1 = {"bach": 1750, "handel": 1759} 34 | env.apply(my_dict1) 35 | assert my_dict1["bach"] == "1750" + os.pathsep + "1685" 36 | assert my_dict1["handel"] == 1759 37 | 38 | 39 | def test_env_apply_all(): 40 | env0 = EnvChangeSet("bach", 1685) 41 | env1 = EnvChangeSet("handel", 1685) 42 | env2 = 
EnvChangeSet("vivaldi", 1678) 43 | assert env0.var == "bach" 44 | assert env0.val == 1685 45 | dict0 = {} 46 | dict1 = {"handel": 1685, "vivaldi": 1678, "bach": 1685} 47 | env0.apply_all((env0, env1, env2), dict0) 48 | assert dict0 == dict1 49 | -------------------------------------------------------------------------------- /krun/tests/test_genericplatform.py: -------------------------------------------------------------------------------- 1 | from krun.tests import BaseKrunTest 2 | from krun.util import FatalKrunError 3 | from krun.platform import BasePlatform 4 | from krun.tests.mocks import mock_manifest 5 | import pytest 6 | import re 7 | 8 | class TestGenericPlatform(BaseKrunTest): 9 | """Platform tests that can be run on any platform""" 10 | 11 | def test_temperature_thresholds0001(self, mock_platform, monkeypatch, caplog): 12 | temps = {"x": 30.0, "y": 12.34, "z": 666.0} 13 | mock_platform.temp_sensors = ["x", "y", "z"] 14 | mock_platform.starting_temperatures = temps 15 | 16 | def mock_take_temperature_readings(): 17 | # a little hotter than we started 18 | return {name: temp + 1 for name, temp in temps.iteritems()} 19 | monkeypatch.setattr(mock_platform, "take_temperature_readings", 20 | mock_take_temperature_readings) 21 | 22 | mock_platform.wait_for_temperature_sensors(testing=True) 23 | # should exit without crashing, no assert. 24 | 25 | def test_temperature_thresholds0002(self, mock_platform, monkeypatch, caplog): 26 | temps = {"x": 30.0} 27 | mock_platform.temp_sensors = ["x"] 28 | mock_platform.starting_temperatures = temps 29 | 30 | def mock_take_temperature_readings(): 31 | return {"x": 999} # system on fire 32 | monkeypatch.setattr(mock_platform, "take_temperature_readings", 33 | mock_take_temperature_readings) 34 | 35 | with pytest.raises(FatalKrunError): 36 | mock_platform.wait_for_temperature_sensors(testing=True) 37 | 38 | expect = ("Temperature timeout: Temperature reading 'x' not " 39 | "within interval: (27 <= 999 <= 33)") 40 | assert expect in caplog.text 41 | 42 | def test_temperature_thresholds0003(self, mock_platform, monkeypatch, caplog): 43 | temps = {"x": 30.0} 44 | mock_platform.temp_sensors = ["x"] 45 | mock_platform.starting_temperatures = temps 46 | 47 | def mock_take_temperature_readings(): 48 | return {"x": -999} # system in the arctic 49 | monkeypatch.setattr(mock_platform, "take_temperature_readings", 50 | mock_take_temperature_readings) 51 | 52 | with pytest.raises(FatalKrunError): 53 | mock_platform.wait_for_temperature_sensors(testing=True) 54 | 55 | expect = ("Temperature timeout: Temperature reading 'x' not " 56 | "within interval: (27 <= -999 <= 33)") 57 | assert expect in caplog.text 58 | 59 | def test_temperature_thresholds0004(self, mock_platform, monkeypatch, caplog): 60 | temps = {"x": 30.0} 61 | mock_platform.temp_sensors = ["x"] 62 | mock_platform.starting_temperatures = temps 63 | 64 | def mock_take_temperature_readings(): 65 | return {"x": 999} # system on fire 66 | monkeypatch.setattr(mock_platform, "take_temperature_readings", 67 | mock_take_temperature_readings) 68 | 69 | flag, _ = mock_platform.temp_sensors_within_interval() 70 | assert flag == BasePlatform.TEMP_TOO_HOT 71 | 72 | def test_temperature_thresholds0005(self, mock_platform, monkeypatch, caplog): 73 | temps = {"x": 30.0} 74 | mock_platform.temp_sensors = ["x"] 75 | mock_platform.starting_temperatures = temps 76 | 77 | def mock_take_temperature_readings(): 78 | return {"x": -999} # system in the arctic again 79 | monkeypatch.setattr(mock_platform, 
"take_temperature_readings", 80 | mock_take_temperature_readings) 81 | 82 | flag, _ = mock_platform.temp_sensors_within_interval() 83 | assert flag == BasePlatform.TEMP_TOO_COLD 84 | 85 | def test_temperature_thresholds0006(self, mock_platform, monkeypatch, caplog): 86 | temps = {"x": 30.0} 87 | mock_platform.temp_sensors = ["x"] 88 | mock_platform.starting_temperatures = temps 89 | 90 | def mock_take_temperature_readings(): 91 | return {"x": 31} # almost spot on 92 | monkeypatch.setattr(mock_platform, "take_temperature_readings", 93 | mock_take_temperature_readings) 94 | 95 | flag, _ = mock_platform.temp_sensors_within_interval() 96 | assert flag == BasePlatform.TEMP_OK 97 | 98 | def test_inconsistent_sensors0001(self, platform, caplog): 99 | # The platform has already detected the available sensors. Now we 100 | # confuse it, by moving a sensor. This shouldn't happen of course, but 101 | # tests are good nevertheless. 102 | 103 | if not platform.temp_sensors: 104 | pytest.skip("no temperature sensors") 105 | 106 | platform.temp_sensors[0] += "_moved" 107 | 108 | with pytest.raises(FatalKrunError): 109 | platform.take_temperature_readings() 110 | 111 | expect = "Failed to read sensor" 112 | assert expect in caplog.text 113 | 114 | 115 | def test_inconsistent_sensors0002(self, platform, caplog): 116 | platform.temp_sensors = ["different", "sensors"] 117 | 118 | with pytest.raises(FatalKrunError): 119 | platform.starting_temperatures = {"a": 1000, "b": 2000} 120 | 121 | expect = "Inconsistent sensors. ['a', 'b'] vs ['different', 'sensors']" 122 | assert expect in caplog.text 123 | 124 | def test_inconsistent_sensors0003(self, platform, caplog): 125 | platform.temp_sensors = ["a"] 126 | 127 | with pytest.raises(FatalKrunError): 128 | platform.starting_temperatures = {"a": 1000, "b": 2000} 129 | 130 | expect = "Inconsistent sensors. ['a', 'b'] vs ['a']" 131 | assert expect in caplog.text 132 | 133 | def test_dmesg_filter0001(self, mock_platform, caplog, mock_manifest): 134 | last_dmesg = ["line1", "line2"] 135 | new_dmesg = ["line1", "line2", "line3"] 136 | 137 | # this should indicate change 138 | assert mock_platform._check_dmesg_for_changes( 139 | [], last_dmesg, new_dmesg, mock_manifest) 140 | 141 | # and the log will indicate this also 142 | assert "New dmesg lines" in caplog.text 143 | assert "\nline3" in caplog.text 144 | 145 | def test_dmesg_filter0002(self, mock_platform, caplog, mock_manifest): 146 | last_dmesg = ["line1", "line2"] 147 | new_dmesg = ["line1", "line2", "sliced_bread"] 148 | 149 | # this should indicate no change because we allowed the change 150 | patterns = [re.compile("red.*herring"), re.compile("sl[ixd]c.*bread$")] 151 | assert not mock_platform._check_dmesg_for_changes( 152 | patterns, last_dmesg, new_dmesg, mock_manifest) 153 | 154 | def test_dmesg_filter0003(self, mock_platform, caplog, mock_manifest): 155 | # simulate 2 lines falling off the top of the dmesg buffer 156 | last_dmesg = ["line1", "line2", "line3", "line4"] 157 | new_dmesg = ["line3", "line4"] 158 | 159 | # despite lines dropping off, this should still indicate no change 160 | assert not mock_platform._check_dmesg_for_changes( 161 | [], last_dmesg, new_dmesg, mock_manifest) 162 | 163 | def test_dmesg_filter0004(self, mock_platform, caplog, mock_manifest): 164 | # simulate 2 lines falling off the top of the dmesg buffer, *and* a 165 | # new line coming on the bottom of the buffer. 
166 | last_dmesg = ["line1", "line2", "line3", "line4"] 167 | new_dmesg = ["line3", "line4", "line5"] 168 | 169 | # line5 is a problem 170 | assert mock_platform._check_dmesg_for_changes( 171 | [], last_dmesg, new_dmesg, mock_manifest) 172 | assert "\nline5\n" in caplog.text 173 | for num in xrange(1, 5): 174 | assert not ("\nline%s\n" % num) in caplog.text 175 | 176 | def test_dmesg_filter0005(self, mock_platform, caplog, mock_manifest): 177 | # simulate 2 lines falling off the top of the dmesg buffer, *and* a 178 | # new line coming on the bottom of the buffer, but the filter accepts 179 | # the new line. 180 | last_dmesg = ["line1", "line2", "line3", "line4"] 181 | new_dmesg = ["line3", "line4", "line5"] 182 | 183 | patterns = [re.compile(".*5$")] 184 | assert not mock_platform._check_dmesg_for_changes( 185 | patterns, last_dmesg, new_dmesg, mock_manifest) 186 | 187 | def test_dmesg_filter0006(self, mock_platform, caplog, mock_manifest): 188 | # Simulate partial line falling off the dmesg buffer due to a new line. 189 | # The change incurred by the partial line should not trigger our "dmesg 190 | # changed" flagging code. 191 | last_dmesg = ["line1", "line2", "line3"] 192 | new_dmesg = ["e1", "line2", "line3", "xx"] # 3 chars 'xx\n' 193 | 194 | patterns = [re.compile("^xx$")] 195 | assert not mock_platform._check_dmesg_for_changes( 196 | patterns, last_dmesg, new_dmesg, mock_manifest) 197 | 198 | def test_dmesg_filter0007(self, mock_platform, caplog, mock_manifest): 199 | # Simulate partial dmesg buffer completely replaced! 200 | # This should be an error as we have potentially missed other 201 | # important messages that flew off the top of the buffer too! 202 | last_dmesg = ["x", "x", "x"] 203 | new_dmesg = ["y", "y", "y"] 204 | 205 | assert mock_platform._check_dmesg_for_changes( 206 | [], last_dmesg, new_dmesg, mock_manifest) 207 | -------------------------------------------------------------------------------- /krun/tests/test_mailer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from krun.tests.mocks import mock_mailer, mock_platform 4 | from krun.scheduler import ManifestManager 5 | from krun.tests import TEST_DIR 6 | from krun.config import Config 7 | from contextlib import contextmanager 8 | 9 | 10 | @pytest.yield_fixture 11 | def example_manifest(mock_platform): 12 | # setup 13 | config = Config(os.path.join(TEST_DIR, "example.krun")) 14 | manifest = ManifestManager(config, mock_platform, new_file=True) 15 | 16 | yield manifest 17 | 18 | # teardown 19 | if os.path.exists(manifest.path): 20 | os.unlink(manifest.path) 21 | 22 | 23 | def test_mailer0001(mock_mailer, example_manifest): 24 | mock_mailer.max_mails = 5 25 | mock_mailer.set_recipients(["noone@localhost"]) 26 | 27 | assert example_manifest.num_mails_sent == 0 28 | mock_mailer.send("subject1", "body1", manifest=example_manifest) 29 | assert example_manifest.num_mails_sent == 1 30 | 31 | example_manifest._parse() 32 | assert example_manifest.num_mails_sent == 1 33 | 34 | assert len(mock_mailer.sent) == 1 35 | msg = mock_mailer.sent[0] 36 | assert msg["subject"] == "[krun:tests] subject1" 37 | assert msg["to"] == "noone@localhost" 38 | expect_body = "Message from krun running on tests.suite:\n\nbody1\n" 39 | assert msg.get_payload() == expect_body 40 | 41 | 42 | def test_mailer0002(mock_mailer, example_manifest): 43 | mock_mailer.max_mails = 5 44 | mock_mailer.set_recipients(["noone@localhost", "ghandi@localhost", 45 | "rasputin@localhost"]) 46 | 47 
| assert example_manifest.num_mails_sent == 0 48 | subject = "subject longer, much longer, blah, wibble, noodles" 49 | mock_mailer.send(subject, "body1\nbody2\nbody3", manifest=example_manifest) 50 | assert example_manifest.num_mails_sent == 1 51 | 52 | example_manifest._parse() 53 | assert example_manifest.num_mails_sent == 1 54 | 55 | assert len(mock_mailer.sent) == 1 56 | msg = mock_mailer.sent[0] 57 | assert msg["subject"] == "[krun:tests] %s" % subject 58 | 59 | assert msg["to"] == "noone@localhost, ghandi@localhost, rasputin@localhost" 60 | expect_body = "Message from krun running on tests.suite:\n" \ 61 | "\nbody1\nbody2\nbody3\n" 62 | assert msg.get_payload() == expect_body 63 | 64 | 65 | def test_mailer0003(mock_mailer, example_manifest): 66 | """Check message limit works""" 67 | mock_mailer.max_mails = 3 68 | mock_mailer.set_recipients(["noone@localhost"]) 69 | 70 | assert example_manifest.num_mails_sent == 0 71 | for i in xrange(10): # too many emails 72 | mock_mailer.send("subject%s" % i, "body%s" % i, manifest=example_manifest) 73 | 74 | assert example_manifest.num_mails_sent == 3 75 | msgs = mock_mailer.sent 76 | assert len(msgs) == 3 77 | for i in xrange(3): 78 | assert msgs[i]["subject"].endswith("subject%s" % i) 79 | 80 | # It should however, be possible to send more mail by bypassing the limit 81 | mock_mailer.send("subject", "body", bypass_limiter=True) 82 | assert len(msgs) == 4 83 | 84 | 85 | def test_mailer0004(mock_mailer): 86 | """Check mailing with no manifest works as expected""" 87 | 88 | mock_mailer.max_mails = 3 89 | mock_mailer.set_recipients(["noone@localhost"]) 90 | 91 | with pytest.raises(AssertionError): 92 | mock_mailer.send("subject", "body") # No manifest 93 | 94 | assert len(mock_mailer.sent) == 0 95 | mock_mailer.send("subject", "body", bypass_limiter=True) # no raise 96 | assert len(mock_mailer.sent) == 1 97 | -------------------------------------------------------------------------------- /krun/tests/test_openbsdplatform.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import krun.platform 3 | import sys 4 | from krun.tests import BaseKrunTest, subst_env_arg 5 | from krun.util import run_shell_cmd, FatalKrunError 6 | from krun.vm_defs import PythonVMDef 7 | from krun.platform import OpenBSDPlatform 8 | 9 | 10 | def make_dummy_get_apm_output_fn(output): 11 | def _get_apm_output(self): 12 | return output 13 | return _get_apm_output 14 | 15 | @pytest.mark.skipif(not sys.platform.startswith("openbsd"), reason="not OpenBSD") 16 | class TestOpenBSDPlatform(BaseKrunTest): 17 | """Check stuff specific to OpenBSD in krun.platform""" 18 | 19 | def test_take_temperature_readings0001(self, platform): 20 | """Test live readings off test machine""" 21 | 22 | temps = platform.take_temperature_readings() 23 | assert type(temps) is dict 24 | assert all([x.startswith("hw.sensors.") for x in temps.iterkeys()]) 25 | # check temperature readings are within reasonable parameters 26 | assert all([type(v) == float for v in temps.itervalues()]) 27 | assert all([10 <= v <= 120 for v in temps.itervalues()]) 28 | 29 | def test_take_temperature_readings0002(self, platform, monkeypatch): 30 | """Test with fake readings""" 31 | 32 | platform.temp_sensors = [ 33 | "hw.sensors.cpu0.temp0", 34 | "hw.sensors.acpitz0.temp0", 35 | ] 36 | 37 | def fake__raw_read_temperature_sensor(self, sensor): 38 | if sensor == "hw.sensors.cpu0.temp0": 39 | return "hw.sensors.cpu0.temp0=64.00 degC" 40 | elif sensor == 
"hw.sensors.acpitz0.temp0": 41 | return "hw.sensors.acpitz0.temp0=65.58 degC (zone temperature)" 42 | else: 43 | assert False 44 | 45 | monkeypatch.setattr(krun.platform.OpenBSDPlatform, 46 | "_raw_read_temperature_sensor", 47 | fake__raw_read_temperature_sensor) 48 | 49 | # Results were already in degrees C 50 | expect = { 51 | "hw.sensors.cpu0.temp0": 64.00, 52 | "hw.sensors.acpitz0.temp0": 65.58, 53 | } 54 | got = platform.take_temperature_readings() 55 | 56 | assert expect == got 57 | 58 | def test_read_broken_temperatures0001(self, monkeypatch, platform, caplog): 59 | platform.temp_sensors = ["hw.sensors.some_temp0"] 60 | 61 | def dummy(self, sensor): 62 | # Unit is missing (expect degC suffix) 63 | return "hw.sensors.some_temp0=10" 64 | 65 | monkeypatch.setattr(krun.platform.OpenBSDPlatform, 66 | "_raw_read_temperature_sensor", dummy) 67 | 68 | with pytest.raises(FatalKrunError): 69 | platform.take_temperature_readings() 70 | 71 | assert "Odd non-degC value" in caplog.text 72 | 73 | def test_read_broken_temperatures0002(self, monkeypatch, platform, caplog): 74 | platform.temp_sensors = ["hw.sensors.some_temp0"] 75 | 76 | def dummy(self, sensor): 77 | # value (prior to degC) should be float()able 78 | return "hw.sensors.some_temp0=inferno degC" 79 | 80 | monkeypatch.setattr(krun.platform.OpenBSDPlatform, 81 | "_raw_read_temperature_sensor", dummy) 82 | 83 | with pytest.raises(FatalKrunError): 84 | platform.take_temperature_readings() 85 | 86 | assert "Non-numeric value" in caplog.text 87 | 88 | def test_read_broken_temperatures0003(self, monkeypatch, platform, caplog): 89 | platform.temp_sensors = ["hw.sensors.some_temp0"] 90 | 91 | def dummy(self, sensor): 92 | # Weird unit (not degC) 93 | return "hw.sensors.some_temp0=66 kravits" 94 | 95 | monkeypatch.setattr(krun.platform.OpenBSDPlatform, 96 | "_raw_read_temperature_sensor", dummy) 97 | 98 | with pytest.raises(FatalKrunError): 99 | platform.take_temperature_readings() 100 | 101 | assert "Odd non-degC value" in caplog.text 102 | 103 | def test_apm_state0001(self, platform, caplog): 104 | run_shell_cmd("apm -C") # cool mode; forces krun to change this. 105 | 106 | platform._check_apm_state() 107 | 108 | if "hw.setperf is not available" in caplog.text: 109 | pytest.skip() 110 | 111 | assert "performance mode is not manual" in caplog.text 112 | # Hard to check hw.setperf, as it may well be temproarily 100 113 | assert "adjusting performance mode" in caplog.text 114 | 115 | out, err, rc = run_shell_cmd("test `sysctl hw.setperf` == 'hw.setperf=100'") 116 | assert out == err == "" 117 | assert rc == 0 118 | 119 | # cool mode. 120 | # Sadly there is no way to query the current mode (e.g. -C or -H), 121 | # othwerwise we could restore the APM state to how the user had it 122 | # before. 123 | run_shell_cmd("apm -C") 124 | 125 | def test_apm_state0002(self, platform, caplog, monkeypatch): 126 | monkey_func = make_dummy_get_apm_output_fn("flibbles") 127 | monkeypatch.setattr(krun.platform.OpenBSDPlatform, 128 | "_get_apm_output", monkey_func) 129 | 130 | with pytest.raises(FatalKrunError): 131 | platform._check_apm_state() 132 | assert "Expected 3 lines of output from apm(8)" in caplog.text 133 | 134 | def test_save_power0001(self, platform): 135 | run_shell_cmd("apm -H") 136 | platform.save_power() 137 | out, _, _ = run_shell_cmd("apm") 138 | lines = out.split("\n") 139 | line3 = lines[2].strip() 140 | assert line3.startswith("Performance adjustment mode: auto") 141 | # Would have been "manual" if we were still in "high-performance" mode. 
142 | 143 | def test_bench_cmdline_adjust0001(self, platform): 144 | expect = ['env', 'LD_LIBRARY_PATH=', 'MALLOC_OPTIONS=cfgrux'] 145 | 146 | args = subst_env_arg(platform.bench_cmdline_adjust([], {}), "LD_LIBRARY_PATH") 147 | assert args == expect 148 | 149 | def test_bench_cmdline_adjust0002(self, platform): 150 | expect = ['env', 'MYENV=some_value', 'LD_LIBRARY_PATH=', 151 | 'MALLOC_OPTIONS=cfgrux', 'myarg'] 152 | 153 | args = subst_env_arg(platform.bench_cmdline_adjust( 154 | ["myarg"], {"MYENV": "some_value"}), "LD_LIBRARY_PATH") 155 | assert args == expect 156 | 157 | def test_wrapper_args0001(self, platform): 158 | vm_def = PythonVMDef('/dummy/bin/python') 159 | vm_def.set_platform(platform) 160 | wrapper_filename = "/tmp/abcdefg.dash" 161 | got = vm_def._wrapper_args(wrapper_filename) 162 | expect = ['/usr/local/bin/sudo', '-u', 'root', '/usr/bin/nice', '-n', '-20', 163 | '/usr/local/bin/sudo', '-u', 'krun', '/usr/local/bin/dash', 164 | wrapper_filename] 165 | assert got == expect 166 | 167 | def test_wrapper_args0002(self, platform): 168 | # Pinning isn't supported on OpenBSD, so it should make no difference 169 | platform.config.ENABLE_PINNING = False 170 | 171 | vm_def = PythonVMDef('/dummy/bin/python') 172 | vm_def.set_platform(platform) 173 | wrapper_filename = "/tmp/abcdefg.dash" 174 | got = vm_def._wrapper_args(wrapper_filename) 175 | expect = ['/usr/local/bin/sudo', '-u', 'root', '/usr/bin/nice', '-n', '-20', 176 | '/usr/local/bin/sudo', '-u', 'krun', '/usr/local/bin/dash', 177 | wrapper_filename] 178 | assert got == expect 179 | 180 | def test_is_virtual0001(self, monkeypatch): 181 | """Check a machine with vio disks is flagged as virtual""" 182 | 183 | def fake_collect_dmesg_lines(_): 184 | return [ 185 | 'real mem = 17074860032 (16283MB)', 186 | 'avail mem = 16550350848 (15783MB)', 187 | 'virtio3 at pci0 dev 4 function 0 "OpenBSD VMM Control" rev 0x00', 188 | ] 189 | monkeypatch.setattr(OpenBSDPlatform, "_collect_dmesg_lines", fake_collect_dmesg_lines) 190 | platform = OpenBSDPlatform(None, None) 191 | assert platform.is_virtual() 192 | -------------------------------------------------------------------------------- /krun/tests/test_process.py: -------------------------------------------------------------------------------- 1 | from krun.util import print_stderr_linewise 2 | 3 | def test_quadratic(): 4 | l = [] 5 | pr = print_stderr_linewise(l.append) 6 | pr.next() # start it 7 | for i in range(100): 8 | pr.send("abc") 9 | pr.send("\n" * 1000000) 10 | 11 | def test_print_stderr_linewise(): 12 | l = [] 13 | pr = print_stderr_linewise(l.append) 14 | pr.next() # start it 15 | pr.send("abc") 16 | assert l == [] 17 | pr.send("def") 18 | assert l == [] 19 | pr.send("\n") 20 | assert l == ["stderr: abcdef"] 21 | pr.send("ab\nde\nfg") 22 | assert l == ["stderr: abcdef", "stderr: ab", "stderr: de"] 23 | pr.send("\n") 24 | assert l == ["stderr: abcdef", "stderr: ab", "stderr: de", "stderr: fg"] 25 | -------------------------------------------------------------------------------- /krun/tests/test_results.py: -------------------------------------------------------------------------------- 1 | from krun.config import Config 2 | from krun.results import Results 3 | from krun.tests import BaseKrunTest 4 | from krun.util import FatalKrunError 5 | 6 | import os 7 | import pytest 8 | 9 | TEST_DIR = os.path.abspath(os.path.dirname(__file__)) 10 | 11 | 12 | @pytest.fixture 13 | def no_results_instantiation_check(monkeypatch): 14 | monkeypatch.setattr(Results, 'instantiation_check', lambda 
self: None) 15 | 16 | 17 | @pytest.fixture 18 | def fake_results(mock_platform, no_results_instantiation_check): 19 | results = Results(None, mock_platform) 20 | mock_platform.num_cpus = 2 21 | mock_platform.num_per_core_measurements = 2 22 | 23 | results.eta_estimates = {"bench:vm:variant": [1., 1.]} 24 | results.wallclock_times = {"bench:vm:variant": [[2., 2.], [2., 2.]]} 25 | results.core_cycle_counts = {"bench:vm:variant": 26 | [[[3., 3.], [3., 3.,]], [[3., 3.], [3., 3.]]]} 27 | results.aperf_counts = {"bench:vm:variant": 28 | [[[4., 4.], [4., 4.,]], [[4., 4.], [4., 4.]]]} 29 | results.mperf_counts = {"bench:vm:variant": 30 | [[[5., 5.], [5., 5.,]], [[5., 5.], [5., 5.]]]} 31 | results.pexec_flags = {"bench:vm:variant": ["C", "T"]} 32 | return results 33 | 34 | 35 | class TestResults(BaseKrunTest): 36 | """Test the results data structure and file.""" 37 | 38 | def test_eq(self, mock_platform, no_results_instantiation_check): 39 | results = Results(None, None, 40 | results_file="krun/tests/quick_results.json.bz2") 41 | assert results == results 42 | assert not results == None 43 | assert not results == \ 44 | Results(Config("krun/tests/example.krun"), mock_platform) 45 | 46 | 47 | def test_dump_config(self, no_results_instantiation_check): 48 | """Simulates krun.py --dump-config RESULTS_FILE.json.bz2 49 | """ 50 | 51 | res_path = os.path.join(TEST_DIR, "quick_results.json.bz2") 52 | conf_path = os.path.join(TEST_DIR, "quick.krun") 53 | results = Results(None, None, results_file=res_path) 54 | with open(conf_path) as fp: 55 | config = fp.read() 56 | assert config == results.dump("config") 57 | 58 | 59 | def test_read_results_from_disk(self, no_results_instantiation_check): 60 | config = Config("krun/tests/quick.krun") 61 | results = Results(config, None, 62 | results_file="krun/tests/quick_results.json.bz2") 63 | expected = {u'nbody:CPython:default-python': [[0.022256]], 64 | u'dummy:CPython:default-python': [[1.005115]], 65 | u'nbody:Java:default-java': [[26.002632]], 66 | u'dummy:Java:default-java': [[1.000941]]} 67 | assert results.config == config 68 | assert results.audit[u'uname'] == u'Linux' 69 | assert results.audit[u'debian_version'] == u'jessie/sid' 70 | assert results.wallclock_times == expected 71 | assert results.starting_temperatures == {"x": 3333, "y": 4444} 72 | assert results.eta_estimates == \ 73 | { 74 | u'nbody:CPython:default-python': [0.022256], 75 | u'dummy:CPython:default-python': [1.005115], 76 | u'nbody:Java:default-java': [26.002632], 77 | u'dummy:Java:default-java': [1.000941] 78 | } 79 | 80 | 81 | def test_write_results_to_disk(self, mock_platform, 82 | no_results_instantiation_check): 83 | config = Config("krun/tests/example.krun") 84 | mock_platform.num_cpus = 4 85 | mock_platform.num_per_core_measurements = mock_platform.num_cpus 86 | out_file = "krun/tests/example_results.json.bz2" 87 | results0 = Results(config, mock_platform) 88 | results0.audit = dict() 89 | results0.starting_temperatures = [4355, 9879] 90 | results0.wallclock_times = {u"dummy:Java:default-java": [[1.000726]]} 91 | results0.eta_estimates = {u"dummy:Java:default-java": [1.1]} 92 | results0.core_cycle_counts = {u"dummy:Java:default-java": [[[2], [3], [4], [5]]]} 93 | results0.aperf_counts = {u"dummy:Java:default-java": [[[3], [4], [5], [6]]]} 94 | results0.mperf_counts = {u"dummy:Java:default-java": [[[4], [5], [6], [7]]]} 95 | results0.pexec_flags = {u"dummy:Java:default-java": [[["C"], ["C"], ["C"], ["C"]]]} 96 | results0.reboots = 5 97 | results0.error_flag = False 98 | 
results0.write_to_file() 99 | results1 = Results(config, None, results_file=out_file) 100 | assert results0 == results1 101 | # Clean-up generated file. 102 | os.unlink(out_file) 103 | 104 | def test_integrity_check_results0001(self, fake_results): 105 | """ETAs don't exist for all jobs for which there is iterations data""" 106 | 107 | fake_results.integrity_check() 108 | 109 | def test_integrity_check_results0002(self, fake_results, caplog, 110 | no_results_instantiation_check): 111 | # remove some eta info 112 | fake_results.eta_estimates["bench:vm:variant"].pop() 113 | with pytest.raises(FatalKrunError): 114 | fake_results.integrity_check() 115 | 116 | expect = "inconsistent etas length: bench:vm:variant: 1 vs 2" 117 | assert expect in caplog.text 118 | 119 | def test_integrity_check_results0003(self, fake_results, caplog): 120 | # remove a per-core measurement 121 | fake_results.core_cycle_counts["bench:vm:variant"].pop() 122 | with pytest.raises(FatalKrunError): 123 | fake_results.integrity_check() 124 | 125 | expect = "inconsistent cycles length: bench:vm:variant: 1 vs 2" 126 | assert expect in caplog.text 127 | 128 | def test_integrity_check_results0004(self, fake_results, caplog): 129 | # remove a core from a per-core measurement 130 | fake_results.core_cycle_counts["bench:vm:variant"][0].pop() 131 | with pytest.raises(FatalKrunError): 132 | fake_results.integrity_check() 133 | 134 | expect = "wrong #cores in core_cycle_counts: bench:vm:variant[0]: 2 vs 1" 135 | assert expect in caplog.text 136 | 137 | def test_integrity_check_results0005(self, fake_results, caplog): 138 | # remove an in-proc iteration from a per-core measurement 139 | fake_results.core_cycle_counts["bench:vm:variant"][0][0].pop() 140 | with pytest.raises(FatalKrunError): 141 | fake_results.integrity_check() 142 | 143 | expect = "inconsistent #iters in core_cycle_counts: bench:vm:variant[0][0]. 
1 vs 2" 144 | assert expect in caplog.text 145 | -------------------------------------------------------------------------------- /krun/tests/test_time_estimate.py: -------------------------------------------------------------------------------- 1 | from krun.time_estimate import TimeEstimateFormatter 2 | from krun import UNKNOWN_TIME_DELTA, UNKNOWN_ABS_TIME 3 | 4 | import datetime 5 | import pytest 6 | 7 | PHONY_TIME = datetime.datetime(1970, 1, 1, 12, 0, 0, 0) 8 | 9 | @pytest.fixture 10 | def patch_datetime_now(monkeypatch): 11 | """http://stackoverflow.com/questions/20503373 12 | Code by @sashk 13 | """ 14 | class datetime_patch: 15 | @classmethod 16 | def now(cls): 17 | return PHONY_TIME 18 | monkeypatch.setattr(datetime, 'datetime', datetime_patch) 19 | 20 | 21 | def test_time_estimate_none(patch_datetime_now): 22 | tef = TimeEstimateFormatter(None) 23 | assert tef.start_str == '1970-01-01 12:00:00' 24 | assert tef.finish_str == UNKNOWN_ABS_TIME 25 | assert tef.delta_str == UNKNOWN_TIME_DELTA 26 | 27 | 28 | def test_time_estimate(patch_datetime_now): 29 | tef = TimeEstimateFormatter(100) 30 | assert tef.start_str == '1970-01-01 12:00:00' 31 | assert tef.finish_str == '1970-01-01 12:01:40' 32 | assert tef.delta_str == '0:01:40' 33 | -------------------------------------------------------------------------------- /krun/tests/test_vmdef.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from tempfile import NamedTemporaryFile 4 | import pytest 5 | import json 6 | from StringIO import StringIO 7 | from krun.vm_defs import (PythonVMDef, PyPyVMDef, JavaVMDef) 8 | from krun.config import Config 9 | from distutils.spawn import find_executable 10 | from krun.env import EnvChange 11 | from krun.tests.mocks import MockPlatform 12 | from krun.tests import BaseKrunTest 13 | from krun import EntryPoint 14 | from krun import util 15 | 16 | 17 | class TestVMDef(BaseKrunTest): 18 | """Test stuff in VM definitions""" 19 | 20 | def test_make_wrapper_script0001(self, mock_platform): 21 | args = ["arg1", "arg2", "arg3"] 22 | heap_lim_k = 1024 * 1024 * 1024 # 1GiB 23 | stack_lim_k = 8192 24 | dash = find_executable("dash") 25 | assert dash is not None 26 | vmdef = PythonVMDef("python2.7") 27 | vmdef.set_platform(mock_platform) 28 | 29 | wrapper_filename, envlog_filename = vmdef.make_wrapper_script( 30 | args, heap_lim_k, stack_lim_k) 31 | expect = [ 32 | '#!%s' % dash, 33 | 'ENVLOG=`env`', 34 | 'ulimit -d %s || exit $?' % heap_lim_k, 35 | 'ulimit -s %s || exit $?' % stack_lim_k, 36 | 'arg1 arg2 arg3', 37 | 'echo "${ENVLOG}" > %s' % envlog_filename, 38 | 'exit $?' 
39 | ] 40 | 41 | with open(wrapper_filename) as fh: 42 | got = fh.read().splitlines() 43 | 44 | util.del_envlog_tempfile(envlog_filename, mock_platform) 45 | assert expect == got 46 | 47 | def test_env_ctor0001(self): 48 | vm = PythonVMDef("python2.7", env={"MYENV": "xyz"}) 49 | 50 | assert len(vm.common_env_changes) == 1 51 | 52 | ec = vm.common_env_changes[0] 53 | assert ec.var == "MYENV" 54 | assert ec.val == "xyz" 55 | 56 | def test_env_ctor0002(self): 57 | vm = PyPyVMDef("/bin/pypy", env={"LD_LIBRARY_PATH": "/path/to/happiness"}) 58 | 59 | assert len(vm.common_env_changes) == 2 60 | 61 | ec1 = vm.common_env_changes[0] 62 | assert ec1.var == "LD_LIBRARY_PATH" 63 | assert ec1.val == "/path/to/happiness" 64 | 65 | ec2 = vm.common_env_changes[1] 66 | assert ec2.var == "LD_LIBRARY_PATH" 67 | assert ec2.val == "/bin" 68 | 69 | env = {} 70 | EnvChange.apply_all(vm.common_env_changes, env) 71 | assert env["LD_LIBRARY_PATH"] == '/path/to/happiness:/bin' 72 | 73 | def test_sync_disks0001(self, monkeypatch): 74 | """Check disk sync method is called""" 75 | 76 | config = Config() 77 | platform = MockPlatform(None, config) 78 | ep = EntryPoint("test") 79 | vm_def = PythonVMDef('/dummy/bin/python') 80 | vm_def.set_platform(platform) 81 | 82 | sync_called = [False] 83 | def fake_sync_disks(): 84 | sync_called[0] = True 85 | monkeypatch.setattr(platform, "sync_disks", fake_sync_disks) 86 | 87 | def fake_run_exec_popen(args, stderr_file=None): 88 | return "[1]", "", 0, False # stdout, stderr, exit_code, timed_out 89 | monkeypatch.setattr(vm_def, "_run_exec_popen", fake_run_exec_popen) 90 | 91 | vm_def.run_exec(ep, 1, 1, 1, 1, "test:dummyvm:default", 0) 92 | assert sync_called == [True] 93 | 94 | def test_sync_disks0002(self, monkeypatch): 95 | """We throw away the results from sanity checks, so there's no need to 96 | sync disks (and wait).""" 97 | 98 | stdout = json.dumps({ 99 | "wallclock_times": [123.4], 100 | "core_cycle_counts": [[1], [2], [3], [4]], 101 | "aperf_counts": [[5], [6], [7], [8]], 102 | "mperf_counts": [[9], [10], [11], [12]], 103 | }) 104 | 105 | config = Config() 106 | platform = MockPlatform(None, config) 107 | ep = EntryPoint("test") 108 | vm_def = PythonVMDef('/dummy/bin/python') 109 | 110 | sync_called = [False] 111 | def fake_sync_disks(): 112 | sync_called[0] = True 113 | monkeypatch.setattr(platform, "sync_disks", fake_sync_disks) 114 | 115 | def fake_run_exec_popen(args, stderr_file=None): 116 | return stdout, "", 0, False # stdout, stderr, exit_code, timed_out 117 | 118 | monkeypatch.setattr(vm_def, "_run_exec_popen", fake_run_exec_popen) 119 | 120 | util.spawn_sanity_check(platform, ep, vm_def, "test") 121 | assert sync_called == [False] 122 | 123 | def test_run_exec_popen0001(self, monkeypatch): 124 | """Check normal operation of _run_exec_popen()""" 125 | 126 | config = Config() 127 | platform = MockPlatform(None, config) 128 | vm_def = PythonVMDef('/dummy/bin/python') 129 | vm_def.set_platform(platform) 130 | 131 | args = [sys.executable, "-c", 132 | "import sys; sys.stdout.write('STDOUT'); sys.stderr.write('STDERR')"] 133 | out, err, rv, timed_out = vm_def._run_exec_popen(args) 134 | 135 | assert err == "STDERR" 136 | assert out == "STDOUT" 137 | assert rv == 0 138 | assert timed_out == False 139 | 140 | def test_run_exec_popen0002(self, monkeypatch): 141 | """Check that writing stderr to a file works. 
Used for instrumentation""" 142 | 143 | config = Config() 144 | platform = MockPlatform(None, config) 145 | vm_def = PythonVMDef('/dummy/bin/python') 146 | vm_def.set_platform(platform) 147 | 148 | args = [sys.executable, "-c", 149 | "import sys; sys.stdout.write('STDOUT'); sys.stderr.write('STDERR')"] 150 | 151 | with NamedTemporaryFile(delete=False, prefix="kruntest") as fh: 152 | filename = fh.name 153 | out, err, rv, timed_out = vm_def._run_exec_popen(args, fh) 154 | 155 | assert err == "" # not here due to redirection 156 | assert out == "STDOUT" # behaviour should be unchanged 157 | assert rv == 0 158 | assert timed_out == False 159 | 160 | # stderr should be in this file 161 | with open(filename) as fh: 162 | assert fh.read() == "STDERR" 163 | 164 | fh.close() 165 | os.unlink(filename) 166 | 167 | def test_pypy_instrumentation0001(self): 168 | pypylog_file = StringIO("\n".join([ 169 | "[41720a93ef67] {gc-minor", 170 | "[41720a941224] {gc-minor-walkroots", 171 | "[41720a942814] gc-minor-walkroots}", 172 | "[41720a9455be] gc-minor}", 173 | "@@@ END_IN_PROC_ITER: 0", 174 | "@@@ JIT_TIME: 0.001", 175 | ])) 176 | 177 | expect = {'raw_vm_events': [ 178 | ['root', None, None, [ 179 | ['gc-minor', 71958059544423, 71958059570622, [ 180 | ['gc-minor-walkroots', 71958059553316, 71958059558932, []]]]]] 181 | ], 182 | "jit_times": [0.001]} 183 | 184 | vmd = PyPyVMDef("/pretend/pypy") 185 | assert vmd.parse_instr_stderr_file(pypylog_file) == expect 186 | 187 | def test_pypy_instrumentation0002(self): 188 | pypylog_file = StringIO("\n".join([ 189 | "[41720a93ef67] {gc-minor", 190 | "[41720a941224] {gc-minor-walkroots", 191 | "[41720a942814] gc-minor-walkroots}", 192 | "[41720a9455be] gc-minor}", 193 | "@@@ END_IN_PROC_ITER: 0", 194 | "@@@ JIT_TIME: 0.001", 195 | "[41720a93ef67] {gc-minor", 196 | "[41720a941224] {gc-minor-walkroots", 197 | "[41720a942814] gc-minor-walkroots}", 198 | "[41720a9455be] gc-minor}", 199 | "@@@ END_IN_PROC_ITER: 1", 200 | "@@@ JIT_TIME: 0.002", 201 | ])) 202 | 203 | expect_one_iter = ['root', None, None, [ 204 | ['gc-minor', 71958059544423, 71958059570622, [ 205 | ['gc-minor-walkroots', 71958059553316, 71958059558932, []]]]] 206 | ] 207 | expect = {'raw_vm_events': [ 208 | expect_one_iter, expect_one_iter 209 | ], 210 | "jit_times": [0.001, 0.002]} 211 | 212 | vmd = PyPyVMDef("/pretend/pypy") 213 | assert vmd.parse_instr_stderr_file(pypylog_file) == expect 214 | 215 | def test_pypy_instrumentation0003(self): 216 | pypylog_file = StringIO("\n".join([ 217 | "[41720a93ef67] {gc-minor", 218 | "[41720a900000] gc-minor}", # stop time invalid 219 | "@@@ END_IN_PROC_ITER: 0", 220 | "@@@ JIT_TIME: 0.001", 221 | ])) 222 | 223 | vmd = PyPyVMDef("/pretend/pypy") 224 | with pytest.raises(AssertionError): 225 | vmd.parse_instr_stderr_file(pypylog_file) 226 | 227 | def test_pypy_instrumentation0004(self): 228 | pypylog_file = StringIO("\n".join([ 229 | "[000000000001] {gc-minor", 230 | "[000000000002] {gc-step", 231 | "[000000000003] gc-minor}", # bad nesting 232 | "[000000000004] gc-step}", 233 | "@@@ END_IN_PROC_ITER: 0", 234 | "@@@ JIT_TIME: 0.001", 235 | ])) 236 | 237 | vmd = PyPyVMDef("/pretend/pypy") 238 | with pytest.raises(AssertionError): 239 | vmd.parse_instr_stderr_file(pypylog_file) 240 | 241 | def test_pypy_instrumentation0005(self): 242 | pypylog_file = StringIO("\n".join([ 243 | "[000000000001] {gc-minor", # unfinished event 244 | "@@@ END_IN_PROC_ITER: 0", 245 | "@@@ JIT_TIME: 0.001", 246 | ])) 247 | 248 | vmd = PyPyVMDef("/pretend/pypy") 249 | with 
pytest.raises(AssertionError): 250 | vmd.parse_instr_stderr_file(pypylog_file) 251 | 252 | def test_jdk_instrumentation0001(self): 253 | """Check the json passes through correctly""" 254 | 255 | stderr_file = StringIO("\n".join([ 256 | '@@@ JDK_EVENTS: [0, "dummy"]', 257 | '@@@ JDK_EVENTS: [1, "dummy"]', 258 | '@@@ JDK_EVENTS: [2, "dummy"]', 259 | ])) 260 | 261 | vmd = JavaVMDef("/pretend/java") 262 | got = vmd.parse_instr_stderr_file(stderr_file) 263 | 264 | elems = got["raw_vm_events"] 265 | assert len(elems) == 3 266 | for i in xrange(3): 267 | assert elems[i][0] == i 268 | 269 | def test_jdk_instrumentation0002(self): 270 | """Check the parser will bail if json entries out of sequence""" 271 | 272 | stderr_file = StringIO("\n".join([ 273 | '@@@ JDK_EVENTS: [0, "dummy"]', 274 | '@@@ JDK_EVENTS: [3, "dummy"]', # uh-oh! 275 | '@@@ JDK_EVENTS: [2, "dummy"]', 276 | ])) 277 | 278 | vmd = JavaVMDef("/pretend/java") 279 | with pytest.raises(AssertionError): 280 | vmd.parse_instr_stderr_file(stderr_file) 281 | -------------------------------------------------------------------------------- /krun/time_estimate.py: -------------------------------------------------------------------------------- 1 | from krun import ABS_TIME_FORMAT, UNKNOWN_TIME_DELTA, UNKNOWN_ABS_TIME 2 | 3 | import datetime 4 | 5 | class TimeEstimateFormatter(object): 6 | def __init__(self, seconds): 7 | """Generates string representations of time estimates. 8 | Args: 9 | seconds -- estimated seconds into the future. None for unknown. 10 | """ 11 | self.start = datetime.datetime.now() 12 | if seconds is not None: 13 | self.delta = datetime.timedelta(seconds=seconds) 14 | self.finish = self.start + self.delta 15 | else: 16 | self.delta = None 17 | self.finish = None 18 | 19 | @property 20 | def start_str(self): 21 | return str(self.start.strftime(ABS_TIME_FORMAT)) 22 | 23 | @property 24 | def finish_str(self): 25 | if self.finish is not None: 26 | return str(self.finish.strftime(ABS_TIME_FORMAT)) 27 | else: 28 | return UNKNOWN_ABS_TIME 29 | 30 | @property 31 | def delta_str(self): 32 | if self.delta is not None: 33 | return str(self.delta).split(".")[0] 34 | else: 35 | return UNKNOWN_TIME_DELTA 36 | 37 | def now_str(): 38 | """Just return the time now (formatted)""" 39 | 40 | return str(datetime.datetime.now().strftime(ABS_TIME_FORMAT)) 41 | -------------------------------------------------------------------------------- /libkrun/.gitignore: -------------------------------------------------------------------------------- 1 | test/test_prog 2 | -------------------------------------------------------------------------------- /libkrun/Makefile: -------------------------------------------------------------------------------- 1 | LIBKRUNTIME_CFLAGS = -Wall -shared -fPIC 2 | COMMON_CFLAGS = -Wall -pedantic -std=gnu99 3 | SUDO = sudo 4 | 5 | # Sadly this tool requires root 6 | VIRT_WHAT = ${SUDO} /usr/sbin/virt-what 7 | 8 | # Detect if we are virtualised (Linux only for now XXX) 9 | ifeq ($(shell uname -s),Linux) 10 | VIRTUALISED = 0 # default off 11 | 12 | VIRT_FACTS = $(shell ${VIRT_WHAT}) 13 | ifneq ("${VIRT_FACTS}","") 14 | VIRTUALISED = 1 15 | endif 16 | 17 | ifeq ("${TRAVIS}","true") 18 | VIRTUALISED = 1 19 | endif 20 | 21 | # Under virtualised conditions, we have no performance counters. 22 | # MSR-centric code is also guarded on a per-OS basis in libkruntime.c 23 | ifeq (${VIRTUALISED},1) 24 | COMMON_CFLAGS += -DMSRS 25 | else ifeq (${MSRS},1) 26 | # You can also force off MSR support. 
This can be useful for 27 | # testing on machines without the Krun Linux kernel installed. 28 | COMMON_CFLAGS += -DMSRS 29 | endif 30 | endif 31 | 32 | ifeq ($(shell uname -s),Linux) 33 | LIBKRUNTIME_LDFLAGS = -lrt -ldl 34 | else 35 | LIBKRUNTIME_LDFLAGS = 36 | endif 37 | 38 | ifeq (${ENABLE_JAVA},1) 39 | LIBKRUNTIME_CFLAGS += -DWITH_JAVA=1 40 | endif 41 | 42 | .PHONY: clean 43 | 44 | all: libkruntime.so test/test_prog 45 | 46 | libkruntime.so: libkruntime.c libkruntime.h 47 | ${CC} ${JAVA_CPPFLAGS} ${JAVA_CFLAGS} ${LIBKRUNTIME_CFLAGS} ${CFLAGS} \ 48 | ${CPPFLAGS} ${COMMON_CFLAGS} libkruntime.c -o libkruntime.so \ 49 | ${JAVA_LDFLAGS} ${LDFLAGS} ${LIBKRUNTIME_LDFLAGS} 50 | 51 | test/test_prog: test/test_prog.c libkruntime.so 52 | ${CC} ${CFLAGS} ${CPPFLAGS} ${COMMON_CFLAGS} test/test_prog.c \ 53 | -o test/test_prog ${LDFLAGS} -L. -lkruntime -Wl,-rpath=$(shell pwd) 54 | 55 | clean: 56 | rm -f libkruntime.so test/test_prog 57 | -------------------------------------------------------------------------------- /libkrun/libkruntime.h: -------------------------------------------------------------------------------- 1 | #ifndef __LIBKRUNTIME_H 2 | #define __LIBKRUNTIME_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #ifdef WITH_JAVA 10 | #include <jni.h> 11 | #endif // WITH_JAVA 12 | 13 | #ifdef __cplusplus 14 | extern "C" { 15 | #endif 16 | 17 | // Public API 18 | void krun_init(void); 19 | void krun_done(void); 20 | void krun_measure(int mdata_idx); 21 | double krun_get_wallclock(int mdata_idx); 22 | uint64_t krun_get_core_cycles(int mdata_idx, int core); 23 | uint64_t krun_get_aperf(int mdata_idx, int core); 24 | uint64_t krun_get_mperf(int mdata_idx, int core); 25 | double krun_get_core_cycles_double(int mdata_idx, int core); 26 | double krun_get_aperf_double(int mdata_idx, int core); 27 | double krun_get_mperf_double(int mdata_idx, int core); 28 | int krun_get_num_cores(void); 29 | void *krun_xcalloc(size_t nmemb, size_t size); 30 | 31 | // These are not intended for general public use, but exposed for tests.
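/*
 * Why the double conversions below need care (an inference from
 * test_libkruntime.py and test_prog.c, not a copy of libkruntime.c): an
 * IEEE-754 double has a 53-bit significand, so u64 counter values above
 * 2^53 cannot be represented exactly. A checked conversion in the spirit
 * of the cycles_double_prec_ok/bad tests might look like:
 *
 *   double krun_u64_to_double(uint64_t val) {
 *       double d = (double) val;
 *       if ((uint64_t) d != val)
 *           errx(EXIT_FAILURE, "Loss of precision detected!");
 *       return d;
 *   }
 */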
32 | double krun_u64_to_double(uint64_t val);
33 | double krun_clock_gettime_monotonic(void);
34 | uint64_t krun_read_core_cycles(int core);
35 | 
36 | #ifdef __cplusplus
37 | }
38 | #endif
39 | 
40 | #ifdef WITH_JAVA
41 | JNIEXPORT void JNICALL Java_IterationsRunner_JNI_1krun_1init(JNIEnv *e, jclass c);
42 | JNIEXPORT void JNICALL Java_IterationsRunner_JNI_1krun_1done(JNIEnv *e, jclass c);
43 | JNIEXPORT void JNICALL Java_IterationsRunner_JNI_1krun_1measure(JNIEnv *e, jclass c, jint mindex);
44 | JNIEXPORT jdouble JNICALL Java_IterationsRunner_JNI_1krun_1get_1wallclock(JNIEnv *e, jclass c, jint mindex);
45 | JNIEXPORT jdouble JNICALL Java_IterationsRunner_JNI_1krun_1clock_1gettime_1monotonic(JNIEnv *e, jclass c);
46 | JNIEXPORT jlong JNICALL Java_IterationsRunner_JNI_1krun_1get_1core_1cycles(JNIEnv *e, jclass c, jint mindex, jint core);
47 | JNIEXPORT jlong JNICALL Java_IterationsRunner_JNI_1krun_1get_1aperf(JNIEnv *e, jclass c, jint mindex, jint core);
48 | JNIEXPORT jlong JNICALL Java_IterationsRunner_JNI_1krun_1get_1mperf(JNIEnv *e, jclass c, jint mindex, jint core);
49 | JNIEXPORT jint JNICALL Java_IterationsRunner_JNI_1krun_1get_1num_1cores(JNIEnv *e, jclass c);
50 | #endif  // WITH_JAVA
51 | 
52 | #endif  // __LIBKRUNTIME_H
-------------------------------------------------------------------------------- /libkrun/test/test_libkruntime.py: --------------------------------------------------------------------------------
 1 | import subprocess32
 2 | import os
 3 | import sys
 4 | import pytest
 5 | 
 6 | # Some core cycle tests collect two readings as fast as possible, so the delta
 7 | # should be pretty small (but it ultimately depends upon the CPU).
 8 | NOT_MANY_CYCLES = 500000
 9 | 
10 | DIR = os.path.abspath(os.path.dirname(__file__))
11 | TEST_PROG_PATH = os.path.join(DIR, "test_prog")
12 | 
13 | sys.path.append(os.path.join(DIR, "..", ".."))
14 | from krun.platform import detect_platform
15 | PLATFORM = detect_platform(None, None)
16 | 
17 | MSR_SUPPORT = PLATFORM.num_per_core_measurements > 0
18 | 
19 | def invoke_c_prog(mode):
20 |     assert os.path.exists(TEST_PROG_PATH)
21 | 
22 |     p = subprocess32.Popen(TEST_PROG_PATH + " " + mode,
23 |                            stderr=subprocess32.PIPE, stdout=subprocess32.PIPE, shell=True)
24 |     out, err = p.communicate()
25 |     return p.returncode, out.strip(), err.strip()
26 | 
27 | 
28 | def parse_keyvals(out, doubles=False):
29 |     dct = {}
30 |     for line in out.splitlines():
31 |         key, val = line.split("=")
32 |         if doubles:
33 |             dct[key.strip()] = float(val)
34 |         else:
35 |             dct[key.strip()] = int(val)
36 |     return dct
37 | 
38 | 
39 | class TestLibKrunTime(object):
40 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
41 |     def test_cycles_u64_0001(self):
42 |         rv, out, _ = invoke_c_prog("cycles_u64")
43 |         assert rv == 0
44 |         dct = parse_keyvals(out)
45 | 
46 |         assert 0 <= dct["cycles_u64_delta"] <= NOT_MANY_CYCLES
47 | 
48 |     @pytest.mark.skipif(MSR_SUPPORT, reason="Without MSRs only")
49 |     def test_cycles_u64_0002(self):
50 |         rv, _, err = invoke_c_prog("cycles_u64")
51 |         assert rv != 0
52 |         assert "libkruntime was built without MSR support" in err
53 | 
54 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
55 |     def test_cycles_double(self):
56 |         rv, out, _ = invoke_c_prog("cycles_double")
57 |         assert rv == 0
58 |         dct = parse_keyvals(out, True)
59 |         assert 0 <= dct["cycles_double_delta"] <= NOT_MANY_CYCLES
60 | 
61 |     def test_cycles_double_prec_ok(self):
62 |         rv, out, _ = invoke_c_prog("cycles_double_prec_ok")
63 |         assert rv == 0
64 |         assert out == "OK"
65 | 
66 |     def test_cycles_double_prec_bad(self):
67 |         rv, _, err = invoke_c_prog("cycles_double_prec_bad")
68 |         assert rv == 1
69 |         assert "Loss of precision detected!" in err
70 | 
71 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
72 |     def test_cycles_u64_double_ratio(self):
73 |         rv, out, _ = invoke_c_prog("cycles_u64_double_ratio")
74 |         assert rv == 0
75 |         dct = parse_keyvals(out, True)
76 |         # within 2x of each other
77 |         assert 0.5 <= dct["cycles_u64_double_ratio"] <= 2
78 | 
79 |     def test_clock_gettime_monotonic(self):
80 |         rv, out, _ = invoke_c_prog("clock_gettime_monotonic")
81 |         assert rv == 0
82 |         dct = parse_keyvals(out, True)
83 |         assert dct["monotonic_start"] <= dct["monotonic_stop"]
84 | 
85 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
86 |     def test_aperf_mperf(self):
87 |         rv, out, _ = invoke_c_prog("aperf_mperf")
88 |         assert rv == 0
89 |         dct = parse_keyvals(out, doubles=False)
90 | 
91 |         assert dct["aperf"] > 0
92 |         assert dct["mperf"] > 0
93 | 
94 |         # aperf is ticking for a subset of the time mperf is
95 |         assert dct["aperf"] <= dct["mperf"]
96 | 
97 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
98 |     def test_aperf0001(self):
99 |         """Check krun_get_aperf when libkruntime has MSR support"""
100 | 
101 |         rv, out, _ = invoke_c_prog("aperf")
102 |         assert rv == 0
103 |         dct = parse_keyvals(out)
104 |         assert dct["aperf_start"] < dct["aperf_stop"]
105 | 
106 |     @pytest.mark.skipif(MSR_SUPPORT, reason="Without MSRs only")
107 |     def test_aperf0002(self):
108 |         """Check krun_get_aperf when libkruntime does not have MSR support"""
109 | 
110 |         rv, _, err = invoke_c_prog("aperf")
111 |         assert rv != 0
112 |         assert "libkruntime was built without MSR support" in err
113 | 
114 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
115 |     def test_mperf0001(self):
116 |         """Check krun_get_mperf when libkruntime has MSR support"""
117 | 
118 |         rv, out, _ = invoke_c_prog("mperf")
119 |         assert rv == 0
120 |         dct = parse_keyvals(out)
121 |         assert dct["mperf_start"] < dct["mperf_stop"]
122 | 
123 |     @pytest.mark.skipif(MSR_SUPPORT, reason="Without MSRs only")
124 |     def test_mperf0002(self):
125 |         """Check krun_get_mperf when libkruntime does not have MSR support"""
126 | 
127 |         rv, _, err = invoke_c_prog("mperf")
128 |         assert rv != 0
129 |         assert "libkruntime was built without MSR support" in err
130 | 
131 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
132 |     def test_core_bounds_check(self):
133 |         rv, _, err = invoke_c_prog("core_bounds_check")
134 |         assert rv != 0
135 |         assert "core out of range" in err
136 | 
137 |     @pytest.mark.skipif(not MSR_SUPPORT, reason="No MSRs")
138 |     def test_mdata_index_bounds_check(self):
139 |         rv, _, err = invoke_c_prog("mdata_index_bounds_check")
140 |         assert rv != 0
141 |         assert "mdata index out of range" in err
142 | 
143 |     def test_read_everything_all_cores(self):
144 |         rv, out, err = invoke_c_prog("read_everything_all_cores")
145 |         assert rv == 0
146 |         dct = parse_keyvals(out, doubles=True)
147 | 
148 |         # Two wallclock measurements
149 |         expect = 2
150 | 
151 |         if MSR_SUPPORT:
152 |             # Two core-cycle measurements for each core
153 |             expect += PLATFORM.num_cpus * 2
154 |             # Two measurements each for aperf and mperf on each core
155 |             expect += 2 * PLATFORM.num_cpus * 2
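        # (Worked example: on a hypothetical 4-core machine with MSR support
        # that is 2 + 4*2 + 2*4*2 = 26 key/value pairs in total.)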
156 | 
157 |         assert len(dct) == expect
158 | 
-------------------------------------------------------------------------------- /libkrun/test/test_prog.c: --------------------------------------------------------------------------------
  1 | #include <stdio.h>
  2 | #include <stdlib.h>
  3 | #include <string.h>
  4 | #include <inttypes.h>
  5 | #include <unistd.h>
  6 | 
  7 | #include "../libkruntime.h"
  8 | 
  9 | #define TEST_CORE 0
 10 | 
 11 | void test_cycles_u64(void);
 12 | void test_cycles_double(void);
 13 | void test_cycles_double_prec_ok(void);
 14 | void test_cycles_double_prec_bad(void);
 15 | void test_cycles_u64_double_ratio(void);
 16 | void test_clock_gettime_monotonic(void);
 17 | void test_aperf_mperf(void);
 18 | void test_aperf(void);
 19 | void test_mperf(void);
 20 | void test_core_bounds_check(void);
 21 | void test_mdata_index_bounds_check(void);
 22 | void test_read_everything_all_cores(void);
 23 | 
 24 | void usage();
 25 | 
 26 | void
 27 | usage()
 28 | {
 29 |     printf("usages:\n");
 30 |     printf(" test_prog cycles_u64\n");
 31 |     printf(" test_prog cycles_double\n");
 32 |     printf(" test_prog cycles_double_prec_ok\n");
 33 |     printf(" test_prog cycles_double_prec_bad\n");
 34 |     printf(" test_prog cycles_u64_double_ratio\n");
 35 |     printf(" test_prog clock_gettime_monotonic\n");
 36 |     printf(" test_prog aperf_mperf\n");
 37 |     printf(" test_prog aperf\n");
 38 |     printf(" test_prog mperf\n");
 39 |     printf(" test_prog core_bounds_check\n");
 40 |     printf(" test_prog mdata_index_bounds_check\n");
 41 |     printf(" test_prog read_everything_all_cores\n");
 42 | }
 43 | 
 44 | int
 45 | main(int argc, char **argv)
 46 | {
 47 |     char *mode;
 48 |     int rv = EXIT_SUCCESS;
 49 | 
 50 |     if (argc != 2) {
 51 |         usage();
 52 |         return (EXIT_FAILURE);
 53 |     }
 54 | 
 55 |     mode = argv[1];
 56 | 
 57 |     if (strcmp(mode, "cycles_u64") == 0) {
 58 |         krun_init();
 59 |         test_cycles_u64();
 60 |         krun_done();
 61 |     } else if (strcmp(mode, "cycles_double") == 0) {
 62 |         krun_init();
 63 |         test_cycles_double();
 64 |         krun_done();
 65 |     } else if (strcmp(mode, "cycles_double_prec_ok") == 0) {
 66 |         krun_init();
 67 |         test_cycles_double_prec_ok();
 68 |         krun_done();
 69 |     } else if (strcmp(mode, "cycles_double_prec_bad") == 0) {
 70 |         krun_init();
 71 |         test_cycles_double_prec_bad();
 72 |         krun_done();
 73 |     } else if (strcmp(mode, "cycles_u64_double_ratio") == 0) {
 74 |         krun_init();
 75 |         test_cycles_u64_double_ratio();
 76 |         krun_done();
 77 |     } else if (strcmp(mode, "clock_gettime_monotonic") == 0) {
 78 |         test_clock_gettime_monotonic();  // doesn't need init/done
 79 |     } else if (strcmp(mode, "aperf_mperf") == 0) {
 80 |         krun_init();
 81 |         test_aperf_mperf();
 82 |         krun_done();
 83 |     } else if (strcmp(mode, "aperf") == 0) {
 84 |         krun_init();
 85 |         test_aperf();
 86 |         krun_done();
 87 |     } else if (strcmp(mode, "mperf") == 0) {
 88 |         krun_init();
 89 |         test_mperf();
 90 |         krun_done();
 91 |     } else if (strcmp(mode, "core_bounds_check") == 0) {
 92 |         krun_init();
 93 |         test_core_bounds_check();
 94 |         krun_done();
 95 |     } else if (strcmp(mode, "mdata_index_bounds_check") == 0) {
 96 |         krun_init();
 97 |         test_mdata_index_bounds_check();
 98 |         krun_done();
 99 |     } else if (strcmp(mode, "read_everything_all_cores") == 0) {
100 |         krun_init();
101 |         test_read_everything_all_cores();
102 |         krun_done();
103 |     } else {
104 |         usage();
105 |         rv = EXIT_FAILURE;
106 |     }
107 | 
108 |     return (rv);
109 | }
110 | 
111 | void
112 | test_cycles_u64(void) {
113 |     uint64_t t1, t2, delta;
114 | 
115 |     krun_measure(0);
116 |     krun_measure(1);
117 | 
118 |     t1 = krun_get_core_cycles(0, TEST_CORE);
119 |     t2 = krun_get_core_cycles(1, TEST_CORE);
120 |     delta = t2 - t1;
121 | 
122 |     printf("cycles_u64_start= %" PRIu64 "\n", t1);
123 |     printf("cycles_u64_stop = %" PRIu64 "\n", t2);
124 |     printf("cycles_u64_delta= %" PRIu64 "\n", delta);
125 | }
126 | 
127 | void
128 | test_cycles_double(void)
129 | {
130 |     double t1, t2, delta;
131 | 
132 |     krun_measure(0);
133 |     krun_measure(1);
134 | 
135 |     t1 = krun_get_core_cycles_double(0, TEST_CORE);
136 |     t2 = krun_get_core_cycles_double(1, TEST_CORE);
137 |     delta = t2 - t1;
138 | 
139 |     printf("cycles_double_start= %f\n", t1);
140 |     printf("cycles_double_stop = %f\n", t2);
141 |     printf("cycles_double_delta= %f\n", delta);
142 | }
143 | 
144 | void
145 | test_cycles_double_prec_ok(void)
146 | {
147 |     (void) krun_u64_to_double(666);
148 |     printf("OK\n");
149 | }
150 | 
151 | void
152 | test_cycles_double_prec_bad(void)
153 | {
154 |     (void) krun_u64_to_double(((uint64_t) 1 << 62) - 1);
155 | }
156 | 
157 | void
158 | test_cycles_u64_double_ratio(void)
159 | {
160 |     uint64_t i_time1, i_time2, i_delta;
161 |     double d_time1, d_time2, d_delta, ratio;
162 | 
163 |     krun_measure(0);
164 |     krun_measure(1);
165 | 
166 |     i_time1 = krun_get_core_cycles(0, TEST_CORE);
167 |     i_time2 = krun_get_core_cycles(1, TEST_CORE);
168 | 
169 |     d_time1 = krun_get_core_cycles_double(0, TEST_CORE);
170 |     d_time2 = krun_get_core_cycles_double(1, TEST_CORE);
171 | 
172 |     i_delta = i_time2 - i_time1;
173 |     d_delta = d_time2 - d_time1;
174 |     ratio = i_delta / d_delta;
175 | 
176 |     printf("cycles_u64_double_ratio=%f\n", ratio);
177 | }
178 | 
179 | void
180 | test_clock_gettime_monotonic()
181 | {
182 |     double t1, t2;
183 | 
184 |     krun_measure(0);
185 |     sleep(1);
186 |     krun_measure(1);
187 | 
188 |     t1 = krun_get_wallclock(0);
189 |     t2 = krun_get_wallclock(1);
190 | 
191 |     printf("monotonic_start= %f\n", t1);
192 |     printf("monotonic_stop = %f\n", t2);
193 | }
194 | 
195 | void
196 | test_aperf_mperf(void)
197 | {
198 |     uint64_t ap, mp;
199 | 
200 |     krun_measure(0);
201 | 
202 |     ap = krun_get_aperf(0, TEST_CORE);
203 |     mp = krun_get_mperf(0, TEST_CORE);
204 | 
205 |     printf("aperf=%" PRIu64 "\n", ap);
206 |     printf("mperf=%" PRIu64 "\n", mp);
207 | }
208 | 
209 | void
210 | test_aperf(void)
211 | {
212 |     uint64_t p1, p2;
213 | 
214 |     krun_measure(0);
215 |     krun_measure(1);
216 | 
217 |     p1 = krun_get_aperf(0, TEST_CORE);
218 |     p2 = krun_get_aperf(1, TEST_CORE);
219 | 
220 |     printf("aperf_start=%" PRIu64 "\n", p1);
221 |     printf("aperf_stop= %" PRIu64 "\n", p2);
222 | }
223 | 
224 | void
225 | test_mperf(void)
226 | {
227 |     uint64_t p1, p2;
228 | 
229 |     krun_measure(0);
230 |     krun_measure(1);
231 | 
232 |     p1 = krun_get_mperf(0, TEST_CORE);
233 |     p2 = krun_get_mperf(1, TEST_CORE);
234 | 
235 |     printf("mperf_start=%" PRIu64 "\n", p1);
236 |     printf("mperf_stop= %" PRIu64 "\n", p2);
237 | }
238 | 
239 | void
240 | test_core_bounds_check(void)
241 | {
242 |     int num_cores = krun_get_num_cores();
243 | 
244 |     krun_measure(0);
245 |     (void) krun_get_mperf(0, num_cores);  // one above the last core
246 |     /* unreachable as the above crashes */
247 | }
248 | 
249 | void
250 | test_mdata_index_bounds_check(void)
251 | {
252 |     krun_measure(0);
253 |     (void) krun_get_mperf(2, TEST_CORE);  // 2 is not a valid mdata index
254 |     /* unreachable as the above crashes */
255 | }
256 | 
257 | void
258 | test_read_everything_all_cores(void)
259 | {
260 |     int num_cores = krun_get_num_cores();
261 |     int core, idx;
262 | 
263 |     krun_measure(0);
264 |     krun_measure(1);
265 | 
266 |     for (idx = 0; idx < 2; idx++) {
267 |         printf("wallclock_%d= %f\n", idx, krun_get_wallclock(idx));
268 |         for (core = 0; core < num_cores; core++) {
269 |             printf("core_cycles_%d_%d=%" PRIu64 "\n", idx, core,
270 |                 krun_get_core_cycles(idx, core));
271 |             printf("aperf_%d_%d= %" PRIu64 "\n", idx, core,
272 |                 krun_get_aperf(idx, core));
273 |             printf("mperf_%d_%d= %" PRIu64 "\n", idx, core,
274 |                 krun_get_mperf(idx, core));
275 |         }
276 |     }
277 | }
278 | 
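/*
 * For reference, a minimal sketch of how a client drives the measurement
 * API -- the same pattern the tests above use; "benchmark_iteration" is a
 * hypothetical placeholder, not part of the library:
 *
 *     krun_init();
 *     krun_measure(0);                  // "before" readings -> mdata slot 0
 *     benchmark_iteration();
 *     krun_measure(1);                  // "after" readings -> mdata slot 1
 *     elapsed = krun_get_wallclock(1) - krun_get_wallclock(0);
 *     krun_done();
 */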
-------------------------------------------------------------------------------- /platform_sanity_checks/Makefile: --------------------------------------------------------------------------------
 1 | OS = $(shell uname -s)
 2 | 
 3 | .PHONY: clean all
 4 | 
 5 | all: check_openbsd_malloc_options.so check_nice_priority.so \
 6 | 	check_linux_cpu_affinity_pinned.so \
 7 | 	check_linux_cpu_affinity_not_pinned.so \
 8 | 	check_linux_scheduler.so
 9 | 
10 | check_openbsd_malloc_options.so: check_openbsd_malloc_options.c
11 | ifeq (${OS},OpenBSD)
12 | 	${CC} ${CFLAGS} ${LDFLAGS} ${CPPFLAGS} -shared -Wall -Wextra -o \
13 | 		check_openbsd_malloc_options.so check_openbsd_malloc_options.c
14 | endif
15 | 
16 | check_nice_priority.so: check_nice_priority.c
17 | 	${CC} ${CFLAGS} ${LDFLAGS} ${CPPFLAGS} -fPIC -shared -Wall -Wextra -o \
18 | 		check_nice_priority.so check_nice_priority.c
19 | 
20 | check_linux_cpu_affinity_pinned.so: check_linux_cpu_affinity_pinned.c
21 | ifeq (${OS},Linux)
22 | 	${CC} ${CFLAGS} ${LDFLAGS} ${CPPFLAGS} -fPIC -shared -Wall -Wextra -o \
23 | 		check_linux_cpu_affinity_pinned.so check_linux_cpu_affinity_pinned.c
24 | endif
25 | 
26 | check_linux_cpu_affinity_not_pinned.so: check_linux_cpu_affinity_not_pinned.c
27 | ifeq (${OS},Linux)
28 | 	${CC} ${CFLAGS} ${LDFLAGS} ${CPPFLAGS} -fPIC -shared -Wall -Wextra -o \
29 | 		check_linux_cpu_affinity_not_pinned.so check_linux_cpu_affinity_not_pinned.c
30 | endif
31 | 
32 | check_linux_scheduler.so: check_linux_scheduler.c
33 | ifeq (${OS},Linux)
34 | 	${CC} ${CFLAGS} ${LDFLAGS} ${CPPFLAGS} -fPIC -shared -Wall -Wextra -o \
35 | 		check_linux_scheduler.so check_linux_scheduler.c
36 | endif
37 | 
38 | clean:
39 | 	rm -f check_openbsd_malloc_options.so check_nice_priority.so \
40 | 		check_linux_cpu_affinity_pinned.so check_linux_scheduler.so \
41 | 		check_linux_cpu_affinity_not_pinned.so
-------------------------------------------------------------------------------- /platform_sanity_checks/check_linux_cpu_affinity_not_pinned.c: --------------------------------------------------------------------------------
 1 | /*
 2 |  * Dummy benchmark that checks the CPU affinity mask for an *unpinned* benchmark.
 3 |  *
 4 |  * The mask should contain all CPUs.
 5 |  *
 6 |  * This code is Linux specific.
 7 |  */
 8 | 
 9 | #define _GNU_SOURCE
10 | 
11 | #include <sched.h>
12 | #include <stdio.h>
13 | #include <stdlib.h>
14 | #include <string.h>
15 | #include <unistd.h>
16 | #include <errno.h>
17 | #include <sys/types.h>
18 | 
19 | 
20 | void
21 | run_iter(int param)
22 | {
23 |     pid_t pid;
24 |     cpu_set_t mask;
25 |     size_t mask_sz;
26 |     int ret, i;
27 |     long n_cpus;
28 | 
29 |     (void) param;
30 |     pid = getpid();
31 |     n_cpus = sysconf(_SC_NPROCESSORS_ONLN);
32 |     mask_sz = sizeof(mask);
33 | 
34 |     ret = sched_getaffinity(pid, mask_sz, &mask);
35 |     if (ret != 0) {
36 |         perror("sched_getaffinity");
37 |         exit(EXIT_FAILURE);
38 |     }
39 | 
40 |     if (CPU_COUNT(&mask) != n_cpus) {
41 |         fprintf(stderr, "Wrong number of CPUs in affinity mask\n"
42 |             "got %d, expect %ld\n", CPU_COUNT(&mask), n_cpus);
43 |         exit(EXIT_FAILURE);
44 |     }
45 | 
46 |     for (i = 0; i < n_cpus; i++) {
47 |         if (!CPU_ISSET(i, &mask)) {
48 |             fprintf(stderr, "CPU %d not in affinity mask\n", i);
49 |             exit(EXIT_FAILURE);
50 |         }
51 |     }
52 | }
53 | 
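/*
 * (Like the other files in this directory, this check is compiled to a
 * shared object and run as if it were an ordinary benchmark, so run_iter()
 * is the entry point Krun invokes for each in-process iteration.)
 */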
-------------------------------------------------------------------------------- /platform_sanity_checks/check_linux_cpu_affinity_pinned.c: --------------------------------------------------------------------------------
 1 | /*
 2 |  * Dummy benchmark that checks the CPU affinity mask for a *pinned* benchmark.
 3 |  *
 4 |  * The mask should contain all CPUs apart from the boot processor (enforced by
 5 |  * a cset shield).
 6 |  *
 7 |  * This code is Linux specific.
 8 |  */
 9 | 
10 | #define _GNU_SOURCE
11 | 
12 | #include <sched.h>
13 | #include <stdio.h>
14 | #include <stdlib.h>
15 | #include <string.h>
16 | #include <unistd.h>
17 | #include <errno.h>
18 | #include <sys/types.h>
19 | 
20 | 
21 | void
22 | run_iter(int param)
23 | {
24 |     pid_t pid;
25 |     cpu_set_t mask;
26 |     size_t mask_sz;
27 |     int ret, i;
28 |     long n_cpus;
29 | 
30 |     (void) param;
31 |     pid = getpid();
32 |     n_cpus = sysconf(_SC_NPROCESSORS_ONLN);
33 |     mask_sz = sizeof(mask);
34 | 
35 |     ret = sched_getaffinity(pid, mask_sz, &mask);
36 |     if (ret != 0) {
37 |         perror("sched_getaffinity");
38 |         exit(EXIT_FAILURE);
39 |     }
40 | 
41 |     if (CPU_COUNT(&mask) != n_cpus - 1) {
42 |         fprintf(stderr, "Wrong number of CPUs in affinity mask\n"
43 |             "got %d, expect %ld\n", CPU_COUNT(&mask), n_cpus - 1);
44 |         exit(EXIT_FAILURE);
45 |     }
46 | 
47 |     if (CPU_ISSET(0, &mask)) {
48 |         fprintf(stderr, "CPU 0 should not be in affinity mask\n");
49 |         exit(EXIT_FAILURE);
50 |     }
51 | 
52 |     for (i = 1; i < n_cpus; i++) {
53 |         if (!CPU_ISSET(i, &mask)) {
54 |             fprintf(stderr, "CPU %d not in affinity mask\n", i);
55 |             exit(EXIT_FAILURE);
56 |         }
57 |     }
58 | }
59 | 
-------------------------------------------------------------------------------- /platform_sanity_checks/check_linux_scheduler.c: --------------------------------------------------------------------------------
 1 | /* Fake benchmark that checks the right scheduling policy is used on Linux */
 2 | 
 3 | #include <sched.h>
 4 | #include <stdio.h>
 5 | #include <stdlib.h>
 6 | #include <unistd.h>
 7 | 
 8 | #define EXPECT_POLICY SCHED_OTHER
 9 | 
10 | void
11 | run_iter(int param) {
12 |     int policy;
13 | 
14 |     (void) param;
15 | 
16 |     policy = sched_getscheduler(0);
17 |     if (policy != EXPECT_POLICY) {
18 |         fprintf(stderr, "Incorrect scheduler in use.\n");
19 |         exit(EXIT_FAILURE);
20 |     }
21 | }
-------------------------------------------------------------------------------- /platform_sanity_checks/check_nice_priority.c: --------------------------------------------------------------------------------
 1 | /* Fake benchmark that checks we are running at high priority */
 2 | 
 3 | #include <stdio.h>
 4 | #include <stdlib.h>
 5 | #include <sys/time.h>
 6 | #include <sys/resource.h>
 7 | 
 8 | #define EXPECT_PRIORITY -20
 9 | 
10 | void
11 | run_iter(int param) {
12 |     int prio = getpriority(PRIO_PROCESS, 0);
13 | 
14 |     (void) param;
15 | 
16 |     if (prio != EXPECT_PRIORITY) {
17 |         fprintf(stderr, "process priority: expect %d got %d\n", EXPECT_PRIORITY, prio);
18 |         exit(EXIT_FAILURE);
19 |     }
20 | }
-------------------------------------------------------------------------------- /platform_sanity_checks/check_openbsd_malloc_options.c: --------------------------------------------------------------------------------
 1 | #include <stdio.h>
 2 | #include <stdlib.h>
 3 | #include <string.h>
 4 | 
 5 | #define EXPECT_OPTS "cfgrux"
 6 | 
 7 | void
 8 | run_iter(int param)
 9 | {
10 |     char *malloc_opts;
11 | 
12 |     (void) param;  /* silence compiler warning */
13 | 
14 |     malloc_opts = getenv("MALLOC_OPTIONS");
15 |     if ((malloc_opts == NULL) || (strcmp(malloc_opts, EXPECT_OPTS) != 0)) {
16 |         fprintf(stderr, "malloc opts not set or not '%s'\n", EXPECT_OPTS);
17 |         exit(EXIT_FAILURE);
18 |     }
19 | }
-------------------------------------------------------------------------------- /platform_sanity_checks/check_user_change.py: --------------------------------------------------------------------------------
 1 | # Fake benchmark that checks we are running as the krun user.
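# (Krun normally switches to a dedicated benchmarking user before starting a
# process execution; this check fails the run if either the USER environment
# variable or the effective UID says we are anyone other than "krun".)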
 2 | 
 3 | import os
 4 | import pwd
 5 | 
 6 | KRUN_USER = "krun"
 7 | 
 8 | 
 9 | def run_iter(n):
10 |     env_user = os.environ.get("USER", None)
11 |     syscall_user = pwd.getpwuid(os.geteuid())[0]
12 | 
13 |     ok = True
14 | 
15 |     if env_user != KRUN_USER:
16 |         ok = False
17 | 
18 |     if syscall_user != KRUN_USER:
19 |         ok = False
20 | 
21 |     if not ok:
22 |         raise RuntimeError(
23 |             "krun user check failed: env=%s, geteuid()=%s, expect=%s" %
24 |             (env_user, syscall_user, KRUN_USER))
25 | 
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | cffi==1.12.2
2 | py==1.10.0
3 | pycparser==2.19
4 | pytest==3.10.1
5 | pytest-cov==2.2.0
6 | subprocess32==3.5.4
-------------------------------------------------------------------------------- /scripts/calibrate_amperf_tolerance.py: --------------------------------------------------------------------------------
 1 | #!/usr/bin/env python2.7
 2 | 
 3 | """
 4 | Draw plots to help you decide a suitable APERF/MPERF tolerance.
 5 | 
 6 | usages:
 7 |   calibrate_amperf_tolerance.py analyse <results_file> <busy_threshold>
 8 |   calibrate_amperf_tolerance.py plot-hist <analysed_file>
 9 |   calibrate_amperf_tolerance.py plot-dropoff <analysed_file>
10 | 
11 | First use "analyse" mode to analyse the results, then use one of the "plot-*"
12 | modes to generate plots from the analysed data.
13 | """
14 | 
15 | import json
16 | import sys
17 | import os
18 | import bz2
19 | from collections import OrderedDict
20 | 
21 | sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
22 | from krun.amperf import check_amperf_ratios
23 | 
24 | 
25 | TOLERANCES = [(i + 1) * 0.0005 for i in xrange(80)]
26 | 
27 | 
28 | def analyse_amperf(jsn, busy_thresh, output_filename):
29 |     # Each of these maps a tolerance to a count
30 |     bad_pexecs = OrderedDict()
31 |     bad_iters = OrderedDict()
32 |     for tol in TOLERANCES:
33 |         # The totals are the same on each iteration, so it's OK to use the
34 |         # value from the final iteration
35 |         total_pexecs, total_iters, bad_pexecs[tol], bad_iters[tol] = \
36 |             _analyse_amperf(jsn, busy_thresh, tol)
37 | 
38 |     bad_pexecs_xs, bad_pexecs_ys = zip(*bad_pexecs.iteritems())
39 |     bad_iters_xs, bad_iters_ys = zip(*bad_iters.iteritems())
40 | 
41 |     # JSON can't deal with tuples
42 |     bad_pexecs_xs, bad_pexecs_ys = list(bad_pexecs_xs), list(bad_pexecs_ys)
43 |     bad_iters_xs, bad_iters_ys = list(bad_iters_xs), list(bad_iters_ys)
44 | 
45 |     ratios = _collect_busy_ratios(jsn, busy_thresh)
46 | 
47 |     dct = {
48 |         "bad_pexecs": [bad_pexecs_xs, bad_pexecs_ys],
49 |         "bad_iters": [bad_iters_xs, bad_iters_ys],
50 |         "total_pexecs": total_pexecs,
51 |         "total_iters": total_iters,
52 |         "ratios": ratios,
53 |     }
54 |     print("\ndumping to %s" % output_filename)
55 |     with open(output_filename, "w") as fh:
56 |         json.dump(dct, fh, indent=2)
57 | 
58 | 
59 | def _analyse_amperf(jsn, busy_thresh, tol):
60 |     """Returns the number of bad process executions and in-process
61 |     iterations"""
62 | 
63 |     num_bad_pexecs = 0
64 |     num_bad_iters = 0
65 |     total_pexecs = 0
66 |     total_iters = 0
67 | 
68 |     sys.stdout.write("\ntolerance: %8.5f: " % tol)
69 |     bounds = 1.0 - tol, 1.0 + tol
70 |     for bench in jsn["wallclock_times"]:
71 |         sys.stdout.write(".")
72 |         sys.stdout.flush()
73 |         for pexec_idx in xrange(len(jsn["wallclock_times"][bench])):
74 |             total_pexecs += 1
75 |             aperfs = jsn["aperf_counts"][bench][pexec_idx]
76 |             mperfs = jsn["mperf_counts"][bench][pexec_idx]
77 |             wc_times = jsn["wallclock_times"][bench][pexec_idx]
78 |             total_iters += len(wc_times)
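            # check_amperf_ratios() gives one result per core; each result's
            # `violations` dict maps a kind of badness to the set of
            # in-process iteration indices it affected (consumed below).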
79 | res = check_amperf_ratios(aperfs, mperfs, wc_times, busy_thresh, 80 | bounds) 81 | bad_iters = set() 82 | for core in res: 83 | # iterate different types of badness 84 | for idxs in core.violations.itervalues(): 85 | bad_iters.update(idxs) 86 | if len(bad_iters) > 0: 87 | num_bad_pexecs += 1 88 | num_bad_iters += len(bad_iters) 89 | return total_pexecs, total_iters, num_bad_pexecs, num_bad_iters 90 | 91 | 92 | def _collect_busy_ratios(jsn, busy_thresh): 93 | ratios = [] 94 | for bench in jsn["wallclock_times"]: 95 | bounds = 0, 2 # irrelevant for this mode really. 96 | for pexec_idx in xrange(len(jsn["wallclock_times"][bench])): 97 | aperfs = jsn["aperf_counts"][bench][pexec_idx] 98 | mperfs = jsn["mperf_counts"][bench][pexec_idx] 99 | wc_times = jsn["wallclock_times"][bench][pexec_idx] 100 | res = check_amperf_ratios(aperfs, mperfs, wc_times, busy_thresh, 101 | bounds) 102 | for core_res in res: 103 | for busy, ratio in zip(core_res.busy_iters, core_res.vals): 104 | if busy: 105 | ratios.append(ratio) 106 | return ratios 107 | 108 | 109 | def plot_hist(jsn, output_filename): 110 | ratios = jsn["ratios"] 111 | n_bins = 1000 112 | 113 | f, ax = plt.subplots(1, 1, sharey=False, figsize=(20, 10)) 114 | 115 | ax.hist(ratios, n_bins, facecolor="red", alpha=0.75) 116 | ax.set_xlabel("Ratio") 117 | ax.set_ylabel("Count") 118 | ax.set_title("Probability Distribution of A/MPERF ratios") 119 | ax.grid(True) 120 | 121 | print("Plotting to %s" % output_filename) 122 | plt.savefig(output_filename) 123 | 124 | 125 | def plot_dropoff(jsn, output_filename): 126 | total_iters, total_pexecs = \ 127 | int(jsn["total_iters"]), int(jsn["total_pexecs"]) 128 | bad_pexecs_xs, bad_pexecs_ys = jsn["bad_pexecs"] 129 | bad_iters_xs, bad_iters_ys = jsn["bad_iters"] 130 | 131 | # Convert from good to bad (inverse) 132 | good_pexecs_ys = [total_pexecs - y for y in bad_pexecs_ys] 133 | good_iters_ys = [total_iters - y for y in bad_iters_ys] 134 | good_pexecs_xs = bad_pexecs_xs # These are the same, just a renaming 135 | good_iters_xs = bad_iters_xs 136 | 137 | # Convert Y-axes to percentages 138 | good_pexecs_ys = [float(x) / total_pexecs * 100 for x in good_pexecs_ys] 139 | good_iters_ys = [float(x) / total_iters * 100 for x in good_iters_ys] 140 | 141 | f, (ax1, ax2) = plt.subplots(1, 2, sharey=False, figsize=(20, 10)) 142 | 143 | ax1.plot(good_pexecs_xs, good_pexecs_ys) 144 | ax1.set_title("Good Process Executions") 145 | ax1.set_xlabel("Tolerance") 146 | ax1.set_ylabel("% pexecs") 147 | ax1.set_ylim([0, 102]) 148 | ax1.grid(color="r", linestyle="--") 149 | ax1.set_yticks([(x + 1) * 5 for x in xrange(20)]) 150 | 151 | ax2.plot(good_iters_xs, good_iters_ys) 152 | ax2.set_title("Good In-Process Iterations") 153 | ax2.set_xlabel("Tolerance") 154 | ax2.set_ylabel("% iters") 155 | ax2.set_ylim([0, 102]) 156 | ax2.grid(color="r", linestyle="--") 157 | ax2.set_yticks([(x + 1) * 5 for x in xrange(20)]) 158 | 159 | print("Plotting to %s" % output_filename) 160 | plt.savefig(output_filename) 161 | 162 | 163 | def load_json(filename, bzip=True): 164 | if bzip: 165 | fn = bz2.BZ2File 166 | else: 167 | fn = open 168 | 169 | with fn(filename) as fh: 170 | jsn = json.load(fh) 171 | return jsn 172 | 173 | 174 | def usage(): 175 | print(__doc__) 176 | sys.exit(1) 177 | 178 | if __name__ == "__main__": 179 | try: 180 | mode = sys.argv[1] 181 | filename = sys.argv[2] 182 | except IndexError: 183 | usage() 184 | 185 | if mode.startswith("plot"): 186 | import matplotlib 187 | matplotlib.use("Agg") 188 | import matplotlib.pyplot as 
plt
189 | 
190 |     dot_index = filename.index(".")
191 |     if mode == "analyse":
192 |         try:
193 |             busy_thresh = int(sys.argv[3])
194 |         except IndexError:
195 |             usage()
196 |         jsn = load_json(filename)
197 |         output_filename = "%s-amstats-%s.json" % (filename[:dot_index],
198 |                                                   busy_thresh)
199 |         analyse_amperf(jsn, busy_thresh, output_filename)
200 |     elif mode == "plot-dropoff":
201 |         jsn = load_json(filename, bzip=False)
202 |         output_filename = "%s-dropoff.pdf" % filename[:dot_index]
203 |         plot_dropoff(jsn, output_filename)
204 |     elif mode == "plot-hist":
205 |         jsn = load_json(filename, bzip=False)
206 |         output_filename = "%s-hist.pdf" % filename[:dot_index]
207 |         plot_hist(jsn, output_filename)
208 |     else:
209 |         print("bad usage")
-------------------------------------------------------------------------------- /scripts/check_envlogs.py: --------------------------------------------------------------------------------
 1 | #!/usr/bin/env python2.7
 2 | 
 3 | """Check the environment logs of many pexecs for similarity.
 4 | 
 5 | Usage: check_envlogs.py <envlog_dir>
 6 | 
 7 | We check:
 8 |  * The SHA1 hashes of the envlogs.
 9 |  * The sizes of the envlogs.
10 | 
11 | When hashing, we ignore `SUDO_COMMAND=` lines, since this contains a variable
12 | (but fixed-length) wrapper filename.
13 | 
14 | The script prints nothing if all is well. If the script prints anything, then
15 | non-unique filesizes or hashes have been detected. If this is the case, the
16 | benchmarking environment deviated between runs (which is bad).
17 | """
18 | 
19 | import os
20 | import sys
21 | import hashlib
22 | import pprint
23 | import stat
24 | from collections import defaultdict
25 | 
26 | 
27 | def hash_file(filename):
28 |     """Hash a file and return a hex string"""
29 | 
30 |     hasher = hashlib.sha1()
31 |     with open(filename, 'rb') as fh:
32 |         for line in fh:
33 |             if line.startswith("SUDO_COMMAND="):
34 |                 continue  # the dash wrapper filename varies, we allow this
35 |             hasher.update(line)
36 |     return hasher.hexdigest()
37 | 
38 | 
39 | def get_key_dct(files):
40 |     """From a list of files in an envlog directory, build a dict mapping a
41 |     benchmark key to filenames"""
42 | 
43 |     key_dct = {}
44 |     for fl in files:
45 |         elems = fl.split("__")
46 |         key = "{}:{}:{}".format(*elems[:-1])
47 |         if key not in key_dct:
48 |             key_dct[key] = []
49 |         key_dct[key].append(fl)
50 |     return key_dct
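# (For example, an envlog named "java__fannkuch_redux__default-java__7" --
# a made-up filename -- would map to the key "java:fannkuch_redux:default-java",
# with the trailing element being the pexec index.)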
 51 | 
 52 | 
 53 | def get_filesize(filename):
 54 |     """Get the size of a file in bytes"""
 55 | 
 56 |     mode = os.stat(filename)
 57 |     return mode[stat.ST_SIZE]
 58 | 
 59 | 
 60 | def print_problems(dct):
 61 |     for key, vals in dct.iteritems():
 62 |         print(" %s:" % key)
 63 |         for idx, val in enumerate(vals):
 64 |             if idx == 4:
 65 |                 print(" ... (%d more)" % (len(vals) - 4, ))
 66 |                 break
 67 |             print(" %s" % val)
 68 |     print("")
 69 | 
 70 | 
 71 | def check(dirname, key, files):
 72 |     """Check the envlogs for the given benchmark key"""
 73 | 
 74 |     hashes = defaultdict(set)
 75 |     sizes = defaultdict(set)
 76 |     for fl in files:
 77 |         path = os.path.join(dirname, fl)
 78 | 
 79 |         # Hash
 80 |         hash = hash_file(path)
 81 |         hashes[hash].add(fl)
 82 | 
 83 |         # Size
 84 |         size = get_filesize(path)
 85 |         sizes[size].add(fl)
 86 | 
 87 |     num_hashes = len(hashes)
 88 |     if num_hashes > 1:
 89 |         print("%s: %d unique hashes:" % (key, num_hashes))
 90 |         print_problems(hashes)
 91 | 
 92 |     num_sizes = len(sizes)
 93 |     if num_sizes > 1:
 94 |         print("%s: %d unique file sizes:" % (key, num_sizes))
 95 |         print_problems(sizes)
 96 | 
 97 | 
 98 | def main(dirname):
 99 |     files = os.listdir(dirname)
100 |     key_dct = get_key_dct(files)
101 |     for key, files in key_dct.iteritems():
102 |         check(dirname, key, files)
103 | 
104 | 
105 | if __name__ == "__main__":
106 |     try:
107 |         dirname = sys.argv[1]
108 |     except IndexError:
109 |         print(__doc__)
110 |         sys.exit(1)
111 |     main(dirname)
-------------------------------------------------------------------------------- /scripts/progress.awk: --------------------------------------------------------------------------------
1 | #!/usr/bin/awk -f
2 | BEGIN {C=0; O=0; E=0; keys=0}
3 | /^keys/ {keys=1}
4 | keys == 1 && $1 == "C" {C++}
5 | keys == 1 && $1 == "E" {E++}
6 | keys == 1 && $1 == "O" {O++}
7 | END {print "C=" C " O=" O " E=" E " " (C+E) / (C + O + E) * 100 "%"}
-------------------------------------------------------------------------------- /scripts/run_krun_at_boot: --------------------------------------------------------------------------------
 1 | #!/bin/sh
 2 | 
 3 | set -e
 4 | 
 5 | if [ $# -lt 1 ]; then
 6 |     echo "run_krun_at_boot <config file> [extra krun args]"
 7 |     exit 1
 8 | fi
 9 | 
10 | # Consume arguments
11 | SCRIPT_PATH=`readlink -f $0`
12 | KRUN_CONFIG_FILE=$1
13 | shift
14 | 
15 | # File needs to exist for `readlink -f` to work
16 | if [ ! -e "${KRUN_CONFIG_FILE}" ]; then
17 |     echo "The config file does not exist"
18 |     exit 1
19 | fi
20 | 
21 | if [ "`readlink -f ${KRUN_CONFIG_FILE}`" != "${KRUN_CONFIG_FILE}" ]; then
22 |     echo "Please supply an absolute path to the config file"
23 |     exit 1
24 | fi
25 | 
26 | # Find Python
27 | if [ "`uname`" = "OpenBSD" ]; then
28 |     PATH=${PATH}:/usr/local/bin
29 | fi
30 | PYTHON=`which python2.7`
31 | 
32 | if [ "${PYTHON}" = "" ]; then
33 |     echo "Failed to find python"
34 |     exit 1
35 | fi
36 | 
37 | KRUN_DIR=`dirname ${SCRIPT_PATH}`/..
38 | EXPERIMENT_DIR=`dirname ${KRUN_CONFIG_FILE}`
39 | KRUN_ARGS="--hardware-reboots --daemonise $* ${KRUN_CONFIG_FILE}"
40 | 
41 | # All is well, invoke Krun.
42 | cd ${EXPERIMENT_DIR} && ${PYTHON} ${KRUN_DIR}/krun.py ${KRUN_ARGS}
-------------------------------------------------------------------------------- /utils/.gitignore: --------------------------------------------------------------------------------
1 | query_turbo
-------------------------------------------------------------------------------- /utils/Makefile: --------------------------------------------------------------------------------
1 | CFLAGS += -Wall -Wextra
2 | 
3 | all: query_turbo
4 | 
5 | clean:
6 | 	rm -f query_turbo
-------------------------------------------------------------------------------- /utils/query_turbo.c: --------------------------------------------------------------------------------
 1 | /*
 2 |  * Small utility to determine if turbo boost is enabled on the *current* core.
 3 |  *
 4 |  * Use taskset to choose which core.
 5 |  */
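/* (For example, "taskset -c 2 ./query_turbo" prints 1 if turbo boost is
 * enabled on core 2, and 0 otherwise.) */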
 6 | 
 7 | #define _GNU_SOURCE
 8 | 
 9 | #include <stdio.h>
10 | #include <stdlib.h>
11 | #include <stdint.h>
12 | 
13 | /* Thermal/power management CPUID leaf */
14 | #define CPUID_THERM_POWER 0x6
15 | 
16 | /* Fields of CPUID_THERM_POWER */
17 | #define CPUID_THERM_POWER_TURBO (1 << 1)
18 | 
19 | 
20 | int
21 | main(void)
22 | {
23 |     uint32_t eax;
24 |     int enabled;
25 | 
26 |     asm volatile(
27 |         "mov %1, %%eax\n\t"
28 |         "cpuid\n\t"
29 |         : "=a" (eax)                 // out
30 |         : "i"(CPUID_THERM_POWER)     // in
31 |         : "ebx", "ecx", "edx");      // clobber
32 | 
33 |     enabled = (eax & CPUID_THERM_POWER_TURBO) != 0;
34 |     printf("%d\n", enabled);
35 | 
36 |     return (EXIT_SUCCESS);
37 | }
-------------------------------------------------------------------------------- /vm_sanity_checks/JavaCheckJVMCIServerEnabled.java: --------------------------------------------------------------------------------
 1 | import java.lang.management.ManagementFactory;
 2 | import com.sun.management.HotSpotDiagnosticMXBean;
 3 | import com.sun.management.VMOption;
 4 | 
 5 | /**
 6 |  * Fake benchmark which crashes if Graal is not correctly enabled
 7 |  */
 8 | class JavaCheckJVMCIServerEnabled implements BaseKrunEntry {
 9 | 
10 |     HotSpotDiagnosticMXBean diagBean;
11 | 
12 |     public static void main(String[] args) {
13 |         new JavaCheckJVMCIServerEnabled().run_iter(666);
14 |     }
15 | 
16 |     public JavaCheckJVMCIServerEnabled() {
17 |         diagBean = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
18 |     }
19 | 
20 |     public void run_iter(int param) {
21 |         /*
22 |          * We want to know that:
23 |          */
24 | 
25 |         /* A) The JVM was built with JVMCI support */
26 |         String vmVers = System.getProperty("java.vm.version");
27 |         if (!vmVers.contains("jvmci")) {
28 |             String msg = "JVM was not built with JVMCI support: java.vm.version=" + vmVers;
29 |             throw new java.lang.IllegalStateException(msg);
30 |         }
31 | 
32 |         /* B) That JVMCI is enabled */
33 |         String enableJVMCI = diagBean.getVMOption("EnableJVMCI").getValue();
34 |         if (!enableJVMCI.equals("true")) {
35 |             String msg = "JVMCI is not enabled: EnableJVMCI=" + enableJVMCI;
36 |             throw new java.lang.IllegalStateException(msg);
37 |         }
38 | 
39 |         /* C) The Graal compiler is selected */
40 |         String useJVMCI = diagBean.getVMOption("UseJVMCICompiler").getValue();
41 |         if (!useJVMCI.equals("true")) {
42 |             String msg = "JVMCI compiler not selected: UseJVMCICompiler=" + useJVMCI;
43 |             throw new java.lang.IllegalStateException(msg);
44 |         }
45 |     }
46 | }
47 | 
-------------------------------------------------------------------------------- /vm_sanity_checks/Makefile: --------------------------------------------------------------------------------
 1 | all: JavaCheckJVMCIServerEnabled.class
 2 | 
 3 | .PHONY: clean
 4 | 
 5 | JavaCheckJVMCIServerEnabled.class: JavaCheckJVMCIServerEnabled.java
 6 | 	if [ "${ENABLE_JAVA}" = "1" ]; then \
 7 | 		CLASSPATH=../iterations_runners/ ${JAVAC} \
 8 | 			JavaCheckJVMCIServerEnabled.java; \
 9 | 	fi
10 | 
11 | clean:
12 | 	rm -f *.class
-------------------------------------------------------------------------------- /vm_sanity_checks/truffleruby_check_graal_enabled.rb: --------------------------------------------------------------------------------
 1 | # Crash if Graal is not enabled.
 2 | def run_iter(param)
 3 |   begin
 4 |     if (not Truffle::Graal.graal?) then
 5 |       raise "Graal is not enabled"
 6 |     end
 7 |   rescue NameError
 8 |     raise "Failed to find the Truffle::Graal.graal? method."
9 | end 10 | end 11 | 12 | if __FILE__ == $0 13 | run_iter 666 14 | end 15 | --------------------------------------------------------------------------------