├── .gitignore ├── LICENSE ├── README.md ├── cve_stat ├── TODO ├── _linux_kernel_cve.py ├── cves_per_file.py ├── dump_kernel_cve_infos.py ├── percentile.py ├── plot.sh ├── report │ ├── broken_to_reported.png │ ├── broken_to_reported_linear.png │ ├── report.md │ ├── report_to_fix_authored.png │ ├── report_to_fix_authored_linear.png │ ├── report_to_fix_committed.png │ └── report_to_fix_committed_linear.png ├── stat.py └── vulns_stat.sh ├── exp.py ├── exps ├── blogbench ├── blogbench-still ├── gcma ├── gcma-blogbench ├── lmbench ├── still ├── still-blogbench └── update-lazybox ├── gcma_exps.py ├── generate_exp_conf.py ├── git_helpers ├── __find_commit_in.sh ├── _git.py ├── _patch.py ├── authors.py ├── commits_on_text.py ├── decorate_backport_commits.sh ├── decorate_backport_patch.py ├── find_change_from.py ├── fixed_bugs.sh ├── fixes.py ├── format_backport_patches.sh ├── merge_topic.sh ├── nr_commits.py ├── patch_parse.py ├── patches_for.sh ├── profile_author.py ├── release_history.sh ├── relstat.py └── timeline_of_commit.py ├── gnuplot ├── .gitignore ├── clustered_box-yerr_datgen.py ├── clustered_box_datgen.py ├── example.sh ├── heatmap_datgen.py ├── lzstyle.gp ├── pdftopng ├── plot.py ├── plot_stdin.sh ├── plot_stdout.py ├── scatter-yerr_datgen.py ├── scatter_datgen.py ├── show_term_colors.py ├── splitplot.sh └── transform_data_format.py ├── humble_ci ├── README.md ├── hci.py └── noti_update.sh ├── kill_childs_self.py ├── kill_run_exps.py ├── linux_hack ├── README ├── build.sh ├── build_install_perf.sh ├── list_mm_patches.py ├── ls_kernels.py ├── maintainers.py ├── release_schedule.py ├── rm_kernels.sh ├── set_kernel.py ├── stable_commits_check.py ├── stats │ ├── active_maintainers.sh │ └── nr_maintainers.sh └── turn_debug_msg.sh ├── parallel_runs ├── README.md ├── exp.py ├── gcma_exps.py ├── generate_exp_conf.py ├── kill_childs_self.py ├── kill_run_exps.py ├── remote.tcl ├── remote_cmds.exp ├── remote_exp.py ├── remote_exps.exp ├── remote_exps_registered.exp ├── 
remote_reboot.exp ├── remote_set_kernel.exp ├── remote_stat.exp ├── remote_zram_swap.exp ├── run_exps.py ├── ssh_args.py └── test_run_exps │ ├── bye │ ├── check.exps │ ├── childs_processes_background.exps │ ├── hello │ ├── infini_root_test.exps │ ├── multiple_lines.exps │ ├── multiple_mains.exps │ ├── run_secs.py │ ├── spawn_process.py │ └── test_multiple_expss.py ├── parallel_ssh_cmds └── ssh_parallel.sh ├── remote.tcl ├── remote_cmds.exp ├── remote_exp.py ├── remote_exps.exp ├── remote_exps_registered.exp ├── remote_reboot.exp ├── remote_set_kernel.exp ├── remote_stat.exp ├── remote_zram_swap.exp ├── repeat_runs ├── README ├── __common.sh ├── _gen_exp_cfg.sh ├── _parse.sh ├── _stat.sh ├── aggregate_results.sh ├── examples │ ├── example.config │ └── memprofile │ │ ├── parsers │ │ ├── commlog.sh │ │ ├── memfree.sh │ │ ├── perf.stat.sh │ │ └── pswpin.sh │ │ ├── runners │ │ ├── back │ │ │ ├── 0001_memfree_stat.sh │ │ │ ├── 0002_pswpin_stat.sh │ │ │ └── 0003_memfp.sh │ │ ├── end │ │ │ └── 0001_kill_perf.sh │ │ ├── main │ │ │ └── 0001_sleep.sh │ │ └── start │ │ │ ├── 0001_sync_drop_caches.sh │ │ │ └── 0002_perf_stat.sh │ │ └── statists │ │ └── memused.avg.sh ├── org_results.sh ├── post.sh └── run.sh ├── run_exps.py ├── scripts ├── bdstat.py ├── cache_hierarchy.sh ├── chswpdev.sh ├── cpuloadstat.sh ├── dropcaches.sh ├── fs.py ├── gen_report │ ├── acdf.py │ ├── ltldat.py │ └── parse_still.py ├── hwinfo │ ├── blockdevs.sh │ ├── cpumodel.sh │ ├── eth_drivers.sh │ ├── ipaddrs.sh │ ├── lzhwinfo.sh │ ├── nics.sh │ ├── nr_cpusocks.sh │ ├── nr_cpuspersock.sh │ ├── nr_hwthrs.sh │ ├── nr_thrspercpu.sh │ └── sz_mem.sh ├── idleof ├── kernel_dev │ ├── DEPRECATED │ ├── build.sh │ ├── build_install_perf.sh │ ├── ls_kernels.py │ ├── rm_kernels.sh │ ├── set_kernel.py │ └── stats │ │ ├── active_maintainers.sh │ │ └── nr_maintainers.sh ├── memcg_mspike.sh ├── memfp.sh ├── mysql │ ├── backup.sh │ ├── kill_mysqld.sh │ ├── reset.sh │ ├── restore.sh │ └── start_mysqld.sh ├── nr_thrs.sh ├── 
perf │ ├── .gitignore │ ├── lat_trace.py │ ├── lat_trace_cdf.py │ ├── lat_trace_stat.py │ ├── lbperfutil.py │ ├── nr_evt.py │ ├── nr_trace.py │ ├── pick_field.py │ ├── plot_swptrace.sh │ ├── pr_evt.py │ ├── sample.py │ ├── stat_field.py │ └── trace_swpio.sh ├── ply │ ├── .gitignore │ ├── README │ ├── TODO │ ├── callstack_kprobe.sh │ ├── fetch-ply.sh │ ├── latency_kprobe.sh │ ├── nr_calls_kprobe.sh │ └── xtime_kprobe.sh ├── pr_cpuinfo.py ├── repeat.sh ├── report │ ├── files_to.py │ ├── fmt_tbl.py │ ├── memfree_to_used.py │ ├── paths.sh │ ├── pinatrace2record.sh │ ├── recs2tbl.py │ ├── recs_to_diff.py │ ├── statof.py │ ├── tbl2recs.py │ └── yzoom.py ├── run_memcg_lim.sh ├── run_memcg_lim_spike.sh ├── subprocs.py ├── turn_thp.sh ├── ufsi.py ├── vmastat.py ├── wait_machine.sh ├── wait_workof.sh └── zram_swap.sh ├── ssh_args.py ├── test_run_exps ├── bye ├── check.exps ├── childs_processes_background.exps ├── hello ├── infini_root_test.exps ├── multiple_lines.exps ├── multiple_mains.exps ├── run_secs.py ├── spawn_process.py └── test_multiple_expss.py ├── unsorted └── unwrap_text.py └── workloads ├── blogbench └── blogbench.sh ├── cloudsuite ├── datacaching │ ├── average_60results.py │ ├── build_dockerimages.sh │ ├── cleanup.sh │ ├── datacaching.sh │ └── startserver.sh ├── fetch-cloudsuite.sh └── webserving │ ├── build_dockerimages.sh │ ├── cleanup-webserving.sh │ ├── parse_summary.py │ ├── rm_dockerimages.sh │ ├── run-webserving.sh │ ├── setup-webserving.sh │ ├── summary.sh │ └── webserving.sh ├── ebizzy ├── install.sh └── run-spf-test.sh ├── gcma ├── gcma.sh └── gcma_stat.sh ├── kbuild └── kbuild.sh ├── mem_stress ├── Makefile ├── README ├── mem_stress.sh └── stress.c ├── mosbench ├── .gitignore ├── fetch-mosbench.sh ├── metis │ ├── build.sh │ └── run.sh └── psearchy │ ├── .gitignore │ ├── build.sh │ └── run.sh ├── oltpbench ├── build.sh └── fetch-src.sh └── raspistill └── raspistill.py /.gitignore: 
-------------------------------------------------------------------------------- 1 | *.tar.xz 2 | *.swp 3 | *.o 4 | *.pyc 5 | 6 | workloads/kbuild/build_dir 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Lazybox 2 | ======= 3 | 4 | `lazybox` is a toolbox for helping lazy hackers. 5 | 6 | History of Lazybox and ongoing reorganization 7 | ============================================= 8 | 9 | The project started as a collection of scripts that help automation of 10 | performance evaluation experiments. More specifically, for automating runs and 11 | terminations of workloads in parallel, e.g., running a workload with profiler 12 | with variable a/b/c. `run_exps.py` was the main body. 13 | 14 | Some scripts for misc works have started being added into scripts/ directory. 15 | As time goes by, more and more tools have been added. Some tools are even placed 16 | under the root, instead of the `scripts/` directory. As a result, `lazybox` 17 | became just a collection of random tools. But the original tools 18 | (`run_exps.py` and its friends) for the specific purpose are still placed at 19 | the root, and README was wrongly describing this project. Some tools under 20 | `scripts/` have also become large enough to deserve a directory under the root. 21 | 22 | To make things easier to find and manage, we started reorganization from 23 | 2024-03-02. Because some projects are apparently depending on lazybox, we will 24 | try to proceed in non-destructive ways. We will keep old files and copy those to new 25 | location, notify the alternatives, wait until existing users update to use the 26 | new location, and finally destroy old things. 27 | 28 | As of 2024-03-02, the original part of this project (`run_exps.py` and its 29 | friends) is copied to `parallel_runs`. All users should update their setups to 30 | use the tools under the new directory.
# [dump fragment: tail of /README.md, lines 30-39]
# The files under the root will soon be deleted.
#
# Version Compatibility
# =====================
#
# Lazybox v1.0 was released by 2020-01-01.  Lazybox later than that version
# will not be strictly compatible with v1.0.  Therefore, if you have scripts
# that depend on Lazybox v1.0 or earlier versions, please use the older
# version or test again with the newer version.
#
# --------------------------------------------------------------------------------
# /cve_stat/TODO:
# --------------------------------------------------------------------------------
# Questions to answer
#
# - How many CVEs comes out?
# - How long it takes from found to be fixed?
# - How long it takes to be found?
# - How is it for different stable kernels?
# - How is it for specific time period?
# - How is it for specific CVSS scores?
# --------------------------------------------------------------------------------
# /cve_stat/cves_per_file.py:
# --------------------------------------------------------------------------------
#!/usr/bin/env python3

'''
Count number of CVEs that affecting specific files and print the output sorted
by the numbers.  Files can be specified using root directory and maximum
depth, like 'du' command.

Examples:

    $ git clone git://git.kernel.org/pub/scm/linux/security/vulns.git \
            ../../vulns
    $ ./cves_per_file.py ../../vulns/cve/published/*/*.mbox --root mm/damon/
    1 mm/damon/vaddr-test.h
    2 mm/damon/dbgfs.c
    $
    $ ./cves_per_file.py ../../vulns/cve/published/*/*.mbox --max_depth 1 \
            | tail -n 5
    22 kernel
    62 arch
    99 fs
    119 net
    351 drivers
    $
    $ ./cves_per_file.py ../../vulns/cve/published/*/*.mbox --max_depth 1 \
            --root drivers | tail -n 5
    11 drivers/i2c
    11 drivers/usb
    20 drivers/infiniband
    56 drivers/gpu
    81 drivers/net
    $
    $ ./cves_per_file.py ../../vulns/cve/published/*/*.mbox --max_depth 1 \
            --root drivers/net | tail -n 5
    2 drivers/net/can
    2 drivers/net/dsa
    6 drivers/net/usb
    26 drivers/net/wireless
    39 drivers/net/ethernet
'''

import argparse
import sys

def main():
    parser = argparse.ArgumentParser()
    # NOTE(review): metavar strings lost their <...> placeholders in this
    # dump; reconstructed -- confirm against upstream.
    parser.add_argument('cve_mbox', metavar='<file>', nargs='+',
            help='cve description mbox file')
    parser.add_argument('--root', metavar='<dir>',
            help='root of files to count')
    parser.add_argument('--max_depth', type=int, metavar='<depth>',
            help='similar to that of du')
    args = parser.parse_args()

    if args.max_depth is not None and args.root:
        # make --max_depth relative to --root, like 'du'
        args.max_depth += len(args.root.split('/'))

    counts = {}  # affected file path (possibly truncated) -> nr CVEs

    for mbox in args.cve_mbox:
        if mbox == 'stdin':
            cve_description = sys.stdin.read()
        else:
            with open(mbox, 'r') as f:
                cve_description = f.read()

        # the affected-files list is one paragraph of the mbox body
        for par in cve_description.split('\n\n'):
            lines = par.split('\n')
            if lines[0] != 'The file(s) affected by this issue are:':
                continue
            for affected in lines[1:]:
                affected = affected.strip()
                if args.root is not None and not affected.startswith(
                        args.root):
                    continue
                # 'is not None' rather than truthiness, so that an explicit
                # '--max_depth 0' is honored (it was silently ignored before)
                if args.max_depth is not None:
                    affected = '/'.join(affected.split('/')[:args.max_depth])
                counts[affected] = counts.get(affected, 0) + 1

    # ascending by count, like the examples in the docstring
    for path in sorted(counts.keys(), key=lambda p: counts[p]):
        print(counts[path], path)


if __name__ == '__main__':
    main()
# --------------------------------------------------------------------------------
# /cve_stat/dump_kernel_cve_infos.py:
# --------------------------------------------------------------------------------
#!/usr/bin/env python3

'''Dump LinuxKernelCve information for CVEs since 2019 into a json file.'''

import argparse
import json
import os
import sys

# directory containing this script; used to locate sibling repos by default
bindir = os.path.dirname(sys.argv[0])

import _linux_kernel_cve

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--linux_kernel_cves', metavar='<dir>',
            default=os.path.join(bindir, '..', '..', 'linux_kernel_cves'),
            help='path to local linux_kernel_cves repo')
    parser.add_argument('--linux', metavar='<dir>',
            default=os.path.join(bindir, '..', '..', 'linux'),
            help='path to local linux repo')
    parser.add_argument('--output', metavar='<file>',
            default='dumped_cve_info.json',
            help='path to json file to dump the information')
    args = parser.parse_args()

    data_dir = os.path.join(args.linux_kernel_cves, 'data')
    with open(os.path.join(data_dir, 'kernel_cves.json'), 'r') as f:
        main_infos = json.load(f)
    with open(os.path.join(data_dir, 'stream_data.json'), 'r') as f:
        stream_breaks = json.load(f)
    with open(os.path.join(data_dir, 'stream_fixes.json'), 'r') as f:
        stream_fixes = json.load(f)

    to_dump = {}
    for cve_name in main_infos:
        # CVE names are 'CVE-<year>-<number>'; skip entries before 2019
        cve_year = int(cve_name.split('-')[1])
        if cve_year < 2019:
            continue
        to_dump[cve_name] = _linux_kernel_cve.LinuxKernelCve(cve_name,
                args.linux_kernel_cves, main_infos, stream_breaks,
                stream_fixes, args.linux).to_kvpairs()
        print(json.dumps(to_dump[cve_name], indent=4))
    with open(args.output, 'w') as f:
        json.dump(to_dump, f, indent=4)

if __name__ == '__main__':
    main()
# --------------------------------------------------------------------------------
# /cve_stat/percentile.py:
# --------------------------------------------------------------------------------
#!/usr/bin/env python3

'''
Print 0-100 percentile distributions, in days, of a CVE handling latency
metric for each tree of interest, from a dumped LinuxKernelCve json file.
The output is in the gnuplot 'recs' format (datasets separated by two blank
lines), as consumed by plot.sh.
'''

import argparse

import _linux_kernel_cve

# trees to report on, in display order
TREES = ['mainline', '6.4', '6.1', '5.15', '5.10', '5.4', '4.19', '4.14']

def secs_to_days(secs):
    '''Convert seconds to (fractional) days.'''
    return secs / 3600 / 24

def pr_percentiles(values):
    '''Print "<percentile> <days>" lines for the 0-100 percentiles of values.

    values: latency values in seconds; may be unsorted.  Nothing is printed
    for an empty list (previously this raised IndexError).
    '''
    if not values:
        return
    values = sorted(values)
    # shift the end points by one second so a plotted range strictly covers
    # the minimum and maximum samples
    print('0 %d' % (secs_to_days(values[0] - 1)))
    for i in range(1, 100):
        print('%d %d' % (i, secs_to_days(values[int(i / 100 * len(values))])))
    print('100 %d' % (secs_to_days(values[-1] + 1)))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('dumpfile',
            help='dumped LinuxKernelCve json file')
    parser.add_argument('--metric',
            choices=[
                'report_to_fix_authored',
                'report_to_fix_committed',
                'broken_to_reported',
                ],
            help='metric to show')
    parser.add_argument('--min_val', type=int,
            help='minimum value of the metric to count')
    args = parser.parse_args()

    cves = _linux_kernel_cve.load_kernel_cves_from_json(args.dumpfile).values()

    report_to_fix_committed_secs = {}   # tree: [sec]
    report_to_fix_authored_secs = {}    # tree: [sec]
    broken_to_reported_secs = {}        # tree: [sec]
    for tree in TREES:
        # initialize unconditionally, so that a tree having no matching CVE
        # cannot cause KeyError in the printing loop below
        report_to_fix_committed_secs[tree] = []
        report_to_fix_authored_secs[tree] = []
        broken_to_reported_secs[tree] = []
        for cve in cves:
            if not tree in cve.fix_commits:
                continue

            secs = cve.fix_commits[tree].committed_date - cve.added_date
            if not args.min_val or secs >= args.min_val:
                report_to_fix_committed_secs[tree].append(secs)

            secs = cve.fix_commits[tree].authored_date - cve.added_date
            if not args.min_val or secs >= args.min_val:
                report_to_fix_authored_secs[tree].append(secs)

            if not tree in cve.break_commits:
                continue
            secs = cve.added_date - cve.break_commits[tree].committed_date
            if not args.min_val or secs >= args.min_val:
                broken_to_reported_secs[tree].append(secs)

    for tree in TREES:
        print(tree)
        if args.metric == 'report_to_fix_authored':
            pr_percentiles(report_to_fix_authored_secs[tree])
        elif args.metric == 'report_to_fix_committed':
            pr_percentiles(report_to_fix_committed_secs[tree])
        elif args.metric == 'broken_to_reported':
            pr_percentiles(broken_to_reported_secs[tree])

        # two blank lines separate per-tree datasets for gnuplot
        # NOTE(review): indentation was lost in this dump; these are assumed
        # inside the loop, matching plot.sh's per-tree datasets -- confirm
        print()
        print()

if __name__ == '__main__':
    main()
# [dump fragment: head of /cve_stat/plot.sh, lines 1-31, continued on the next physical line]
# -------------------------------------------------------------------------------- /cve_stat/plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 " 6 | exit 1 7 | fi 8 | 9 | cve_json=$1 10 | 11 | bindir=$(dirname "$0") 12 | 13 | "$bindir/percentile.py" "$cve_json" \ 14 | --metric broken_to_reported | \ 15 | "$bindir/../gnuplot/plot.py" --data_fmt recs --type scatter \ 16 | --pointsize 1 --xtitle "Percentile" \ 17 | --ytitle "Days from a CVE is committed to be reported" \ 18 | --size 1024,768 \ 19 | --font "TimesNewRoman" "broken_to_reported_linear.png" 20 | 21 | "$bindir/percentile.py" "$cve_json" \ 22 | --metric broken_to_reported --min_val 1 | \ 23 | "$bindir/../gnuplot/plot.py" --data_fmt recs --type scatter \ 24 | --pointsize 1 --xtitle "Percentile" \ 25 | --ytitle "Days from a CVE is committed to be reported, excluding <1 (logscale)" \ 26 | --size 1024,768 \ 27 | --font "TimesNewRoman" --ylog "broken_to_reported.png" 28 | 29 | "$bindir/percentile.py" "$cve_json" \ 30 | --metric report_to_fix_authored | \ 31 | "$bindir/../gnuplot/plot.py" --data_fmt recs --type scatter \
32 | --pointsize 1 --xtitle "Percentile" \ 33 | --ytitle "Days from a CVE be reported to the fix be authored" \ 34 | --size 1024,768 \ 35 | --font "TimesNewRoman" "report_to_fix_authored_linear.png" 36 | 37 | "$bindir/percentile.py" "$cve_json" \ 38 | --metric report_to_fix_committed | \ 39 | "$bindir/../gnuplot/plot.py" --data_fmt recs --type scatter \ 40 | --pointsize 1 --xtitle "Percentile" \ 41 | --ytitle "Days from a CVE be reported to the fix be committed" \ 42 | --size 1024,768 \ 43 | --font "TimesNewRoman" "report_to_fix_committed_linear.png" 44 | 45 | "$bindir/percentile.py" "$cve_json" \ 46 | --metric report_to_fix_authored --min_val 1 | \ 47 | "$bindir/../gnuplot/plot.py" --data_fmt recs --type scatter \ 48 | --pointsize 1 --xtitle "Percentile" \ 49 | --ytitle "Days from a CVE be reported to the fix be authored, excluding <1 (logscale)" \ 50 | --size 1024,768 \ 51 | --font "TimesNewRoman" --ylog "report_to_fix_authored.png" 52 | 53 | "$bindir/percentile.py" "$cve_json" \ 54 | --metric report_to_fix_committed --min_val 1 | \ 55 | "$bindir/../gnuplot/plot.py" --data_fmt recs --type scatter \ 56 | --pointsize 1 --xtitle "Percentile" \ 57 | --ytitle "Days from a CVE be reported to the fix be committed, exlcuding <1 (logscale)" \ 58 | --size 1024,768 \ 59 | --font "TimesNewRoman" --ylog "report_to_fix_committed.png" 60 | -------------------------------------------------------------------------------- /cve_stat/report/broken_to_reported.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sjp38/lazybox/37c62da3a03166a02a5decdc301d2fd47061d280/cve_stat/report/broken_to_reported.png -------------------------------------------------------------------------------- /cve_stat/report/broken_to_reported_linear.png: -------------------------------------------------------------------------------- 
# [dump fragment: report PNG placeholder URLs]
# https://raw.githubusercontent.com/sjp38/lazybox/37c62da3a03166a02a5decdc301d2fd47061d280/cve_stat/report/broken_to_reported_linear.png -------------------------------------------------------------------------------- /cve_stat/report/report_to_fix_authored.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sjp38/lazybox/37c62da3a03166a02a5decdc301d2fd47061d280/cve_stat/report/report_to_fix_authored.png -------------------------------------------------------------------------------- /cve_stat/report/report_to_fix_authored_linear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sjp38/lazybox/37c62da3a03166a02a5decdc301d2fd47061d280/cve_stat/report/report_to_fix_authored_linear.png -------------------------------------------------------------------------------- /cve_stat/report/report_to_fix_committed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sjp38/lazybox/37c62da3a03166a02a5decdc301d2fd47061d280/cve_stat/report/report_to_fix_committed.png -------------------------------------------------------------------------------- /cve_stat/report/report_to_fix_committed_linear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sjp38/lazybox/37c62da3a03166a02a5decdc301d2fd47061d280/cve_stat/report/report_to_fix_committed_linear.png
# --------------------------------------------------------------------------------
# /cve_stat/vulns_stat.sh:
# --------------------------------------------------------------------------------
#!/bin/bash

# Show how the total number of Linux kernel CVEs published and rejected by the
# community's CNA changes per day, for the last N days, in M-day intervals.
#
# Example usage:
# $ git clone git://git.kernel.org/pub/scm/linux/security/vulns.git
# $ ./vulns_stat.sh ~/vulns/ 46 7
#
# 2024-02-27 103 2
# 2024-03-05 378 4
# 2024-03-12 405 11
# 2024-03-19 456 16
# 2024-03-26 502 18
# 2024-04-02 557 19
# 2024-04-09 692 20

set -e

if [ $# -ne 3 ]
then
	# NOTE(review): the <...> placeholders were stripped in this dump;
	# reconstructed from the variable names below -- confirm upstream
	echo "Usage: $0 <vulns_repo_path> <max_days> <interval_days>"
	exit 1
fi

vulns_path=$1
max_days=$2
interval=$3

if [ ! -d "$vulns_path" ]
then
	echo "$vulns_path not found"
	exit 1
fi

cd "$vulns_path"

# quietly move to the latest upstream state of the vulns repo
git remote update &> /dev/null
git checkout origin/master &> /dev/null

# column header.  NOTE(review): the original header's <...> tokens were
# stripped in this dump (only whitespace remained); reconstructed -- confirm
echo "<date> <nr_published> <nr_rejected>"
for ((i = "$max_days" ; i > 0 ; i -= "$interval" ))
do
	date=$(date -d "-$i day" '+%Y-%m-%d')
	# last commit at or before that date; count the CVE json files it tracks
	commit=$(git log origin/master --until "$date" -1 --pretty=%H)
	nr_cves=$(git ls-tree "$commit" -- cve/published/*/*.json | wc -l)
	nr_rejects=$(git ls-tree "$commit" -- cve/rejected/*/*.json | wc -l)
	echo "$date $nr_cves $nr_rejects"
done
# [dump fragment: exps files]
# -------------------------------------------------------------------------------- /exps/blogbench: -------------------------------------------------------------------------------- 1 | main ./workloads/blogbench/blogbench.sh >> ./out/blogbench.blogbench 2 | -------------------------------------------------------------------------------- /exps/blogbench-still: -------------------------------------------------------------------------------- 1 | main ./workloads/blogbench/blogbench.sh >> ./out/blogbench-still.blogbench 2 | back while true; do \ 3 | ./workloads/raspistill/raspistill.py 30 10 /run/shm/img.jpg >> \ 4 | ./out/blogbench-still.still; done 5 | -------------------------------------------------------------------------------- /exps/gcma: -------------------------------------------------------------------------------- 1 | main ./workloads/gcma/gcma.sh 30 \ 2 | "1 64 128 256 512 1024 2048 4096 8192 16384
# [dump fragment: tail of /exps/gcma and remaining exps files]
# 32768 50000" >> ./out/gcma.gcma 3 | -------------------------------------------------------------------------------- /exps/gcma-blogbench: -------------------------------------------------------------------------------- 1 | main nice --19 ./workloads/gcma/gcma.sh 30 \ 2 | "1 64 128 256 512 1024 2048 4096 8192 16384 32768 50000" >> \ 3 | ./out/gcma-blogbench.gcma 4 | back while true; do \ 5 | ./workloads/blogbench/blogbench.sh >> \ 6 | ./out/gcma-blogbench.blogbench; done 7 | -------------------------------------------------------------------------------- /exps/lmbench: -------------------------------------------------------------------------------- 1 | main cd /home/sjpark/lmbench/lmbench3; make rerun 2 | -------------------------------------------------------------------------------- /exps/still: -------------------------------------------------------------------------------- 1 | main ./workloads/raspistill/raspistill.py 30 10 /run/shm/img.jpg >> ./out/still.still 2 | -------------------------------------------------------------------------------- /exps/still-blogbench: -------------------------------------------------------------------------------- 1 | main nice --19 ./workloads/raspistill/raspistill.py 30 10 /run/shm/img.jpg \ 2 | >> ./out/still-blogbench.still 3 | back ./workloads/blogbench/blogbench.sh >> \ 4 | ./out/still-blogbench.blogbench 5 | -------------------------------------------------------------------------------- /exps/update-lazybox: -------------------------------------------------------------------------------- 1 | main git remote update && git checkout origin/master 2 |
# --------------------------------------------------------------------------------
# /gcma_exps.py:
# --------------------------------------------------------------------------------
#!/usr/bin/env python3

'''Drive the GCMA evaluation experiments on a remote test machine via expect
scripts: set the kernel, optionally configure zram swap, then run each
experiment set.'''

import os
import sys

import ssh_args

# remote access parameters for the expect scripts
user, target, port, password = ssh_args.parse_input()

lbpath = "/home/%s/lazybox" % user

# Raspberry Pi boards use a different bootloader handler
bootloader = "rasp2" if target == "raspberrypi" else "grub"

k_cma = "cma"
k_gcma = "gcma"
k_vanilla = "vanilla"
kernels = [k_cma, k_gcma, k_vanilla]
kparam_cma = "coherent_pool=16M cma=64M smsc95xx.turbo_mode=N"
expspath = "./exps/"
exps = ["gcma", "gcma-blogbench",
        "still", "still-blogbench", "blogbench-still", "blogbench"]

for kernel in kernels:
    for exp in exps:
        # GCMA-specific experiments are meaningless on the vanilla kernel
        if kernel == k_vanilla and (exp == "gcma" or exp == "gcma-blogbench"):
            continue
        # CMA/GCMA kernels need the reserved-area boot parameters
        kernel_param = kparam_cma if kernel in (k_cma, k_gcma) else ""
        os.system("expect ./remote_set_kernel.exp %s %s %s %s %s %s %s %s" % (
            user, target, port, password, lbpath, bootloader, kernel,
            kernel_param))
        if kernel == k_gcma:
            os.system("expect ./remote_zram_swap.exp %s %s %s %s %s 100M" % (
                user, target, port, password, lbpath))
        os.system("expect ./remote_exps.exp %s %s %s %s %s %s" % (
            user, target, port, password, lbpath, expspath + exp))
# [dump fragment: head of /generate_exp_conf.py, lines 1-15]
# -------------------------------------------------------------------------------- /generate_exp_conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Stub for experiments config file generator 5 | 6 | Experiments configuration file describes which experiments should be executed. 7 | For simplicity, it is recommended to be linearly interpretable rather than 8 | procedural. In other words, `loop` or `goto` is not recommended inside 9 | experiments config file. 10 | 11 | But, constraining people to write down repeating experiments manually is crime 12 | against humanity. For the reason, it is recommended to write down each user's 13 | own experiments config file generator using their familiar tools. To help 14 | making each user's own generator, this file contains essential code for 15 | automated experiments config file generation.
# [dump fragment: tail of /generate_exp_conf.py, lines 16-62]
# 16 | 17 | Users could use this file for their purpose by following steps: 18 | 1. Copy this file 19 | 2. Edit main loop inside the file which commented to be 20 | 3. Run modified copy and redirect stdout to appropriate file 21 | 22 | """ 23 | 24 | __author__ = "SeongJae Park" 25 | __email__ = "sj38.park@gmail.com" 26 | __copyright__ = "Copyright (c) 2013-2020, SeongJae Park" 27 | __license__ = "GPLv2" 28 | 29 | import exp 30 | 31 | exps = [] 32 | 33 | # !!! Edit below loop to fit on your purpose using Python language 34 | for arg1 in [1, 2, 3, 4, 5]: 35 | for arg2 in ['a', 'b', 'abc']: 36 | starts = ["echo hi"] 37 | mains = ["echo main with %s %s > main.out" % (arg1, arg2)] 38 | # For multiple commands, code could be seems like below 39 | # mains = ["echo main with %s %s" % (arg1, arg2), 40 | # "echo main2 with %s %s" % (arg2, arg1)] 41 | backs = ["echo back with %s" % (arg1)] 42 | ends = ["echo buy"] 43 | checks = ["grep main main.out"] 44 | 45 | # Do not forget to match indentation 46 | exps.append(exp.Exp(starts, mains, backs, ends, checks)) 47 | 48 | # !!! Do not edit code below 49 | for exp in exps: 50 | for start in exp.start_cmds: 51 | print("start %s" % start) 52 | for main in exp.main_cmds: 53 | print("main %s" % main) 54 | for back in exp.back_cmds: 55 | print("back %s" % back) 56 | for end in exp.end_cmds: 57 | print("end %s" % end) 58 | for check in exp.check_cmds: 59 | print("check %s" % check) 60 | 61 | print('')
# --------------------------------------------------------------------------------
# /git_helpers/__find_commit_in.sh:
# --------------------------------------------------------------------------------
#!/bin/bash

# Find, within a commit range, the commit matching a given subject (and
# optionally author name and author date).

find_commit_of_result=""
# set find_commit_of_result to "<short hash> <subject>[ (<author date>)]" of
# the first matching commit, or to an empty string when no commit matches
find_commit_of()
{
	subject=$1
	author=$2
	author_date=$3
	commit_range=$4

	pretty="%h %s"
	string_to_grep=$subject
	if [ ! "$author_date" = "" ]
	then
		pretty+=" (%ad)"
		string_to_grep+=" ($author_date)"
	fi

	# NOTE(review): grep interprets the subject as a regular expression;
	# subjects containing regex metacharacters may match incorrectly
	if [ "$author" = "" ]
	then
		hash_subject=$(git log --pretty="$pretty" "$commit_range" | \
			grep -i -m 1 "$string_to_grep")
	else
		hash_subject=$(git log --author "$author" --pretty="$pretty" \
			"$commit_range" | grep -i -m 1 "$string_to_grep")
	fi

	find_commit_of_result=$hash_subject
}

pr_usage_exit()
{
	message=$1
	exit_code=$2

	if [ ! "$message" = "" ]
	then
		echo
		echo "$message"
	fi
	# NOTE(review): the <...> placeholders in this usage text were stripped
	# in the dump; reconstructed from the option handling below -- confirm
	echo "
Usage: $0 [OPTION]... <commit range>

Find a commit in <commit range> that has the author name and subject of
<commit>

OPTION
  --hash_only			Print hash only
  --commit <commit>		Hash of the commit to find
  --title <title>		Title of the commit to find
  --author <author>		Author of the commit to find
  --author_date <author_date>	Author date of the commit to find
  --repo <dir>			Path to the local git repo
"
	exit "$exit_code"
}

hash_only="false"
repo="./"
while [ $# -ne 0 ]
do
	case $1 in
	"--hash_only")
		hash_only="true"
		shift 1
		continue
		;;
	"--commit")
		if [ $# -lt 2 ]
		then
			pr_usage_exit "--commit wrong" 1
		fi
		commit_to_find=$2
		shift 2
		continue
		;;
	"--title")
		if [ $# -lt 2 ]
		then
			pr_usage_exit "--title wrong" 1
		fi
		title_to_find=$2
		shift 2
		continue
		;;
	"--author")
		if [ $# -lt 2 ]
		then
			pr_usage_exit "<author> is not given" 1
		fi
		author=$2
		shift 2
		continue
		;;
	"--author_date")
		if [ $# -lt 2 ]
		then
			pr_usage_exit "<author_date> is not given" 1
		fi
		author_date=$2
		shift 2
		continue
		;;
	"--repo")
		if [ $# -lt 2 ]
		then
			pr_usage_exit "<dir> is not given" 1
		fi
		repo=$2
		shift 2
		continue
		;;
	*)
		if [ ! "$commit_range" = "" ]
		then
			pr_usage_exit "multiple <commit range>" 1
		fi
		commit_range=$1
		shift 1
		continue
		;;
	esac
done

if [ "$commit_to_find" = "" ] && [ "$title_to_find" = "" ]
then
	pr_usage_exit "--commit or --title should be given" 1
fi

# fail early instead of running git in the wrong directory
cd "$repo" || exit 1

if [ "$title_to_find" = "" ]
then
	subject=$(git log -n 1 "$commit_to_find" --pretty=%s 2> /dev/null)
	if [ "$subject" = "" ]
	then
		echo "wrong commit id ($commit_to_find)"
		exit 1
	fi
else
	subject="$title_to_find"
fi

if [ "$author" = "" ] && [ ! "$commit_to_find" = "" ]
then
	author=$(git log -n 1 "$commit_to_find" --pretty=%an)
fi

if [ "$author_date" = "" ] && [ ! "$commit_to_find" = "" ]
then
	author_date=$(git log -n 1 "$commit_to_find" --pretty=%ad)
fi

find_commit_of "$subject" "$author" "$author_date" "$commit_range"
hash_subject=$find_commit_of_result

if [ "$hash_subject" = "" ]
then
	exit 1
fi

if [ "$hash_only" = "true" ]
then
	simple_hash=$(echo "$hash_subject" | awk '{print $1}')
	git log --pretty=%H -n 1 "$simple_hash"
	exit
fi

echo "$hash_subject"
# [dump fragment: head of /git_helpers/decorate_backport_commits.sh, lines 1-21, continued on the next physical line]
# -------------------------------------------------------------------------------- /git_helpers/decorate_backport_commits.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [ $# -ne 3 ] 6 | then 7 | echo "Usage: $0 <first commit> <last commit> <upstream tree>" 8 | echo 9 | echo "Adds upstream commit comments to <first commit>..<last commit>" 10 | exit 1 11 | fi 12 | 13 | bindir=$(dirname "$0") 14 | first_commit=$1 15 | last_commit=$2 16 | remote=$3 17 | 18 | commit_range="${first_commit}..${last_commit}" 19 | 20 | before_patches_dir=$(mktemp -d before_patches-XXXX) 21 | echo
"convert $commit_range into patches under \"$before_patches_dir/\"" 22 | git format-patch "$commit_range" -o "$before_patches_dir" --quiet 23 | 24 | after_patches_dir=$(mktemp -d after_patches-XXXX) 25 | for patch in "$before_patches_dir"/*.patch 26 | do 27 | echo "decorate $patch" 28 | patch_name=$(basename $patch) 29 | if ! "$bindir/decorate_backport_patch.py" "$patch" "$remote" > \ 30 | "$after_patches_dir/$patch_name" 2> /dev/null 31 | then 32 | echo " decoration failed, maybe not a backported one" 33 | fi 34 | 35 | done 36 | 37 | head_commit=$(git rev-parse HEAD) 38 | commits_to_restore="${last_commit}..${head_commit}" 39 | 40 | echo "reset HEAD and apply decorated patches" 41 | git reset --hard "$first_commit" 42 | 43 | for patch in "$after_patches_dir"/*.patch 44 | do 45 | git am "$patch" 46 | done 47 | 48 | echo "apply after $last_commit commits ($commits_to_restore)" 49 | git cherry-pick "$commits_to_restore" 50 | 51 | echo "original patches are in $before_patches_dir" 52 | echo "decorated patches are in $after_patches_dir" 53 | -------------------------------------------------------------------------------- /git_helpers/decorate_backport_patch.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import sys 5 | 6 | import _patch 7 | 8 | def main(): 9 | parser = argparse.ArgumentParser() 10 | ''' 11 | 'stable' style adds 'commit xxx upstream.' at the beginning. Very same to 12 | the usual stable commits. 13 | 'cherry-pick' style adds '(cherry picked from commit xxx' at the end. Very 14 | same to the 'git cherry-pick -x'. 15 | 'both' adds two styles of the comments. 
#!/usr/bin/env python3

'''
Load a change from a patch or commit, and find matching change from patches
queue or commits range
'''

import argparse

import _git

def main():
    '''Locate a given change among patch files and/or commit ranges.

    Exits with status 0 if a matching change was found, 1 otherwise.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--patch', metavar='<file>',
            help='patch containing the change')
    parser.add_argument('--commit', metavar='<commit>',
            help='commit containing the change')
    parser.add_argument('--subject', metavar='<subject>',
            help='subject of the change')
    parser.add_argument('--author', metavar='<author name <author email>>',
            help='author of the change')

    parser.add_argument('--repo', metavar='<dir>', default='./',
            help='local repo to find the change from')
    parser.add_argument('patch_or_commits', metavar='<file or commits>',
            nargs='+',
            help='commits range or patch files to find the change from')

    parser.add_argument('--describe_contains', action='store_true',
            help='show \'git describe --contains\' for found commit together')
    parser.add_argument('--remote_repo', metavar='<url>',
            help='show https url for the found commit using this')
    args = parser.parse_args()

    # the change must be identified in at least one of the three ways
    if args.patch is None and args.commit is None and args.subject is None:
        print('--patch, --commit, or --subject should be set')
        exit(1)

    if args.patch:
        change = _git.Change(patch_file=args.patch)
    elif args.commit:
        change = _git.Change(commit=args.commit, repo=args.repo)
    elif args.subject:
        change = _git.Change(subject=args.subject, author=args.author)

    found = False
    for to_find_from in args.patch_or_commits:
        matching_change = change.find_matching_change([to_find_from],
                args.repo)
        if matching_change is None:
            continue
        found = True
        # when searching multiple places, say where the match came from
        if len(args.patch_or_commits) > 1:
            print('found it from %s' % to_find_from)
        if matching_change.commit:
            print('%s ("%s")' %
                  (matching_change.commit.hashid[:12], change.subject))
            if args.describe_contains:
                print('- merged in %s' %
                      matching_change.commit.first_contained_version())
            if args.remote_repo:
                print('- url: %s' %
                      matching_change.url(args.remote_repo, None))
        else:
            print(matching_change.patch.file_name)
    exit(0 if found else 1)

if __name__ == '__main__':
    main()
#!/usr/bin/env python3

import argparse
import os
import subprocess

import _git

def print_reference(change, git_url, queue_url):
    '''Print where 'change' can be found: a url, a commit id, or a patch file.

    If 'change' is backed by a commit, also print the first release that
    contains it.
    '''
    if git_url or queue_url:
        print('    - url: %s' % change.url(git_url, queue_url))
    else:
        if change.commit:
            print('    - commit %s' % change.commit.hashid)
        if change.patch:
            print('    - patch %s' % change.patch.file_name)
    if change.commit:
        print('    - in %s' % change.commit.first_contained_version())

def print_fix_bug(fix, bug, remote_git_url, remote_queue_url):
    '''Print a fix/bug pair with references to each change.'''
    print('- fix: "%s"' % fix.subject)
    print_reference(fix, remote_git_url, remote_queue_url)
    print('- bug: "%s"' % bug.subject)
    print_reference(bug, remote_git_url, remote_queue_url)
    print()

def main():
    '''For each potential fix, find the bug it "Fixes:" among --bugs.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--repo', metavar='<dir>', default='./',
            help='local repo')
    parser.add_argument('--fixes', metavar='<file or commits>', nargs='+',
            help='potential fix patch files or commits')
    parser.add_argument('--bugs', metavar='<file or commits>',
            nargs='+',
            help='potential bug patch files or commits')
    parser.add_argument('--remote_git_url', metavar='<url>',
            help='show https url for the found bug/fix commits using this')
    parser.add_argument('--patches_queue_url', metavar='<url>',
            help='show https url for the found bug/fix patches using this')
    args = parser.parse_args()

    if args.fixes is None or args.bugs is None:
        print('--fixes and --bugs should be passed')
        parser.print_usage()
        exit(1)

    potential_fixes = _git.read_changes(args.fixes, args.repo)
    for potential_fix in potential_fixes:
        for bug_reference in potential_fix.get_fixing_commit_refs():
            # a "Fixes:" reference starts with the buggy commit's hash
            hashid = bug_reference.split()[0]
            if not _git.is_hashid(hashid):
                continue
            try:
                buggy_change = _git.Change(commit=hashid, repo=args.repo)
            except Exception:
                # the referenced commit may not exist in this repo;
                # report and keep scanning the remaining references
                print('# Failed parsing %s from %s' %
                      (bug_reference, potential_fix))
                continue
            for patch_or_commits_range in args.bugs:
                if os.path.isfile(patch_or_commits_range):
                    patch_file = patch_or_commits_range
                    bug = buggy_change.find_matching_patch([patch_file])
                else:
                    commits = patch_or_commits_range
                    bug = buggy_change.find_matching_commit(args.repo, commits)
                if bug is not None:
                    print_fix_bug(potential_fix, bug,
                                  args.remote_git_url, args.patches_queue_url)

if __name__ == '__main__':
    main()
#!/usr/bin/env python3

import argparse

import _git

def main():
    '''Parse a patch file and print the requested fields, one per line.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('patch', metavar='<file>',
            help='patch file to read')
    parser.add_argument('fields', choices=['subject', 'date', 'author',
        'mail_header', 'description', 'diff', 'fixes'],
        nargs='+',
        help='fields to read from the patch')
    args = parser.parse_args()

    change = _git.Change(patch_file=args.patch, set_diff=True)
    # map each selectable field name to the accessor for the parsed change
    field_getters = {
            'subject': lambda c: c.subject,
            'date': lambda c: c.patch.sent_date,
            'author': lambda c: c.author,
            'mail_header': lambda c: c.patch.email_header,
            'description': lambda c: c.description,
            'diff': lambda c: c.diff,
            'fixes': lambda c: '\n'.join(c.get_fixing_commit_refs()),
            }
    for field in args.fields:
        print(field_getters[field](change))

if __name__ == '__main__':
    main()
"$bug_in_commits" = "" ] 26 | then 27 | echo "- $patch_file fixes" 28 | echo " - $bug_in_commits" 29 | fi 30 | done 31 | done 32 | -------------------------------------------------------------------------------- /git_helpers/release_history.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | git for-each-ref --sort=creatordate \ 4 | --format '%(creatordate:short) %(refname:short)' refs/tags 5 | -------------------------------------------------------------------------------- /git_helpers/timeline_of_commit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import os 5 | import subprocess 6 | import sys 7 | 8 | def append_event(timeline, date, event): 9 | if not date in timeline: 10 | timeline[date] = [] 11 | events = timeline[date] 12 | if not event in events: 13 | events.append(event) 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--subject', help='subject of the patch') 18 | parser.add_argument('--author', help='author of the patch') 19 | parser.add_argument('--commit', help='commit id of the patch') 20 | parser.add_argument('trees', nargs='+', 21 | help='trees to check for the patch') 22 | args = parser.parse_args() 23 | 24 | if not args.subject and not args.author: 25 | if not args.commit: 26 | print('subject and author, or commit are necessary') 27 | parser.print_help() 28 | exit(1) 29 | else: 30 | subject = subprocess.check_output(['git', 'log', '-n', '1', 31 | '--pretty=%s', args.commit]).decode().strip() 32 | author = subprocess.check_output(['git', 'log', '-n', '1', 33 | '--pretty=%an', args.commit]).decode().strip() 34 | else: 35 | subject = args.subject 36 | author = args.author 37 | 38 | bindir = os.path.dirname(sys.argv[0]) 39 | find_commit_in = os.path.join(bindir, '__find_commit_in.sh') 40 | timeline = {} 41 | for tree in args.trees: 42 | try: 43 | commit_hash = 
subprocess.check_output([find_commit_in, 44 | '--hash_only', '--author', author, '--title', subject, 45 | tree]).decode().strip() 46 | except: 47 | continue 48 | 49 | author_date = subprocess.check_output(['git', 'log', '-n', '1', 50 | '--date=iso-strict', '--pretty=%ad', 51 | commit_hash]).decode().strip() 52 | author_name_mail = subprocess.check_output(['git', 'log', '-n', '1', 53 | '--pretty=%an <%ae>', commit_hash]).decode().strip() 54 | append_event(timeline, author_date, 55 | 'authored by %s' % author_name_mail) 56 | 57 | committer = subprocess.check_output(['git', 'log', '-n', '1', 58 | '--pretty=%cn <%ce>', commit_hash]).decode().strip() 59 | commit_date = subprocess.check_output(['git', 'log', '-n', '1', 60 | '--date=iso-strict', '--pretty=%cd', 61 | commit_hash]).decode().strip() 62 | append_event(timeline, commit_date, 63 | 'committed by %s into %s' % (committer, tree)) 64 | 65 | for date in sorted(timeline.keys()): 66 | for event in timeline[date]: 67 | date_simple = date.split('T')[0] 68 | time = date.split('T')[1] 69 | print(date_simple) 70 | print(' %s: %s' % (time, event)) 71 | 72 | if __name__ == '__main__': 73 | main() 74 | -------------------------------------------------------------------------------- /gnuplot/.gitignore: -------------------------------------------------------------------------------- 1 | plot.pdf 2 | example_plots/* 3 | -------------------------------------------------------------------------------- /gnuplot/clustered_box-yerr_datgen.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import random 4 | 5 | keys = ["metric", "sysA", "Astdev", "sysB", "Bstdev"] 6 | xaxes = ["metricA", "metricB", "metricC", "metricD", "metricE", "metricF"] 7 | 8 | print("\t".join(keys)) 9 | line = "" 10 | for i in range(len(xaxes)): 11 | values = [str(random.randint(0, 100)) for _ in keys[1:]] 12 | print("%s\t%s" % (xaxes[i], "\t".join(values))) 13 | 14 | 
#!/usr/bin/env python3

import random

keys = ["metric", "sysA", "sysB", "sysC", "sysD", "sysE", "sysF", "sysG"]
xaxes = ["metricA", "metricBB", "metricC", "metricD", "metricE", "metricF"]

def main():
    '''Print a random tab-separated dataset for clustered-boxes plotting.

    First line is the header (keys); each following line is a metric name
    and one random value per system column.
    '''
    print("\t".join(keys))
    for xaxis in xaxes:
        # one random value for each non-header column
        values = [str(random.randint(0, 100000)) for _ in keys[1:]]
        print("%s\t%s" % (xaxis, "\t".join(values)))

if __name__ == '__main__':
    main()
#!/usr/bin/env python3

import random

def main():
    '''Print random heatmap data: "x y value" for every cell of a 5x5 grid.'''
    for x in range(5):
        for y in range(5):
            print("%d %d %d" % (x, y, random.randint(0, 10)))

if __name__ == '__main__':
    main()
#!/bin/bash
# Read plot data from stdin and render it with plot.py.

if [ $# -lt 3 ]
then
	echo "Usage: $0 <chart type> <x label> <y label> [logscale]"
	echo "    supported chart types are:"
	echo "    scatter, scatter-yerr, clustered_boxes, heatmap"
	exit 1
fi

BINDIR=$(dirname "$0")

CHART_TYPE=$1
XLABEL=$2
YLABEL=$3
LOGSCALE=$4

export GNUPLOT_LIB=$BINDIR

TMPFILE=$(mktemp /tmp/lbx-gnuplot.XXX)
# remove the temporary data file even if plotting fails mid-way
trap 'rm -f "$TMPFILE"' EXIT

cat /dev/stdin > "$TMPFILE"

# Build the log-scale flags as an array.  The previous code passed
# "$LOGSCALE" as a single quoted word, so "xy" handed plot.py the bogus
# one-word argument "--xlog --ylog", and an unset logscale handed it an
# empty-string argument; an array expands to zero, one, or two real words.
LOGSCALE_OPTS=()
if [ "$LOGSCALE" = "x" ]
then
	LOGSCALE_OPTS=(--xlog)
elif [ "$LOGSCALE" = "y" ]
then
	LOGSCALE_OPTS=(--ylog)
elif [ "$LOGSCALE" = "xy" ]
then
	LOGSCALE_OPTS=(--xlog --ylog)
fi

"$BINDIR/plot.py" --file "$TMPFILE" --type "$CHART_TYPE" \
	--xtitle "$XLABEL" --ytitle "$YLABEL" "${LOGSCALE_OPTS[@]}"
print("\n") 20 | -------------------------------------------------------------------------------- /gnuplot/show_term_colors.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | for i in range(0, 16): 4 | line = '' 5 | for j in range(0, 16): 6 | val = i * 16 + j 7 | line += u'\u001b[48;5;%dm %03d \u001b[0m' % (val, val) 8 | print(line) 9 | -------------------------------------------------------------------------------- /gnuplot/splitplot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Split data into small segments and plot each segment 4 | # 5 | # This script would be useful for huge dataset. For example, suppose that the 6 | # data has x range [0-100) and y range [0-100). If user gives 2 and 2 for 7 | # number of x segments and y segments respectively, this script will split the 8 | # data into four segments ranging at [0-50)/[0-50), [50-100)/[0-50), 9 | # [0-50)/[50-100), [50-100)/[50-100) and plot the four segments. Also, it will 10 | # sample the data for each plot to have about only 1000 plot points. 11 | # Note that this script assumes single record dataset. 
12 | 13 | if [ $# -ne 3 ] 14 | then 15 | echo "Usage: $0 <file> <nr x segments> <nr y segment>" 16 | exit 1 17 | fi 18 | 19 | BINDIR=$(dirname $0) 20 | STATOF="$BINDIR/../scripts/report/statof.py" 21 | SAMPLE="$BINDIR/../scripts/perf/sample.py" 22 | PLOT="$BINDIR/plot_stdin.sh" 23 | YZOOM="$BINDIR/../scripts/report/yzoom.py" 24 | 25 | F="$1" 26 | NR_XSEGS="$2" 27 | NR_YSEGS="$3" 28 | 29 | FSAMPLED="$F.sampled" 30 | 31 | NRDAT=$(wc -l < $F) 32 | 33 | SEG_WIDTH=1000 34 | 35 | cat $F | $SAMPLE $(($NRDAT / $NR_XSEGS / $SEG_WIDTH)) > $FSAMPLED 36 | MIN=$($STATOF min $FSAMPLED) 37 | MAX=$($STATOF max $FSAMPLED) 38 | 39 | SEG_HEIGHT=$(( ($MAX - $MIN) / $NR_YSEGS )) 40 | 41 | for i in $(seq 1 $NR_XSEGS) 42 | do 43 | for j in $(seq 1 $NR_YSEGS) 44 | do 45 | DAT=$F.$i-$j 46 | head -n $(( $i * $SEG_WIDTH )) $FSAMPLED | \ 47 | tail -n $SEG_WIDTH | \ 48 | $YZOOM $(( ($NR_YSEGS - $j) * $SEG_HEIGHT )) \ 49 | $(( ($NR_YSEGS - $j + 1) * $SEG_HEIGHT )) > $DAT 50 | if [ $(wc -l < $DAT) -eq 0 ] 51 | then 52 | continue 53 | fi 54 | cat $DAT | $PLOT scatter-dot "Time" "Virtual Addr" 55 | mv plot.pdf $F.plot.$i-$j.pdf 56 | done 57 | done 58 | -------------------------------------------------------------------------------- /humble_ci/README.md: -------------------------------------------------------------------------------- 1 | Humble Contiguous Integration 2 | ============================= 3 | 4 | This directory contains scripts for simple contiguous integration works. The 5 | main script, `hci.py`, can be used for periodically checking if specific git 6 | repo has updated and running specific tasks for each of the updates. The 7 | script saves the state of each task and continues the tasks from the last saved 8 | position when executed again, so fault-tolerant. This feature makes it useful 9 | to be used by tasks that could kill the script, e.g., reboot. 10 | 11 | The tasks could be specified as a list of commands. 
To let the commands know 12 | for what source tree's update it has triggered, `hci.py` sets following 13 | environmental variables. 14 | 15 | - HUMBLE_CI_REPO: Path to the git repo 16 | - HUMBLE_CI_BRANCH: Branch that updated 17 | - HUMBLE_CI_REMOTE: Name of the remote of the branch 18 | - HUMBLE_CI_URL: Url of the remote 19 | 20 | If any of the tasks fails, the tasks are marked as failed and no subsequent 21 | tasks will be executed. 22 | 23 | Simple Update Notification Example 24 | ---------------------------------- 25 | 26 | You may use `hci.py` for getting simple upstream tree update notifications. 27 | For the case, this directory contains a script for that purpose, namely 28 | `noti_update.sh`. It receives and email address to send the notirication, 29 | formats the notirication message with the `HUMBLE_CI_*` environment variables, 30 | and send the message via `git send-email`. In other words, it assumes the user 31 | would already set `git-sendemail` with smtp password so that it can send email 32 | without intervention, and called by `hci.py`. 33 | 34 | For example, below command will check updates to Linux mainline and two latest 35 | LTS kernels for every hour, and send notice email. 
#!/bin/bash
# Send an email notification about a tree update noticed by hci.py.
# Reads the HUMBLE_CI_* environment variables that hci.py sets, and assumes
# 'git send-email' is already configured to send mail without intervention.

if [ $# -ne 1 ]
then
	# typo fix: was "<receipients>"
	echo "Usage: $0 <recipients>"
	exit 1
fi

recipients=$1

repo=$HUMBLE_CI_REPO
remote=$HUMBLE_CI_REMOTE
url=$HUMBLE_CI_URL
branch=$HUMBLE_CI_BRANCH

repo_name=$(basename "$repo")

commit_intro=$(git -C "$repo" show --pretty="%h (\"%s\")" --quiet \
	"$remote/$branch")

subject="[hci-noti] $repo_name: $remote/$branch has updated to $commit_intro"

report_file=$(mktemp hci-report-XXXX)
# remove the report file even if send-email fails
trap 'rm -f "$report_file"' EXIT

echo "Subject: $subject" > "$report_file"
echo "
humble_ci noticed update on $branch of $url. The last commit of the tree is:

    $commit_intro" >> "$report_file"

git send-email --to "$recipients" "$report_file"
not found...") 45 | exit(1) 46 | 47 | while True: 48 | childs = exp.childs_of(pid, False) 49 | try: 50 | os.kill(pid, signal.SIGTERM) 51 | except OSError as e: 52 | print("error %s while sending SIGTERM" % e) 53 | childs = exp.childs_of(pid, False) 54 | if len(childs) == 0: 55 | break 56 | print("childs of process %s still exists. send signal again after 1 sec" % 57 | pid) 58 | time.sleep(1) 59 | 60 | print("\n\n%s[kill_run_exps] Now run_exps %s cleaned up!\n\n" % ( 61 | exp.ltime(), exp_path)) 62 | -------------------------------------------------------------------------------- /linux_hack/README: -------------------------------------------------------------------------------- 1 | scripts for hacking Linux kernel 2 | -------------------------------------------------------------------------------- /linux_hack/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | pr_usage() 6 | { 7 | echo " 8 | Usage: $0 [OTION]... <src dir> <build dir> 9 | 10 | OPTION 11 | --config Config file to append 12 | --install Install built kernel 13 | --reboot Reboot after install 14 | " 15 | } 16 | 17 | pr_msg_usage_exit() 18 | { 19 | msg=$1 20 | exit_code=$2 21 | echo "$msg" 22 | pr_usage 23 | exit "$exit_code" 24 | } 25 | 26 | src_dir="" 27 | build_dir="" 28 | additional_config="" 29 | do_install="false" 30 | do_reboot="false" 31 | while [ $# -ne 0 ] 32 | do 33 | case $1 in 34 | "--config") 35 | if [ $# -lt 2 ] 36 | then 37 | pr_msg_usage_exit "--config argument is not given" 1 38 | fi 39 | additional_config=$2 40 | shift 2 41 | continue 42 | ;; 43 | "--install") 44 | do_install="true" 45 | shift 1 46 | continue 47 | ;; 48 | "--reboot") 49 | do_reboot="true" 50 | shift 1 51 | continue 52 | ;; 53 | *) 54 | if [ $# -lt 2 ] 55 | then 56 | pr_msg_usage_exit "src and build dirs not given" 1 57 | fi 58 | if [ ! 
"$src_dir" = "" ] 59 | then 60 | pr_msg_usage_exit "more than one src dir given" 1 61 | fi 62 | src_dir=$1 63 | build_dir=$(realpath $2) 64 | shift 2 65 | ;; 66 | esac 67 | done 68 | 69 | bindir=$(dirname "$0") 70 | 71 | if [ "$src_dir" = "" ] 72 | then 73 | pr_msg_usage_exit "src dir not given" 1 74 | fi 75 | 76 | sudo apt install -y build-essential libssl-dev bc bison flex libelf-dev 77 | 78 | orig_config=$build_dir/.config 79 | 80 | if [ ! -d "$build_dir" ] 81 | then 82 | mkdir "$build_dir" 83 | fi 84 | 85 | if [ ! -f "$orig_config" ] 86 | then 87 | cp "/boot/config-$(uname -r)" "$orig_config" 88 | sed -i 's/CONFIG_DEBUG_INFO_BTF=y/# CONFIG_DEBUG_INFO_BTF/' \ 89 | "$build_dir/.config" 90 | make -C "$src_dir" O="$build_dir" olddefconfig 91 | make -C "$src_dir" O="$build_dir" localmodconfig 92 | fi 93 | 94 | if [ ! "$additional_config" = "" ] 95 | then 96 | cat "$additional_config" >> "$build_dir/.config" 97 | fi 98 | 99 | make -C "$src_dir" O="$build_dir" olddefconfig 100 | make -C "$src_dir" O="$build_dir" -j$(nproc) 101 | 102 | if [ "$do_install" = "true" ] 103 | then 104 | sudo make -C "$src_dir" O="$build_dir" modules_install install 105 | kernelversion=$(make -C "$src_dir" O="$build_dir" -s kernelrelease) 106 | sudo "$bindir/set_kernel.py" "$kernelversion" 107 | fi 108 | 109 | if [ "$do_reboot" = "true" ] 110 | then 111 | echo "reboot now" 112 | sudo shutdown -r now 113 | fi 114 | -------------------------------------------------------------------------------- /linux_hack/build_install_perf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [ $# -ne 2 ] 6 | then 7 | echo "Usage: <linux dir> <perf build dir>" 8 | exit 1 9 | fi 10 | 11 | perf_file="/usr/bin/perf" 12 | 13 | if [ -f "$perf_file" ] 14 | then 15 | echo "$perf_file already exists" 16 | exit 1 17 | fi 18 | 19 | linux_dir=$1 20 | perf_build_dir=$2 21 | perf_dir="$linux_dir/tools/perf" 22 | 23 | if ! 
sudo apt install -y python-dev 24 | then 25 | sudo apt install -y python-dev-is-python3 26 | fi 27 | 28 | sudo apt install -y build-essential libdw-dev systemtap-sdt-dev libunwind-dev \ 29 | libslang2-dev libperl-dev libiberty-dev liblzma-dev \ 30 | libzstd-dev libcap-dev libnuma-dev libbabeltrace-ctf-dev \ 31 | libpfm4-dev libtraceevent-dev python3-setuptools pkg-config 32 | 33 | make -C "$perf_dir" O="$perf_build_dir" 34 | make -C "$perf_dir" O="$perf_build_dir" install 35 | sudo ln -s "$HOME/bin/perf" "$perf_file" 36 | -------------------------------------------------------------------------------- /linux_hack/list_mm_patches.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import subprocess 4 | 5 | mm_master = 'akpm.korg.mm/master' 6 | mm_stable = 'akpm.korg.mm/mm-stable' 7 | mm_unstable = 'akpm.korg.mm/mm-unstable' 8 | mm_new = 'akpm.korg.mm/mm-new' 9 | 10 | def list_patches_in(commits_base, commits_end): 11 | cproc = subprocess.run( 12 | ['git', 'log', '--pretty=%H', '--reverse', '%s..%s' % 13 | (commits_base, commits_end)], 14 | capture_output=True, text=True) 15 | if cproc.returncode != 0: 16 | return 'git log fail (%s)' % cproc.stderr 17 | commits = [x for x in cproc.stdout.strip().split('\n') if x != ''] 18 | print('%s: %d patches' % (commits_end.split('/')[-1], len(commits))) 19 | to_skip = 0 20 | for commit in commits: 21 | if to_skip > 0: 22 | to_skip -= 1 23 | continue 24 | cproc = subprocess.run( 25 | ['git', 'log', '-1', '--pretty=%B', commit], 26 | capture_output=True, text=True) 27 | if cproc.returncode != 0: 28 | return 'git log -1 fail (%s)' % cproc.stderr 29 | commit_msg = cproc.stdout.strip() 30 | pars = commit_msg.split('\n\n') 31 | if len(pars) < 2: 32 | continue 33 | patch_series_par = pars[1] 34 | if patch_series_par.strip().startswith('Patch series '): 35 | unwrapped = ' '.join(patch_series_par.split('\n')) 36 | print(' %s' % unwrapped) 37 | # skip next commits of the 
series 38 | for line in commit_msg.split('\n'): 39 | if line.startswith('This patch (of ') and line.endswith('):'): 40 | to_skip = int(line.split()[3][:-2]) - 1 41 | continue 42 | cproc = subprocess.run( 43 | ['git', 'show', commit, '--pretty=%h'], capture_output=True, 44 | text=True) 45 | if cproc.returncode != 0: 46 | return 'git show fail (%s)' % cproc.stderr 47 | commit_content = cproc.stdout 48 | if len(commit_content.split('\n')) > 300: 49 | unwrapped = ' '.join(pars[0].split('\n')) 50 | print(' Patch "%s"' % unwrapped) 51 | print() 52 | return None 53 | 54 | def main(): 55 | err = list_patches_in(mm_master, mm_stable) 56 | if err is not None: 57 | print(err) 58 | exit(1) 59 | err = list_patches_in(mm_stable, mm_unstable) 60 | if err is not None: 61 | print(err) 62 | exit(1) 63 | err = list_patches_in(mm_unstable, mm_new) 64 | if err is not None: 65 | print(err) 66 | exit(1) 67 | 68 | if __name__ == '__main__': 69 | main() 70 | -------------------------------------------------------------------------------- /linux_hack/ls_kernels.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import datetime 5 | import sys 6 | import os 7 | 8 | GRUBCFG_PATH = "/boot/grub/grub.cfg" 9 | 10 | GRUB = "grub" 11 | CUBOX = "cubox" 12 | RASP2 = "rasp2" 13 | 14 | def grub_kernels(): 15 | with open(GRUBCFG_PATH) as f: 16 | lines = f.read() 17 | 18 | kernels = [] 19 | for line in lines.split('\n'): 20 | tokens = line.split() 21 | if len(tokens) < 2: 22 | continue 23 | if tokens[0] == 'linux': 24 | kernel_position = tokens[1] 25 | kernel_name = kernel_position.lstrip('/boot/vmlinuz-') 26 | if not kernel_name in kernels: 27 | kernels.append(kernel_name) 28 | return kernels 29 | 30 | if __name__ == "__main__": 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('bootloader', nargs='?', type=str, default='grub', 33 | choices=[GRUB, CUBOX, RASP2], metavar='bootloader', 34 | 
help='bootloader of the system') 35 | args = parser.parse_args() 36 | bootloader = args.bootloader 37 | 38 | if bootloader == GRUB: 39 | kernels = grub_kernels() 40 | print("\n".join(kernels)) 41 | elif bootloader == CUBOX: 42 | print(bootloader + " is not supported yet...") 43 | elif bootloader == RASP2: 44 | print(bootloader + " is not supported yet...") 45 | else: 46 | print("Not supported bootloader %s\n" % bootloader) 47 | -------------------------------------------------------------------------------- /linux_hack/maintainers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import os 5 | 6 | def parse_maintainers(maintainers_file): 7 | with open(maintainers_file, 'r') as f: 8 | content = f.read() 9 | 10 | subsystems = {} 11 | keys = { 12 | 'M:': 'maintainer', 13 | 'R:': 'reviewer', 14 | 'L:': 'list', 15 | 'S:': 'status', 16 | 'W:': 'webpage', 17 | 'F:': 'files', 18 | } 19 | 20 | lists_started = False 21 | for paragraph in content.split('\n\n'): 22 | if (lists_started is False and 23 | paragraph.startswith('3C59X NETWORK DRIVER')): 24 | lists_started = True 25 | if not lists_started: 26 | continue 27 | lines = paragraph.split('\n') 28 | subsystem = {'name': lines[0]} 29 | for line in lines[1:]: 30 | for key in keys: 31 | if line.startswith(key): 32 | converted_key = keys[key] 33 | if not converted_key in subsystem: 34 | subsystem[converted_key] = [] 35 | subsystem[converted_key].append(' '.join(line.split()[1:])) 36 | subsystems[lines[0]] = subsystem 37 | return subsystems 38 | 39 | def main(): 40 | parser = argparse.ArgumentParser() 41 | parser.add_argument('--repo', metavar='<dir>', help='linux repo') 42 | args = parser.parse_args() 43 | 44 | maintainers_file = os.path.join(args.repo, 'MAINTAINERS') 45 | if not os.path.isfile(maintainers_file): 46 | print('wrong --repo') 47 | exit(1) 48 | 49 | subsystems = parse_maintainers(maintainers_file) 50 | 51 | print(subsystems) 
52 | 53 | if __name__ == '__main__': 54 | main() 55 | -------------------------------------------------------------------------------- /linux_hack/release_schedule.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # TODO 4 | # - expect rc8 based on the history 5 | # - support merge window schedule 6 | 7 | import argparse 8 | import datetime 9 | import os 10 | import subprocess 11 | 12 | def main(): 13 | parser = argparse.ArgumentParser() 14 | parser.description = 'Expect future linux release schedule' 15 | parser.add_argument('--linux', metavar='<repo>', default='./', 16 | help='path to the linux tree') 17 | parser.add_argument('--until', metavar='<days>', type=int, default=365, 18 | help='future days to expect the schedule until') 19 | args = parser.parse_args() 20 | 21 | major_ver = None 22 | minor_ver = None 23 | with open(os.path.join(args.linux, 'Makefile'), 'r') as f: 24 | for line in f: 25 | fields = line.strip().split() 26 | if fields[:2] == ['VERSION', '=']: 27 | major_ver = int(fields[2]) 28 | if fields[:2] == ['PATCHLEVEL', '=']: 29 | minor_ver = int(fields[2]) 30 | if fields[:2] == ['EXTRAVERSION', '=']: 31 | if len(fields) > 2: 32 | minor_ver -= 1 33 | break 34 | curr_ver = 'v%s.%s' % (major_ver, minor_ver) 35 | git_cmd = ['git', '-C', args.linux] 36 | curr_ver_date = subprocess.check_output( 37 | ['git', '-C', args.linux, 'log', '-1', '--pretty=%cd', 38 | '--date=iso', curr_ver]).decode().strip().split()[0] 39 | ver_date = datetime.datetime.strptime(curr_ver_date, '%Y-%m-%d') 40 | ver = [major_ver, minor_ver] 41 | 42 | now = datetime.datetime.now() 43 | 44 | while ver_date - now < datetime.timedelta(days=args.until): 45 | # 9 weeks per release 46 | ver_date += datetime.timedelta(days=63) 47 | ver[1] += 1 48 | print('%s: v%d.%d' % (ver_date.strftime('%Y-%m-%d'), ver[0], ver[1])) 49 | 50 | if __name__ == '__main__': 51 | main() 52 | 
-------------------------------------------------------------------------------- /linux_hack/rm_kernels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pr_usage() 4 | { 5 | echo "Usage: $0 [OPTION]... [target kernel version]..." 6 | echo 7 | echo "OPTION" 8 | echo " --except <old> <new> Leave <old> old and <new> new kernels" 9 | echo " --except_new <number> Leave <number> latest kernels" 10 | echo " --dry Make no change but notify what will do" 11 | echo " -h, --help Show this message" 12 | } 13 | 14 | pr_usage_exit() 15 | { 16 | exit_code=$1 17 | 18 | pr_usage 19 | exit "$exit_code" 20 | } 21 | 22 | if [ $# -lt 1 ]; 23 | then 24 | pr_usage_exit 1 25 | fi 26 | 27 | bindir=$(dirname "$0") 28 | # newest kernel comes first 29 | kernels=($("$bindir/ls_kernels.py")) 30 | 31 | kernels_to_remove=() 32 | except_old_nr=${#kernels[@]} 33 | except_new_nr=${#kernels[@]} 34 | dry_run="false" 35 | target_specified="false" 36 | 37 | while [ $# -ne 0 ] 38 | do 39 | case $1 in 40 | "--except") 41 | if [ $# -lt 3 ] 42 | then 43 | echo "<number> not given" 44 | pr_usage_exit 1 45 | fi 46 | except_old_nr=$2 47 | except_new_nr=$3 48 | target_specified="true" 49 | shift 3 50 | continue 51 | ;; 52 | "--dry") 53 | dry_run="true" 54 | shift 1 55 | continue 56 | ;; 57 | "--help" | "-h") 58 | pr_usage_exit 0 59 | ;; 60 | *) 61 | if [ "$except_old_nr" = "" ] && [ "$except_new_nr" = "" ] && 62 | [ $# -lt 1 ] 63 | then 64 | echo "<target kernel version> not given" 65 | pr_usage_exit 1 66 | fi 67 | kernels_to_remove=($@) 68 | target_specified="true" 69 | break 70 | ;; 71 | esac 72 | done 73 | 74 | if [ ! 
"$target_specified" = "true" ] 75 | then 76 | echo "Target kernels to remove are not specified" 77 | pr_usage_exit 1 78 | fi 79 | 80 | current_kernel=$(uname -r) 81 | rm_start=$except_new_nr 82 | rm_end=$((${#kernels[@]} - except_old_nr)) 83 | 84 | for ((i = 0 ; i < ${#kernels[@]} ; i++)) 85 | do 86 | if [ $i -lt $rm_start ] || [ $i -ge $rm_end ] 87 | then 88 | continue 89 | fi 90 | if [ "${kernels[$i]}" = "$current_kernel" ] 91 | then 92 | continue 93 | fi 94 | kernels_to_remove+=("${kernels[$i]}") 95 | done 96 | 97 | for ((i = 0 ; i < ${#kernels_to_remove[@]} ; i++)) 98 | do 99 | if [ "${kernels_to_remove[$i]}" = "$current_kernel" ] 100 | then 101 | unset 'kernels_to_remove[i]' 102 | fi 103 | done 104 | 105 | if [ "$EUID" -ne 0 ] && [ "$dry_run" = "false" ] 106 | then 107 | echo "run as root, please" 108 | exit 1 109 | fi 110 | 111 | for ver in "${kernels_to_remove[@]}" 112 | do 113 | if [ "$dry_run" = "true" ] 114 | then 115 | echo "Remove $ver" 116 | continue 117 | fi 118 | if [ ! -e "/boot/vmlinuz-$ver" ] 119 | then 120 | echo "vmlinuz-$ver not found" 121 | continue 122 | fi 123 | 124 | rm "/boot/vmlinuz-$ver" 125 | rm "/boot/initrd.img-$ver" 126 | rm "/boot/System.map-$ver" 127 | rm "/boot/config-$ver" 128 | rm -fr "/lib/modules/$ver" 129 | rm "/var/lib/initramfs-tools/$ver" 130 | done 131 | if [ "$dry_run" = "true" ] 132 | then 133 | exit 0 134 | fi 135 | 136 | update-grub2 137 | -------------------------------------------------------------------------------- /linux_hack/stats/active_maintainers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -lt 1 ] 4 | then 5 | echo "Usage: $0 <linux repo> [activeness threshold in days]" 6 | exit 1 7 | fi 8 | 9 | linux_repo=$1 10 | 11 | if [ $# -ge 2 ] 12 | then 13 | active_thres_days=$2 14 | else 15 | active_thres_days=180 16 | fi 17 | 18 | maintainers=$(git -C "$linux_repo" show HEAD:MAINTAINERS 2> /dev/null | \ 19 | grep '^M:' | sort | uniq | awk 
-F'^M:\t' '{print $2}') 20 | nr_maintainers=$(echo "$maintainers" | wc -l) 21 | 22 | since_date=$(date --date="-$active_thres_days day") 23 | nr_active_maintainers=0 24 | while IFS= read -r author 25 | do 26 | email=$(echo "$author" | awk '{print $NF}') 27 | nr_recent_commits=$(git -C "$linux_repo" \ 28 | log --pretty=%h --since "$since_date" \ 29 | --author "$email" -1 | wc -l) 30 | if [ "$nr_recent_commits" -eq 1 ] 31 | then 32 | echo "$author": active 33 | nr_active_maintainers=$((nr_active_maintainers + 1)) 34 | else 35 | echo "$author": inactive 36 | fi 37 | done <<< "$maintainers" 38 | 39 | echo "$nr_maintainers $nr_active_maintainers" 40 | -------------------------------------------------------------------------------- /linux_hack/stats/nr_maintainers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <linux repo>" 6 | exit 1 7 | fi 8 | 9 | linux_repo=$1 10 | 11 | echo "version total new" 12 | 13 | prev_nr_maintainers=0 14 | for major in 2.6 3 4 5 6 15 | do 16 | for minor in {0..40} 17 | do 18 | version="v$major.$minor" 19 | nr_maintainers=$(git -C "$linux_repo" \ 20 | show "$version":MAINTAINERS 2> /dev/null | \ 21 | grep '^M:' | sort | uniq | wc -l) 22 | if [ "$nr_maintainers" = "0" ] 23 | then 24 | continue 25 | fi 26 | 27 | if [ "$prev_nr_maintainers" -eq 0 ] 28 | then 29 | new=0 30 | else 31 | new=$((nr_maintainers - prev_nr_maintainers)) 32 | fi 33 | 34 | echo "$version $nr_maintainers $new" 35 | prev_nr_maintainers=$nr_maintainers 36 | done 37 | done 38 | -------------------------------------------------------------------------------- /linux_hack/turn_debug_msg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 2 ] 4 | then 5 | echo "Usage: $0 <files> <on|off>" 6 | exit 1 7 | fi 8 | 9 | files=$1 10 | onoff=$2 11 | 12 | cmd="file $files" 13 | if [ "$onoff" = "on" ] 14 | then 15 | 
cmd+=" +p" 16 | else 17 | cmd=" -p" 18 | fi 19 | 20 | echo -n "$cmd" | sudo tee /sys/kernel/debug/dynamic_debug/control 21 | echo "" 22 | -------------------------------------------------------------------------------- /parallel_runs/gcma_exps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import ssh_args 6 | 7 | user, target, port, password = ssh_args.parse_input() 8 | 9 | lbpath = "/home/%s/lazybox" % user 10 | 11 | bootloader = "grub" 12 | if target == "raspberrypi": 13 | bootloader = "rasp2" 14 | 15 | k_cma = "cma" 16 | k_gcma = "gcma" 17 | k_vanilla = "vanilla" 18 | kernels = [k_cma, k_gcma, k_vanilla] 19 | kparam_cma = "coherent_pool=16M cma=64M smsc95xx.turbo_mode=N" 20 | expspath = "./exps/" 21 | exps = ["gcma", "gcma-blogbench", 22 | "still", "still-blogbench", "blogbench-still", "blogbench"] 23 | 24 | for kernel in kernels: 25 | for exp in exps: 26 | kernel_param = "" 27 | if kernel in [k_cma, k_gcma]: 28 | kernel_param = kparam_cma 29 | if kernel == k_vanilla and (exp == "gcma" or exp == "gcma-blogbench"): 30 | continue 31 | os.system("expect ./remote_set_kernel.exp %s %s %s %s %s %s %s %s" % ( 32 | user, target, port, password, lbpath, bootloader, kernel, kernel_param)) 33 | if kernel == k_gcma: 34 | os.system("expect ./remote_zram_swap.exp %s %s %s %s %s 100M" % ( 35 | user, target, port, password, lbpath)) 36 | os.system("expect ./remote_exps.exp %s %s %s %s %s %s" % ( 37 | user, target, port, password, lbpath, expspath + exp)) 38 | -------------------------------------------------------------------------------- /parallel_runs/generate_exp_conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Stub for experiments config file generator 5 | 6 | Experiments configuration file describes which experiments should be executed. 
7 | For simplicity, it is recommended to be linearly interpretable rather than 8 | procedural. In other words, `loop` or `goto` is not recommended inside 9 | experiments config file. 10 | 11 | But, constraining people to write down repeating experiments manually is crime 12 | against humanity. For the reason, it is recommended to write down each user's 13 | own experiments config file generator using their familiar tools. To help 14 | making each user's own generator, this file contains essential code for 15 | automated experiments config file generation. 16 | 17 | Users could use this file for their purpose by following steps: 18 | 1. Copy this file 19 | 2. Edit main loop inside the file which commented to be 20 | 3. Run modified copy and redirect stdout to appropriate file 21 | 22 | """ 23 | 24 | __author__ = "SeongJae Park" 25 | __email__ = "sj38.park@gmail.com" 26 | __copyright__ = "Copyright (c) 2013-2020, SeongJae Park" 27 | __license__ = "GPLv2" 28 | 29 | import exp 30 | 31 | exps = [] 32 | 33 | # !!! Edit below loop to fit on your purpose using Python language 34 | for arg1 in [1, 2, 3, 4, 5]: 35 | for arg2 in ['a', 'b', 'abc']: 36 | starts = ["echo hi"] 37 | mains = ["echo main with %s %s > main.out" % (arg1, arg2)] 38 | # For multiple commands, code could be seems like below 39 | # mains = ["echo main with %s %s" % (arg1, arg2), 40 | # "echo main2 with %s %s" % (arg2, arg1)] 41 | backs = ["echo back with %s" % (arg1)] 42 | ends = ["echo buy"] 43 | checks = ["grep main main.out"] 44 | 45 | # Do not forget to match indentation 46 | exps.append(exp.Exp(starts, mains, backs, ends, checks)) 47 | 48 | # !!! 
Do not edit code below 49 | for exp in exps: 50 | for start in exp.start_cmds: 51 | print("start %s" % start) 52 | for main in exp.main_cmds: 53 | print("main %s" % main) 54 | for back in exp.back_cmds: 55 | print("back %s" % back) 56 | for end in exp.end_cmds: 57 | print("end %s" % end) 58 | for check in exp.check_cmds: 59 | print("check %s" % check) 60 | 61 | print('') 62 | -------------------------------------------------------------------------------- /parallel_runs/kill_childs_self.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Kill a process and its entire subprocesses 5 | """ 6 | 7 | import argparse 8 | import os 9 | import signal 10 | import subprocess 11 | import sys 12 | import time 13 | 14 | import exp 15 | 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('pid', metavar='<pid>', type=int, 18 | help = 'process id of target') 19 | args = parser.parse_args() 20 | 21 | pid = args.pid 22 | 23 | exp.kill_childs_self(pid) 24 | -------------------------------------------------------------------------------- /parallel_runs/kill_run_exps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import os 5 | import signal 6 | import subprocess 7 | import sys 8 | import time 9 | 10 | import exp 11 | 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument('exp_path', metavar='<exp file>', type=str) 14 | args = parser.parse_args() 15 | 16 | exp_path = args.exp_path 17 | 18 | print("\n\n%s[kill_run_exps] It's time to say good-bye, run_exps %s!\n\n" 19 | % (exp.ltime(), exp_path)) 20 | 21 | p = subprocess.Popen("ps -ef | grep run_exps.py", 22 | stdout=subprocess.PIPE, shell=True) 23 | out, err = p.communicate() 24 | out = out.decode('utf-8') 25 | 26 | print(out) 27 | 28 | pid = 0 29 | out = out.split('\n') 30 | for line in out: 31 | spltd = line.split() 32 | if len(spltd) < 8: 33 | 
continue 34 | if spltd[7] != "python": 35 | continue 36 | if os.path.split(spltd[8])[1] != "run_exps.py": 37 | continue 38 | if spltd[9] != exp_path: 39 | continue 40 | pid = int(spltd[1]) 41 | break 42 | 43 | if pid == 0: 44 | print("the process not found...") 45 | exit(1) 46 | 47 | while True: 48 | childs = exp.childs_of(pid, False) 49 | try: 50 | os.kill(pid, signal.SIGTERM) 51 | except OSError as e: 52 | print("error %s while sending SIGTERM" % e) 53 | childs = exp.childs_of(pid, False) 54 | if len(childs) == 0: 55 | break 56 | print("childs of process %s still exists. send signal again after 1 sec" % 57 | pid) 58 | time.sleep(1) 59 | 60 | print("\n\n%s[kill_run_exps] Now run_exps %s cleaned up!\n\n" % ( 61 | exp.ltime(), exp_path)) 62 | -------------------------------------------------------------------------------- /parallel_runs/remote.tcl: -------------------------------------------------------------------------------- 1 | proc remote_sudocmd {username target ssh_port password cmds} { 2 | spawn ssh -t -p $ssh_port $username@$target sudo -- bash -c '$cmds' 3 | 4 | # for ssh password 5 | expect "*password*" 6 | send "$password\r" 7 | 8 | # for sudo command 9 | expect "*password*" 10 | send "$password\r" 11 | 12 | # wait for completion of ssh 13 | expect eof 14 | } 15 | 16 | proc remote_sudocmd_registered {username target ssh_port password cmds} { 17 | spawn ssh -t -p $ssh_port $username@$target sudo -- bash -c '$cmds' 18 | 19 | # for sudo command 20 | expect "*password*" 21 | send "$password\r" 22 | 23 | # wait for completion of ssh 24 | expect eof 25 | } 26 | 27 | proc remote_sudoercmd {username target ssh_port password cmds} { 28 | remote_cmd_n_password $username $target $ssh_port $password $cmds 1 29 | } 30 | 31 | # Do remote command which requires n-time password input 32 | proc remote_cmd_n_password {username target ssh_port password cmds nr_prompt} { 33 | spawn ssh -t -p $ssh_port $username@$target bash -c '$cmds' 34 | 35 | # for ssh password 36 | 
expect "*password*" 37 | send "$password\r" 38 | 39 | set i 0 40 | while {$i < $nr_prompt} { 41 | puts "$i prompt" 42 | expect "*password*" 43 | send "$password\r" 44 | incr i 45 | } 46 | 47 | # wait for completion of ssh 48 | expect eof 49 | } 50 | -------------------------------------------------------------------------------- /parallel_runs/remote_cmds.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | source "remote.tcl" 5 | 6 | # Execute commands from a remote target machine 7 | # 8 | # Usage: 9 | # expect remote_cmds.exp <username> <target> <ssh port> <password> <cmds> [nr_password] 10 | 11 | if { [llength $argv] < 5 } { 12 | puts "usage: " 13 | puts "expect remote_cmds.exp \\" 14 | puts " <username> <target> <ssh port> <password> \\" 15 | puts " <cmds> \[nr_password\]" 16 | exit 1 17 | } 18 | 19 | set username [lindex $argv 0] 20 | set target [lindex $argv 1] 21 | set ssh_port [lindex $argv 2] 22 | set password [lindex $argv 3] 23 | set cmds [lindex $argv 4] 24 | 25 | # We assume one password for sudo 26 | set npass 1 27 | if { [llength $argv] > 5 } { 28 | set npass [lindex $argv 5] 29 | } 30 | 31 | remote_cmd_n_password $username $target $ssh_port $password $cmds $npass 32 | -------------------------------------------------------------------------------- /parallel_runs/remote_exp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import ssh_args 6 | 7 | USAGE="%s <user name> <target> <ssh port> [password] [exp]" % sys.argv[0] 8 | 9 | user, target, port, password = ssh_args.parse_input(USAGE) 10 | if len(sys.argv) < 6: 11 | exp = raw_input("exp to run in remote: ") 12 | else: 13 | exp = sys.argv[5] 14 | 15 | lbpath = "/home/%s/lazybox" % user 16 | 17 | if password == "__lb_registered": 18 | cmd = "expect ./remote_exps_registered.exp %s %s %s %s %s" % ( 19 | user, target, port, 
lbpath, exp) 20 | else: 21 | cmd = "expect ./remote_exps.exp %s %s %s %s %s %s" % ( 22 | user, target, port, password, lbpath, exp) 23 | print("[remote_exp.py] do cmd $ ", cmd) 24 | os.system(cmd) 25 | -------------------------------------------------------------------------------- /parallel_runs/remote_exps.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | source "remote.tcl" 5 | 6 | # Do experiments from a remote target machine using run_exps.py 7 | # 8 | # Because run_exps.py is a part of lazybox, lazybox should be installed on the 9 | # remote target machine before execcution of this program. 10 | # 11 | # Usage: 12 | # expect remote_exps.exp <username> <target> <ssh port> <password> \ 13 | # <lazybox path> <exp> 14 | 15 | if { [llength $argv] < 6 } { 16 | puts "usage: " 17 | puts "expect remote_exps.exp \\" 18 | puts " <username> <target> <ssh port> <password> \\" 19 | puts " <lazybox path> <exp>" 20 | exit 1 21 | } 22 | 23 | set username [lindex $argv 0] 24 | set target [lindex $argv 1] 25 | set ssh_port [lindex $argv 2] 26 | set password [lindex $argv 3] 27 | set lbpath [lindex $argv 4] 28 | set exp [lindex $argv 5] 29 | 30 | remote_sudocmd $username $target $ssh_port $password \ 31 | "cd $lbpath; ./run_exps.py $exp" 32 | 33 | puts "\nremote_exps.exp FINISHED" 34 | -------------------------------------------------------------------------------- /parallel_runs/remote_exps_registered.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Do experiments from a remote target machine using run_exps.py 5 | # 6 | # Because run_exps.py is a part of lazybox, lazybox should be installed on the 7 | # remote target machine before execcution of this program. 
8 | # 9 | # Usage: 10 | # expect remote_exps.exp <username> <target> <ssh port> \ 11 | # <lazybox path> <exp> 12 | 13 | if { [llength $argv] < 5 } { 14 | puts "usage: " 15 | puts "expect remote_exps.exp \\" 16 | puts " <username> <target> <ssh port> \\" 17 | puts " <lazybox path> <exp>" 18 | exit 1 19 | } 20 | 21 | set username [lindex $argv 0] 22 | set target [lindex $argv 1] 23 | set ssh_port [lindex $argv 2] 24 | set lbpath [lindex $argv 3] 25 | set exp [lindex $argv 4] 26 | 27 | source "remote.tcl" 28 | 29 | remote_sudocmd_registered $username $target $ssh_port $password \ 30 | "cd $lbpath; sudo ./run_exps.py $exp" 31 | 32 | puts "\nremote_exps_registered.exp FINISHED" 33 | -------------------------------------------------------------------------------- /parallel_runs/remote_reboot.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Reboot remote machine via ssh 5 | # 6 | # Usage: 7 | # expect remote_reboot.exp <username> <target> <ssh port> <password> 8 | 9 | if { [llength $argv] < 4 } { 10 | puts "usage: " 11 | puts "expect remote_reboot.exp \\" 12 | puts " <username> <target> <ssh port> <password> \\" 13 | exit 1 14 | } 15 | 16 | set username [lindex $argv 0] 17 | set target [lindex $argv 1] 18 | set ssh_port [lindex $argv 2] 19 | set password [lindex $argv 3] 20 | 21 | source "remote.tcl" 22 | 23 | remote_sudocmd $username $target $ssh_port $password \ 24 | "shutdown -r now" 25 | -------------------------------------------------------------------------------- /parallel_runs/remote_set_kernel.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Set kernel of a remote target host as user specified 5 | # 6 | # Internally, kernel setting is done via `scripts/set_kernel/set_kernel.py` of 7 | # lazybox. 
For the reason, lazybox should be installed on the remote target 8 | # machine before execution of this program. 9 | # 10 | # Usage: 11 | # expect remote_set_kernel.exp <username> <target> <ssh port> <password> \ 12 | # <lazybox path> <bootloader> \ 13 | # <kernel name> <kernel param> 14 | 15 | if { [llength $argv] < 7 } { 16 | puts "usage: " 17 | puts "expect remote_set_kernel.exp \\" 18 | puts " <username> <target> <ssh port> <password> \\" 19 | puts " <lazybox path> <bootloader> \\" 20 | puts " <kernel name> \[kernel param\]" 21 | exit 1 22 | } 23 | 24 | set username [lindex $argv 0] 25 | set target [lindex $argv 1] 26 | set ssh_port [lindex $argv 2] 27 | set password [lindex $argv 3] 28 | set lbpath [lindex $argv 4] 29 | set bootloader [lindex $argv 5] 30 | set kern_name [lindex $argv 6] 31 | set kern_param "" 32 | 33 | if { [llength $argv] > 7 } { 34 | set kern_param [lindex $argv 7] 35 | } 36 | 37 | source "remote.tcl" 38 | 39 | remote_sudocmd $username $target $ssh_port $password \ 40 | "cd $lbpath; \ 41 | ./scripts/set_kernel/set_kernel.py \ 42 | $bootloader $kern_name $kern_param; \ 43 | sync; \ 44 | reboot" 45 | 46 | # wait 30 seconds for reboot 47 | set count 30 48 | puts "\nwait $count seconds for reboot" 49 | while { $count > 0 } { 50 | sleep 1 51 | send_user "$count " 52 | set count [expr $count - 1] 53 | } 54 | puts "\nremote_set_kernel.exp FINISHED" 55 | -------------------------------------------------------------------------------- /parallel_runs/remote_stat.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Check remote machine sshd status 5 | # 6 | # Usage: 7 | # expect remote_stat.exp <username> <target> <ssh port> <password> 8 | 9 | if { [llength $argv] < 4 } { 10 | puts "usage: " 11 | puts "expect remote_stat.exp \\" 12 | puts " <username> <target> <ssh port> <password>" 13 | exit 1 14 | } 15 | 16 | set username [lindex $argv 0] 17 | set target [lindex 
$argv 1] 18 | set ssh_port [lindex $argv 2] 19 | set password [lindex $argv 3] 20 | 21 | source "remote.tcl" 22 | 23 | remote_sudocmd $username $target $ssh_port $password \ 24 | "exit" 25 | -------------------------------------------------------------------------------- /parallel_runs/remote_zram_swap.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Make a remote target host to use a zram block device as a swap device 5 | # 6 | # Internally, setting swap area as a zram block device is done via 7 | # `scripts/zram_swap.sh` of lazybox. For the reason, lazybox should be 8 | # installed on the remote target machine before execution of this program. 9 | # 10 | # Usage: 11 | # expect remote_set_kernel.exp <username> <target> <ssh port> <password> \ 12 | # <lazybox path> <bootloader> \ 13 | # <kernel name> <kernel param> 14 | 15 | if { [llength $argv] < 6 } { 16 | puts "usage: " 17 | puts "expect remote_zram_swap.exp \\" 18 | puts " <username> <target> <ssh port> <password> \\" 19 | puts " <lazybox path> <zram swap size>" 20 | exit 1 21 | } 22 | 23 | set username [lindex $argv 0] 24 | set target [lindex $argv 1] 25 | set ssh_port [lindex $argv 2] 26 | set password [lindex $argv 3] 27 | set lbpath [lindex $argv 4] 28 | set zram_size [lindex $argv 5] 29 | 30 | source "remote.tcl" 31 | 32 | remote_sudocmd $username $target $ssh_port $password \ 33 | "cd $lbpath; ./scripts/zram_swap.sh $zram_size" 34 | 35 | puts "\nremote_zram_swap.exp FINISHED" 36 | -------------------------------------------------------------------------------- /parallel_runs/ssh_args.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | "Help argument parsing for remote experiments" 4 | 5 | __author__ = "SeongJae Park" 6 | __email__ = "sj38.park@gmail.com" 7 | __copyright__ = "Copyright (c) 2015-2020, SeongJae Park" 8 | __license__ = "GPLv2" 9 | 10 | 
#!/usr/bin/env python3

"Help argument parsing for remote experiments"

__author__ = "SeongJae Park"
__email__ = "sj38.park@gmail.com"
__copyright__ = "Copyright (c) 2015-2020, SeongJae Park"
__license__ = "GPLv2"

import getpass
import os
import sys

USAGE = "%s <user name> <target> <ssh port> [password]" % sys.argv[0]

def parse_input(custom_usage=USAGE):
    """Parse ssh connection arguments from the command line.

    Expects sys.argv to be: <program> <user name> <target> <ssh port>
    [password].  When the password is omitted, it is read interactively
    with hidden input via getpass.

    Returns a (user, target, port, password) tuple of strings.  Exits the
    program with status 1 when mandatory arguments are missing.
    """
    if len(sys.argv) < 4:
        print("usage: ", custom_usage)
        print("")
        # sys.exit() instead of the site-provided exit() builtin, which is
        # not guaranteed to exist (e.g. under python -S or frozen binaries).
        sys.exit(1)

    user = sys.argv[1]
    target = sys.argv[2]
    port = sys.argv[3]
    if len(sys.argv) > 4:
        password = sys.argv[4]
    else:
        password = getpass.getpass("password for %s at %s: " % (user, target))
    return user, target, port, password
6 | -------------------------------------------------------------------------------- /parallel_runs/test_run_exps/hello: -------------------------------------------------------------------------------- 1 | main echo "hello" 2 | -------------------------------------------------------------------------------- /parallel_runs/test_run_exps/infini_root_test.exps: -------------------------------------------------------------------------------- 1 | # first experiment 2 | start echo "start" 3 | main ./test_run_exps/run_secs.py 6 4 | back while true; do ./test_run_exps/run_secs.py 2; sleep 1; done 5 | end echo 'end!' 6 | -------------------------------------------------------------------------------- /parallel_runs/test_run_exps/multiple_lines.exps: -------------------------------------------------------------------------------- 1 | # first experiment 2 | start echo "start" 3 | main ./test_run_exps/run_secs.py \ 4 | 6 5 | back while true; \ 6 | do ./test_run_exps/run_secs.py 2; \ 7 | sleep 1; \ 8 | done 9 | end echo 'end!' 10 | -------------------------------------------------------------------------------- /parallel_runs/test_run_exps/multiple_mains.exps: -------------------------------------------------------------------------------- 1 | # first experiment 2 | start echo '6 and 2' 3 | main for i in `seq 1 6`; \ 4 | do \ 5 | echo 'main 6: '$i; \ 6 | sleep 1; \ 7 | done 8 | main for i in `seq 1 2`; \ 9 | do \ 10 | echo 'main 2: '$i; \ 11 | sleep 1; \ 12 | done 13 | end echo 'end!' 14 | 15 | start echo '3 and 8' 16 | main for i in `seq 1 3`; \ 17 | do \ 18 | echo 'main 3: '$i; \ 19 | sleep 1; \ 20 | done 21 | main for i in `seq 1 8`; \ 22 | do \ 23 | echo 'main 8: '$i; \ 24 | sleep 1; \ 25 | done 26 | end echo 'end!' 
27 | -------------------------------------------------------------------------------- /parallel_runs/test_run_exps/run_secs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import subprocess 4 | import sys 5 | import time 6 | 7 | for i in range(int(sys.argv[1])): 8 | print(i) 9 | time.sleep(1) 10 | print("buy buy") 11 | -------------------------------------------------------------------------------- /parallel_runs/test_run_exps/spawn_process.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import subprocess 4 | import sys 5 | import time 6 | 7 | remaining_spawns = int(sys.argv[1]) - 1 8 | 9 | if remaining_spawns > 0: 10 | print('will spawn %d more child' % remaining_spawns) 11 | cmd = '%s %d' % (__file__, remaining_spawns) 12 | subprocess.call(cmd, shell=True) 13 | 14 | while True: 15 | time.sleep(3) 16 | -------------------------------------------------------------------------------- /parallel_runs/test_run_exps/test_multiple_expss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | "test multiple experiments spec files support of run_exps.py" 4 | 5 | import os 6 | 7 | testdir = os.path.abspath(os.path.dirname(__file__)) 8 | lzdir = os.path.join(os.path.dirname(__file__), os.pardir) 9 | lzdir = os.path.abspath(lzdir) 10 | cmd = "%s %s %s" % (os.path.join(lzdir, "run_exps.py"), 11 | os.path.join(testdir, "hello"), os.path.join(testdir, "bye")) 12 | print("execute: ", cmd) 13 | os.system(cmd) 14 | -------------------------------------------------------------------------------- /parallel_ssh_cmds/ssh_parallel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pr_usage() 4 | { 5 | echo "Usage: $0 [OPTION]... <cmd> <host>..." 
6 | echo 7 | echo "OPTION" 8 | echo " --user <username> Specify the ssh username to use" 9 | echo " --port <port> Specify the ssh port to use" 10 | echo " --log_prefix <prefix> Prefix of the log files" 11 | echo " --keep_log Keep log files" 12 | echo " -h, --help Show this usage" 13 | } 14 | 15 | pr_usage_exit() 16 | { 17 | exit_code=$1 18 | pr_usage 19 | exit "$exit_code" 20 | } 21 | 22 | if [ $# -lt 1 ] 23 | then 24 | pr_usage_exit 1 25 | fi 26 | 27 | ssh_user=$USER 28 | ssh_port=22 29 | log_prefix="" 30 | keep_log="false" 31 | 32 | while [ $# -ne 0 ] 33 | do 34 | case $1 in 35 | "--user") 36 | if [ $# -lt 2 ] 37 | then 38 | pr_usage_exit 1 39 | fi 40 | ssh_user=$2 41 | shift 2 42 | continue 43 | ;; 44 | "--port") 45 | if [ $# -lt 2 ] 46 | then 47 | pr_usage_exit 1 48 | fi 49 | ssh_port=$2 50 | shift 2 51 | continue 52 | ;; 53 | "--log_prefix") 54 | if [ $# -lt 2 ] 55 | then 56 | pr_usage_exit 1 57 | fi 58 | log_prefix=$2 59 | shift 2 60 | continue 61 | ;; 62 | "--keep_log") 63 | keep_log="true" 64 | shift 1 65 | continue 66 | ;; 67 | "--help" | "-h") 68 | pr_usage_exit 0 69 | ;; 70 | *) 71 | if [ $# -lt 2 ] 72 | then 73 | pr_usage_exit 1 74 | fi 75 | cmd=$1 76 | hosts=( "$@" ) 77 | unset hosts[0] 78 | break;; 79 | esac 80 | done 81 | 82 | declare -A log_files 83 | 84 | for host in ${hosts[@]} 85 | do 86 | if [ "$keep_log" = "true" ] 87 | then 88 | date_str=$(date +"%Y-%m-%d-%H-%M") 89 | log_file="$log_prefix"ssh_parallel_"$host"_"$date_str" 90 | log_file=$(mktemp "$log_file"_XXXX) 91 | log_files[$host]="$log_file" 92 | ssh -p "$ssh_port" "$ssh_user@$host" "$cmd" | tee "$log_file" & 93 | else 94 | ssh -p "$ssh_port" "$ssh_user@$host" "$cmd" & 95 | fi 96 | done 97 | 98 | wait 99 | 100 | for host in ${!log_files[@]} 101 | do 102 | echo "log for $host is at ${log_files[$host]}" 103 | done 104 | -------------------------------------------------------------------------------- /remote.tcl: -------------------------------------------------------------------------------- 
1 | proc remote_sudocmd {username target ssh_port password cmds} { 2 | spawn ssh -t -p $ssh_port $username@$target sudo -- bash -c '$cmds' 3 | 4 | # for ssh password 5 | expect "*password*" 6 | send "$password\r" 7 | 8 | # for sudo command 9 | expect "*password*" 10 | send "$password\r" 11 | 12 | # wait for completion of ssh 13 | expect eof 14 | } 15 | 16 | proc remote_sudocmd_registered {username target ssh_port password cmds} { 17 | spawn ssh -t -p $ssh_port $username@$target sudo -- bash -c '$cmds' 18 | 19 | # for sudo command 20 | expect "*password*" 21 | send "$password\r" 22 | 23 | # wait for completion of ssh 24 | expect eof 25 | } 26 | 27 | proc remote_sudoercmd {username target ssh_port password cmds} { 28 | remote_cmd_n_password $username $target $ssh_port $password $cmds 1 29 | } 30 | 31 | # Do remote command which requires n-time password input 32 | proc remote_cmd_n_password {username target ssh_port password cmds nr_prompt} { 33 | spawn ssh -t -p $ssh_port $username@$target bash -c '$cmds' 34 | 35 | # for ssh password 36 | expect "*password*" 37 | send "$password\r" 38 | 39 | set i 0 40 | while {$i < $nr_prompt} { 41 | puts "$i prompt" 42 | expect "*password*" 43 | send "$password\r" 44 | incr i 45 | } 46 | 47 | # wait for completion of ssh 48 | expect eof 49 | } 50 | -------------------------------------------------------------------------------- /remote_cmds.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | source "remote.tcl" 5 | 6 | # Execute commands from a remote target machine 7 | # 8 | # Usage: 9 | # expect remote_cmds.exp <username> <target> <ssh port> <password> <cmds> [nr_password] 10 | 11 | if { [llength $argv] < 5 } { 12 | puts "usage: " 13 | puts "expect remote_cmds.exp \\" 14 | puts " <username> <target> <ssh port> <password> \\" 15 | puts " <cmds> \[nr_password\]" 16 | exit 1 17 | } 18 | 19 | set username [lindex $argv 0] 20 | set target [lindex 
#!/usr/bin/env python3

"""Run a lazybox experiment (run_exps.py) on a remote machine over ssh."""

import os
import sys
import ssh_args

USAGE = "%s <user name> <target> <ssh port> [password] [exp]" % sys.argv[0]

user, target, port, password = ssh_args.parse_input(USAGE)
if len(sys.argv) < 6:
    # Fix: raw_input() does not exist in Python 3 (this script's shebang);
    # it was renamed to input().  The old call raised NameError at runtime.
    exp = input("exp to run in remote: ")
else:
    exp = sys.argv[5]

# lazybox is assumed to be checked out in the remote user's home directory.
lbpath = "/home/%s/lazybox" % user

# '__lb_registered' is a sentinel meaning ssh public keys are registered on
# the target, so the password-less variant of the expect script is used.
if password == "__lb_registered":
    cmd = "expect ./remote_exps_registered.exp %s %s %s %s %s" % (
            user, target, port, lbpath, exp)
else:
    cmd = "expect ./remote_exps.exp %s %s %s %s %s %s" % (
            user, target, port, password, lbpath, exp)
print("[remote_exp.py] do cmd $ ", cmd)
os.system(cmd)
10 | # 11 | # Usage: 12 | # expect remote_exps.exp <username> <target> <ssh port> <password> \ 13 | # <lazybox path> <exp> 14 | 15 | if { [llength $argv] < 6 } { 16 | puts "usage: " 17 | puts "expect remote_exps.exp \\" 18 | puts " <username> <target> <ssh port> <password> \\" 19 | puts " <lazybox path> <exp>" 20 | exit 1 21 | } 22 | 23 | set username [lindex $argv 0] 24 | set target [lindex $argv 1] 25 | set ssh_port [lindex $argv 2] 26 | set password [lindex $argv 3] 27 | set lbpath [lindex $argv 4] 28 | set exp [lindex $argv 5] 29 | 30 | remote_sudocmd $username $target $ssh_port $password \ 31 | "cd $lbpath; ./run_exps.py $exp" 32 | 33 | puts "\nremote_exps.exp FINISHED" 34 | -------------------------------------------------------------------------------- /remote_exps_registered.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Do experiments from a remote target machine using run_exps.py 5 | # 6 | # Because run_exps.py is a part of lazybox, lazybox should be installed on the 7 | # remote target machine before execcution of this program. 
#
# Usage:
# expect remote_exps_registered.exp <username> <target> <ssh port> \
#		<lazybox path> <exp>

if { [llength $argv] < 5 } {
	puts "usage: "
	puts "expect remote_exps_registered.exp \\"
	puts "	<username> <target> <ssh port> \\"
	puts "	<lazybox path> <exp>"
	exit 1
}

set username [lindex $argv 0]
set target [lindex $argv 1]
set ssh_port [lindex $argv 2]
set lbpath [lindex $argv 3]
set exp [lindex $argv 4]

# Fix: 'password' was referenced below but never set anywhere in this
# script, so expect aborted with "can't read password".  This program takes
# no password argument (ssh keys are registered); pass an empty string,
# which is only sent if a sudo password prompt actually appears.
set password ""

source "remote.tcl"

remote_sudocmd_registered $username $target $ssh_port $password \
	"cd $lbpath; sudo ./run_exps.py $exp"

puts "\nremote_exps_registered.exp FINISHED"
9 | # 10 | # Usage: 11 | # expect remote_set_kernel.exp <username> <target> <ssh port> <password> \ 12 | # <lazybox path> <bootloader> \ 13 | # <kernel name> <kernel param> 14 | 15 | if { [llength $argv] < 7 } { 16 | puts "usage: " 17 | puts "expect remote_set_kernel.exp \\" 18 | puts " <username> <target> <ssh port> <password> \\" 19 | puts " <lazybox path> <bootloader> \\" 20 | puts " <kernel name> \[kernel param\]" 21 | exit 1 22 | } 23 | 24 | set username [lindex $argv 0] 25 | set target [lindex $argv 1] 26 | set ssh_port [lindex $argv 2] 27 | set password [lindex $argv 3] 28 | set lbpath [lindex $argv 4] 29 | set bootloader [lindex $argv 5] 30 | set kern_name [lindex $argv 6] 31 | set kern_param "" 32 | 33 | if { [llength $argv] > 7 } { 34 | set kern_param [lindex $argv 7] 35 | } 36 | 37 | source "remote.tcl" 38 | 39 | remote_sudocmd $username $target $ssh_port $password \ 40 | "cd $lbpath; \ 41 | ./scripts/set_kernel/set_kernel.py \ 42 | $bootloader $kern_name $kern_param; \ 43 | sync; \ 44 | reboot" 45 | 46 | # wait 30 seconds for reboot 47 | set count 30 48 | puts "\nwait $count seconds for reboot" 49 | while { $count > 0 } { 50 | sleep 1 51 | send_user "$count " 52 | set count [expr $count - 1] 53 | } 54 | puts "\nremote_set_kernel.exp FINISHED" 55 | -------------------------------------------------------------------------------- /remote_stat.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Check remote machine sshd status 5 | # 6 | # Usage: 7 | # expect remote_stat.exp <username> <target> <ssh port> <password> 8 | 9 | if { [llength $argv] < 4 } { 10 | puts "usage: " 11 | puts "expect remote_stat.exp \\" 12 | puts " <username> <target> <ssh port> <password>" 13 | exit 1 14 | } 15 | 16 | set username [lindex $argv 0] 17 | set target [lindex $argv 1] 18 | set ssh_port [lindex $argv 2] 19 | set password [lindex $argv 3] 20 | 21 | source "remote.tcl" 22 | 23 | 
remote_sudocmd $username $target $ssh_port $password \ 24 | "exit" 25 | -------------------------------------------------------------------------------- /remote_zram_swap.exp: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -- 2 | set timeout -1 3 | 4 | # Make a remote target host to use a zram block device as a swap device 5 | # 6 | # Internally, setting swap area as a zram block device is done via 7 | # `scripts/zram_swap.sh` of lazybox. For the reason, lazybox should be 8 | # installed on the remote target machine before execution of this program. 9 | # 10 | # Usage: 11 | # expect remote_set_kernel.exp <username> <target> <ssh port> <password> \ 12 | # <lazybox path> <bootloader> \ 13 | # <kernel name> <kernel param> 14 | 15 | if { [llength $argv] < 6 } { 16 | puts "usage: " 17 | puts "expect remote_zram_swap.exp \\" 18 | puts " <username> <target> <ssh port> <password> \\" 19 | puts " <lazybox path> <zram swap size>" 20 | exit 1 21 | } 22 | 23 | set username [lindex $argv 0] 24 | set target [lindex $argv 1] 25 | set ssh_port [lindex $argv 2] 26 | set password [lindex $argv 3] 27 | set lbpath [lindex $argv 4] 28 | set zram_size [lindex $argv 5] 29 | 30 | source "remote.tcl" 31 | 32 | remote_sudocmd $username $target $ssh_port $password \ 33 | "cd $lbpath; ./scripts/zram_swap.sh $zram_size" 34 | 35 | puts "\nremote_zram_swap.exp FINISHED" 36 | -------------------------------------------------------------------------------- /repeat_runs/README: -------------------------------------------------------------------------------- 1 | This directory contains scripts for 1) repeated runs of various experiments and 2 | 2) summary of the multiple outputs. 3 | 4 | For each of the purposes, you can use `run.sh` and `post.sh`, respectively. 5 | `run.sh` repeatedly runs the required experiments for user-specified times and 6 | stores the outputs from each data in files under hierarchical directories. 
7 | `post.sh` refines the raw outputs and makes statistics (average, min, max, 8 | stdev) of each of the refined data. 9 | 10 | Each experiment is identified by its 'name' and 'variant'. One experiment 11 | could contain multiple variants. By knowing the 'name' and 'variant', you 12 | should be able to identify what workloads should run under what condition and 13 | what outputs should be generated. 14 | 15 | Files for each experiment 16 | ========================= 17 | 18 | Nonetheless, the essential parts of each experiments should be implemented by 19 | users. They should implement some executable files and place those under 20 | specific directory with specific name. 21 | 22 | `<exp name>/runners/(start|main|back|end)/` 23 | ------------------------------------------ 24 | 25 | Place executable files each running different workload for the experiment. 26 | Each of the files under each last directory (start, main, back, or end) will be 27 | executed as `(start|main|back|end)` workload, as defined by 28 | `parallel_runs/README.md`. The files will be sorted by the name using `sort` 29 | command and be described to the `parallel_runs/run_exps.py` in the order.
For each output file, the parser matched with the name will 46 | be executed. 47 | 48 | Each of the parsers receives the path to the raw output files and the path to 49 | the directory that parsed outputs should be stored. The parsed data ashould be 50 | placed under the directory as a file. 51 | 52 | You can change this directory to any other path by defining `parsers_dir` in 53 | your config file. 54 | 55 | 56 | `<exp name>/statists/` 57 | ---------------------- 58 | 59 | Place executable files each generating stat of the parsed outputs for each of 60 | the repeated runs. The name of the executable file should be matched with one 61 | of the parsed output files, but can have any suffix. For each parsed output 62 | file, the stat executable file matched with the name will be executed. 63 | 64 | Each of the stat generator receives the path of the directory that their 65 | statistics should be saved, and the paths to the parsed files directories. 66 | 67 | 68 | TODO 69 | ==== 70 | 71 | - Add pre-requisites check 72 | -------------------------------------------------------------------------------- /repeat_runs/__common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | 5 | LBX="$BINDIR/../" 6 | ODIR_ROOT="$PWD/results" 7 | 8 | VARIANTS="orig" 9 | REPEATS=1 10 | PARSED='parsed' 11 | 12 | if [ -z "$CFG" ] 13 | then 14 | CFG="$BINDIR/examples/example.config" 15 | fi 16 | 17 | source "$CFG" 18 | -------------------------------------------------------------------------------- /repeat_runs/_gen_exp_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Make lazybox experiment configuration for given workload/variance. Users can 4 | # pipe the generated configuration to the `run_exps.py` for actual run of the 5 | # experiment. 
6 | 7 | BINDIR=$(dirname "$0") 8 | 9 | source "$BINDIR/__common.sh" 10 | 11 | if [ $# -ne 2 ] 12 | then 13 | echo "USAGE: $0 <expname> <variance>" 14 | exit 1 15 | fi 16 | 17 | 18 | EXPNAME="$1" 19 | VARIANCE="$2" 20 | 21 | exp_basename=$(basename "$EXPNAME") 22 | ODIR=$ODIR_ROOT/$exp_basename/$VARIANCE 23 | 24 | MAX_REPEAT=99 25 | for (( unqid=1; unqid <= MAX_REPEAT; unqid+=1 )) 26 | do 27 | CANDIDATE=$ODIR/$(printf "%02d" $unqid) 28 | if [ ! -d "$CANDIDATE" ] 29 | then 30 | ODIR=$CANDIDATE 31 | break 32 | fi 33 | if [ $unqid -eq $MAX_REPEAT ] 34 | then 35 | echo "[Error] $MAX_REPEAT repeated results already exists!!!" 36 | exit 1 37 | fi 38 | done 39 | mkdir -p "$ODIR" 40 | 41 | for runner_type in "start" "main" "back" "end" 42 | do 43 | custom_runners_varname=$runner_type"_RUNNERS" 44 | for runner in ${!custom_runners_varname} 45 | do 46 | echo "$runner_type $runner $ODIR" 47 | done 48 | 49 | runners_dir=$EXPNAME/runners/$runner_type 50 | if [ ! -d "$runners_dir" ] 51 | then 52 | continue 53 | fi 54 | for runner in $(ls "$runners_dir" | sort) 55 | do 56 | if [[ "$runner" = "_"* ]] 57 | then 58 | continue 59 | fi 60 | echo "$runner_type $runners_dir/$runner $ODIR" 61 | done 62 | done 63 | GROUP=$(groups "$USER" | awk '{print $3}') 64 | echo "end chown -R $USER:$GROUP $ODIR_ROOT" 65 | -------------------------------------------------------------------------------- /repeat_runs/_parse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | 5 | source "$BINDIR/__common.sh" 6 | 7 | if [ $# -ne 2 ] 8 | then 9 | echo "Usage: $0 <experiment dir> <raw output directory>" 10 | exit 1 11 | fi 12 | 13 | if [ -z "$parsers_dir" ] 14 | then 15 | parsers_dir="$1/parsers/" 16 | fi 17 | raw_outputs_dir=$2 18 | parsed_dir="$raw_outputs_dir/$PARSED" 19 | 20 | mkdir -p "$parsed_dir" 21 | 22 | echo "parse $raw_outputs_dir" 23 | for raw_output in $(ls "$raw_outputs_dir") 24 | do 25 | parsers=$(ls 
"$parsers_dir" | grep -e '^'"$raw_output"'*') 26 | for parser in $parsers 27 | do 28 | "$parsers_dir"/"$parser" "$raw_outputs_dir" "$parsed_dir" 29 | done 30 | done 31 | -------------------------------------------------------------------------------- /repeat_runs/_stat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | source $BINDIR/__common.sh 5 | 6 | if [ $# -lt 2 ] 7 | then 8 | echo "Usage: $0 <experiment dir> <path to raw output directories>" 9 | exit 1 10 | fi 11 | 12 | if [ -z "$statists_dir" ] 13 | then 14 | statists_dir=$1'/statists/' 15 | fi 16 | 17 | echo "$2" 18 | raw_input_dirs=$(ls "$2" | grep -e '[0-9][0-9]') 19 | if [ -z "$raw_input_dirs" ] 20 | then 21 | echo "no raw input dir in $2" 22 | exit 1 23 | fi 24 | parsed_dirs="" 25 | for rd in $raw_input_dirs 26 | do 27 | parsed_dirs+=$2/$rd"/$PARSED " 28 | done 29 | parsed_dir_1=$(echo "$parsed_dirs" | awk '{print $1}') 30 | 31 | stat_odir=$2'/stat/' 32 | mkdir -p "$stat_odir" 33 | 34 | echo "stat $parsed_dir_1" 35 | for parsed_file in $(ls "$parsed_dir_1") 36 | do 37 | statists=$(ls "$statists_dir" | grep -e '^'$parsed_file'*') 38 | for statist in $statists 39 | do 40 | "$statists_dir/$statist" "$stat_odir" "$parsed_dirs" 41 | done 42 | done 43 | -------------------------------------------------------------------------------- /repeat_runs/aggregate_results.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Aggregate multiple results directories into one results directory 6 | 7 | if [ $# -lt 3 ] 8 | then 9 | echo "Usage: $0 <src dir>... 
<dst dir>" 10 | exit 1 11 | fi 12 | 13 | nr_src_dirs=$(($# - 1)) 14 | src_dirs=(${@:1:$nr_src_dirs}) 15 | dst_dir="${@: -1}" 16 | 17 | bindir=$(dirname "$0") 18 | 19 | source "$bindir/__common.sh" 20 | 21 | for src_dir in "${src_dirs[@]}" 22 | do 23 | for v in $VARIANTS 24 | do 25 | src="$src_dir/$v" 26 | dst="$dst_dir/$v" 27 | mkdir -p "$dst" 28 | 29 | merged=0 30 | for s in $(find "$src" -name "[0-9][0-9]") 31 | do 32 | uid=$(basename "$s") 33 | candidate="$dst/$uid" 34 | while [ -d "$candidate" ] 35 | do 36 | merged=1 37 | uid=$((10#$uid + 1)) 38 | uid=$(printf "%02d" $uid) 39 | candidate="$dst/$uid" 40 | if [ "$uid" -gt 99 ] 41 | then 42 | echo "uid > 99!" 43 | exit 1 44 | fi 45 | done 46 | echo "cp -R $s $candidate" 47 | cp -R "$s" "$candidate" 48 | done 49 | 50 | if [ $merged -ne 0 ] 51 | then 52 | continue 53 | fi 54 | 55 | echo "cp -R $src/* $dst/" 56 | cp -R "$src/"* "$dst" 57 | done 58 | done 59 | -------------------------------------------------------------------------------- /repeat_runs/examples/example.config: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILEDIR=`dirname $BASH_SOURCE` 4 | 5 | EXPERIMENTS=$FILEDIR/memprofile 6 | REPEATS=1 7 | VARIANTS="orig fake" 8 | 9 | start_RUNNERS="echo ls" 10 | end_RUNNERS="ls echo" 11 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/parsers/commlog.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | grep real "$1/commlog" > "$2/perf" 4 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/parsers/memfree.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LBX=$HOME/lazybox 4 | 5 | "$LBX/scripts/report/memfree_to_used.py" "$1/memfree" > "$2/memused" 6 | 7 | MFTOT=0 8 | NR_MF=0 9 | for mf in $(awk '{print $2}' 
"$2/memused") 10 | do 11 | MFTOT=$((MFTOT + mf)) 12 | NR_MF=$((NR_MF + 1)) 13 | done 14 | echo "memused.avg: " $((MFTOT / NR_MF)) > "$2/memused.avg" 15 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/parsers/perf.stat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PFS=$1/perf.stat 4 | grep "insns* per cycle" $PFS | awk '{print "ipc: " $4}' > $2/ipc 5 | grep "page-faults" $PFS | awk '{print "page-faults: " $4}' > $2/pf 6 | grep "LL-cache hits" $PFS | awk '{print "llcmiss: " $4}' | \ 7 | sed -e 's/%//' > $2/llcmiss 8 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/parsers/pswpin.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LBX=$HOME/lazybox 4 | PSWPIN=$1/pswpin 5 | 6 | $LBX/scripts/report/recs_to_diff.py $PSWPIN > $2/pswpin.diff 7 | NR_SWPIN=0 8 | TOTAL_SWPIN=0 9 | for swpin in `awk '{print $2}' $2/pswpin.diff` 10 | do 11 | TOTAL_SWPIN=$(($TOTAL_SWPIN + $swpin)) 12 | NR_SWPIN=$(($NR_SWPIN + 1)) 13 | done 14 | echo "swpin.avg: " $(($TOTAL_SWPIN / $NR_SWPIN)) > $2/pswpin.avg 15 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/runners/back/0001_memfree_stat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | while : 4 | do 5 | grep MemFree /proc/meminfo >> "$1/memfree" 6 | sleep 1 7 | done 8 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/runners/back/0002_pswpin_stat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | while : 4 | do 5 | grep pswpin /proc/vmstat >> "$1/pswpin" 6 | sleep 1 7 | done 8 | 
-------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/runners/back/0003_memfp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LAZYBOX=$HOME/lazybox 4 | 5 | "$LAZYBOX/scripts/memfp.sh" "sleep 5" >> "$1/memfps" 6 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/runners/end/0001_kill_perf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kill -SIGINT "$(pidof perf)" 4 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/runners/main/0001_sleep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | (time sleep 5) 2>&1 | tee "$1/commlog" 4 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/runners/start/0001_sync_drop_caches.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sync 4 | echo 3 > /proc/sys/vm/drop_caches 5 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/runners/start/0002_perf_stat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | perf stat -a -d -o "$1/perf.stat" & 3 | -------------------------------------------------------------------------------- /repeat_runs/examples/memprofile/statists/memused.avg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -lt 2 ] 4 | then 5 | echo "Usage: $0 <output directory> <src directory [src directory ...]>" 6 | exit 1 7 | fi 8 | 9 | LBX=$HOME/lazybox 10 | 11 | ODIR=$1 12 | SDIR=${@:2} 13 | 14 | for stat in avg min max stdev 15 | do 16 | STATDIR=$ODIR/$stat 
17 | mkdir -p $STATDIR 18 | 19 | f=memused.avg 20 | paths="" 21 | for d in $SDIR 22 | do 23 | paths+=$d/$f" " 24 | done 25 | 26 | $LBX/scripts/report/statof.py $stat $paths > $STATDIR/$f 27 | done 28 | -------------------------------------------------------------------------------- /repeat_runs/org_results.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # organize results: move, copy (merge), rm. 4 | # source is current ODIR_ROOT. 5 | 6 | if [ $# -lt 1 ] || ( [ "$1" != "cp" ] && [ "$1" != "rm" ] ) 7 | then 8 | echo "Usage: $0 (cp|rm) [destination results dir]" 9 | exit 1 10 | fi 11 | 12 | OP=$1 13 | DEST=$2 14 | 15 | if [ "$OP" != "rm" ] && [ -z "$DEST" ] 16 | then 17 | echo "Only 'rm' op can have no destination" 18 | exit 1 19 | fi 20 | 21 | BINDIR=$(dirname "$0") 22 | 23 | source "$BINDIR/__common.sh" 24 | 25 | for exp in $EXPERIMENTS 26 | do 27 | exp_basename=$(basename "$exp") 28 | for v in $VARIANTS 29 | do 30 | src="$ODIR_ROOT/$exp_basename/$v" 31 | if [ "$OP" = "rm" ] 32 | then 33 | echo "rm -fr $src" 34 | rm -fr "$src" 35 | continue 36 | fi 37 | 38 | dst=$DEST/$exp_basename/$v 39 | mkdir -p "$dst" 40 | if [ "$OP" = "cp" ] 41 | then 42 | OP="cp -R" 43 | fi 44 | 45 | merged=0 46 | for s in $(find "$src" -name "[0-9][0-9]") 47 | do 48 | uid=$(basename "$s") 49 | candidate="$dst/$uid" 50 | while [ -d "$candidate" ] 51 | do 52 | merged=1 53 | uid=$((10#$uid + 1)) 54 | uid=$(printf "%02d" $uid) 55 | candidate="$dst/$uid" 56 | if [ $uid -gt 99 ] 57 | then 58 | echo "uid > 99!" 
#!/usr/bin/env python3

"""
Report buddy allocator stat
"""

import argparse
import time

def human_readable_size_form(nr_bytes):
    """Convert a byte count into a human-readable string (B/KiB/MiB/GiB)."""
    if nr_bytes > 2**30:
        return "%.2f GiB" % (nr_bytes / 2.0**30)
    if nr_bytes > 2**20:
        return "%.2f MiB" % (nr_bytes / 2.0**20)
    if nr_bytes > 2**10:
        return "%.2f KiB" % (nr_bytes / 2.0**10)
    return "%d B" % nr_bytes

def pr_buddystat():
    """Print per-zone free memory parsed from /proc/buddyinfo.

    Example /proc/buddyinfo content:
    Node 0, zone      DMA      1      1      0      0      2      1      1
    Node 0, zone    DMA32   3986   3751   3348   2811   2044   1233    760
    """
    # Read the file directly; spawning 'cat' via subprocess was a useless
    # extra process.
    with open('/proc/buddyinfo', 'r') as f:
        binfo = f.read()

    for line in binfo.strip('\n').split('\n'):
        fields = line.split()
        # fields[4:] are the free block counts of order 0, 1, 2, ...
        # NOTE(review): assumes a 4 KiB base page size -- confirm on
        # architectures with different page sizes.
        free_mem = 0
        for i, freep in enumerate(fields[4:]):
            free_mem += int(freep) * 2**i * 4096

        print("%s, %s" % (" ".join(fields[0:4]),
            human_readable_size_form(free_mem)))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('delay', nargs='?', type=int, metavar='<delay>',
            default=-1, help='delay between stat')
    args = parser.parse_args()
    delay = args.delay

    while True:
        pr_buddystat()
        if delay == -1:
            break
        time.sleep(delay)
        print("")
-d "$cachedir" ] 9 | then 10 | break 11 | fi 12 | 13 | level=$(cat "$cachedir/level") 14 | 15 | echo "Level $level" 16 | echo "=======" 17 | echo "" 18 | 19 | for file in type size ways_of_associativity number_of_sets \ 20 | coherency_line_size; 21 | do 22 | printf "$file: %s\n" "$(cat $cachedir/$file)" 23 | done 24 | 25 | printf "\n\n" 26 | done 27 | -------------------------------------------------------------------------------- /scripts/chswpdev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <new swap device>" 6 | exit 1 7 | fi 8 | 9 | SWPDEV=$1 10 | 11 | sudo swapoff -a 12 | sudo swapon "$SWPDEV" 13 | -------------------------------------------------------------------------------- /scripts/cpuloadstat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 2 ] 4 | then 5 | echo "Usage: $0 <delay> <count>" 6 | exit 1 7 | fi 8 | 9 | delay=$1 10 | count=$2 11 | 12 | for ((i = 0; i != count; i++)) 13 | do 14 | read -r cpu user nice system idle iowait irq softirq steal \ 15 | guest guest_nice < /proc/stat 16 | if [ "$cpu" != "cpu" ] 17 | then 18 | echo "/proc/stat has unexpected format" 19 | exit 1 20 | fi 21 | old_active=$((user + nice + system + irq + softirq + steal + \ 22 | guest + guest_nice)) 23 | old_total=$((old_active + idle + iowait)) 24 | 25 | sleep "$delay" 26 | 27 | read -r cpu user nice system idle iowait irq softirq steal \ 28 | guest guest_nice < /proc/stat 29 | if [ "$cpu" != "cpu" ] 30 | then 31 | echo "/proc/stat has unexpected format" 32 | exit 1 33 | fi 34 | now_active=$((user + nice + system + irq + softirq + steal + \ 35 | guest + guest_nice)) 36 | now_total=$((now_active + idle + iowait)) 37 | 38 | total=$((now_total - old_total)) 39 | active=$((now_active - old_active)) 40 | 41 | # percent 42 | echo $SECONDS $((active * 100 / total)) 43 | done 44 | 
-------------------------------------------------------------------------------- /scripts/dropcaches.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function usage() { 4 | echo "Usage: $0 <target>" 5 | echo 6 | echo "TARGET" 7 | echo " 1: Free pagecache" 8 | echo " 2: Free dentries and inodes" 9 | echo " 3: Free pagecache, dentries, and inodes" 10 | exit 1 11 | } 12 | 13 | if [ $# -ne 1 ] 14 | then 15 | usage 16 | fi 17 | 18 | ARG=$1 19 | 20 | if [ "$ARG" -lt 1 ] || [ "$ARG" -gt 3 ] 21 | then 22 | usage 23 | fi 24 | 25 | echo "$ARG" > /proc/sys/vm/drop_caches 26 | -------------------------------------------------------------------------------- /scripts/fs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import json 5 | import os 6 | 7 | verbose = False 8 | 9 | def read_fs(root, strip_content, max_depth, current_depth): 10 | contents = {} 11 | for filename in os.listdir(root): 12 | filepath = os.path.join(root, filename) 13 | if os.path.isdir(filepath): 14 | if max_depth != None and current_depth + 1 > max_depth: 15 | continue 16 | contents[filename] = read_fs(filepath, strip_content, max_depth, 17 | current_depth + 1) 18 | else: 19 | try: 20 | with open(filepath, 'r') as f: 21 | contents[filename] = f.read() 22 | if strip_content: 23 | contents[filename] = contents[filename].strip() 24 | except Exception as e: 25 | contents[filename] = 'failed (%s)' % e 26 | if verbose: 27 | print('read %s from %s' % (contents[filename], filepath)) 28 | return contents 29 | 30 | def write_fs(root, contents): 31 | if isinstance(contents, list): 32 | for c in contents: 33 | write_fs(root, c) 34 | return 35 | 36 | for filename in contents: 37 | filepath = os.path.join(root, filename) 38 | if os.path.isfile(filepath): 39 | if verbose: 40 | print('write %s into %s' % (contents[filename], filepath)) 41 | try: 42 | with open(filepath, 'w') 
#!/usr/bin/env python3

"""Print cumulative counts (a CDF) of target columns of a csv data file."""

import argparse

def read_columns(path, target_cols):
    """Read the target columns of each csv line of 'path' as floats.

    A line having fewer fields than a target column index yields a shorter
    row (that column is silently skipped), preserving the original behavior.
    """
    rows = []
    with open(path, 'r') as f:
        for line in f:
            fields = line.split(',')
            row = []
            for col in target_cols:
                if len(fields) < col + 1:
                    continue
                row.append(float(fields[col]))
            rows.append(row)
    return rows

def compute_cdf(rows, granularity, nr_cols):
    """Return {bucket: [cumulative count per column]} for the given rows.

    'bucket' is int(value / granularity).
    """
    counts = {}
    for row in rows:
        for idx, val in enumerate(row):
            key = int(val / granularity)
            # fixed: dict.has_key() was removed in Python 3; use 'in'
            if key not in counts:
                counts[key] = [0] * len(row)
            counts[key][idx] += 1

    cdf = {}
    part_sums = [0] * nr_cols
    for key in sorted(counts):
        for idx in range(len(counts[key])):
            if key not in cdf:
                cdf[key] = [0] * nr_cols
            cdf[key][idx] = counts[key][idx] + part_sums[idx]
            part_sums[idx] += counts[key][idx]
    return cdf

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('data_file', metavar='<file>', type=str,
            help='data file')
    parser.add_argument('target_column', metavar='<target column>', nargs='+',
            type=int, help='data file')
    parser.add_argument('granularity', metavar='<granularity>', type=float,
            help='cdf granularity')
    args = parser.parse_args()

    rows = read_columns(args.data_file, args.target_column)
    cdf = compute_cdf(rows, args.granularity, len(args.target_column))

    for key in sorted(cdf):
        out_data = ""
        for val in cdf[key]:
            out_data += str(val) + ','
        print("%s,%s" % (key, out_data))

if __name__ == '__main__':
    main()
print("cma") 22 | cma_times_output.append("cma") 23 | elif line.find("-vanilla-") != -1: 24 | print("vanilla") 25 | cma_times_output.append("vanilla") 26 | 27 | if line.find("system") != -1 and line.find("elapsed") != -1: 28 | spltd = line.split() 29 | user = spltd[0].split("user")[0] 30 | system = spltd[1].split("system")[0] 31 | 32 | elapsed = spltd[2].split("elapsed")[0] 33 | elap_min = int(elapsed.split(":")[0]) 34 | elap_sec = float(elapsed.split(":")[1]) 35 | elapsed = elap_min * 60 + elap_sec 36 | 37 | cpu = spltd[3].split("%")[0] 38 | print(",%s,%s,%s,%s" % (user, system, elapsed,cpu)) 39 | 40 | if line.find("cma_alloc()") != -1 and line.find("consumed") != -1: 41 | cma_alloc_secs += float(line.split()[-2]) / 1000 / 1000 / 1000 42 | if not allocs_started: 43 | allocs_started = True 44 | if line.find("vc_cma_alloc_chunks") != -1: 45 | cma_chunk_alloc_secs += float(line.split()[-2]) / 1000 / 1000 / 1000 46 | if line.find("cma_release") != -1: 47 | if not allocs_started: 48 | continue 49 | allocs_started = False 50 | cma_times_output.append(",%.6f, %.6f" % ( 51 | cma_alloc_secs, cma_chunk_alloc_secs)) 52 | cma_alloc_secs = 0.0 53 | cma_chunk_alloc_secs = 0.0 54 | 55 | for line in cma_times_output: 56 | print(line) 57 | -------------------------------------------------------------------------------- /scripts/hwinfo/blockdevs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! 
which lsblk > /dev/null 4 | then 5 | echo "[ERROR] You should install lsblk" 6 | exit 1 7 | fi 8 | 9 | IFS=$'\n' 10 | for l in $(lsblk -o MODEL,SIZE -n | sort) 11 | do 12 | echo "$l" | awk '{ 13 | if (NF < 2) { 14 | next 15 | } 16 | for (i = 1; i < NF; i++) { 17 | printf $i " " 18 | } 19 | printf "(" $i ")\n" 20 | }' 21 | done 22 | -------------------------------------------------------------------------------- /scripts/hwinfo/cpumodel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | grep "model name" /proc/cpuinfo | head -n 1 | awk '{ 4 | for (i=4; i <NF + 1; i++) { 5 | printf $i 6 | if (i != NF) { 7 | printf " " 8 | } 9 | } 10 | printf "\n" 11 | }' 12 | -------------------------------------------------------------------------------- /scripts/hwinfo/eth_drivers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Dev MAC Driver State" 4 | 5 | for e in /sys/class/net/* 6 | do 7 | dev=$(basename "$e") 8 | driver=$(readlink "$e"/device/driver/module) 9 | if [ "$driver" ] 10 | then 11 | driver=$(basename "$driver") 12 | else 13 | driver="none" 14 | fi 15 | addr=$(cat "$e/address") 16 | operstate=$(cat "$e/operstate") 17 | 18 | printf "%s\t%s\t%s\t%s\n" "$dev" "$addr" "$driver" "$operstate" 19 | done 20 | -------------------------------------------------------------------------------- /scripts/hwinfo/ipaddrs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ifconfig -a | grep "inet addr" | awk '{ 4 | if ($2 == "addr:127.0.0.1") { 5 | exit 6 | } 7 | print $2 8 | }' | awk -F: '{ print $2 }' 9 | -------------------------------------------------------------------------------- /scripts/hwinfo/lzhwinfo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | 5 | cd "$BINDIR" || exit 1 6 | 7 | MODEL=$(./cpumodel.sh) 8 | 
SOCKS=$(./nr_cpusocks.sh) 9 | CORES=$(./nr_cpuspersock.sh) 10 | THRS=$(./nr_thrspercpu.sh) 11 | TOTAL_THRS=$(./nr_hwthrs.sh) 12 | 13 | DETAIL="$SOCKS socks / $CORES cores / $THRS hyper-thr / $TOTAL_THRS thrs" 14 | echo CPU: "$MODEL ($DETAIL)" 15 | echo MEM: "$(./sz_mem.sh)" 16 | 17 | function linestocsv() { 18 | IFS=$'\n' 19 | RES="" 20 | for l in $1 21 | do 22 | RES=$RES$l", " 23 | done 24 | if [ "$RES" ] 25 | then 26 | echo "${RES::-2}" 27 | fi 28 | unset IFS 29 | } 30 | 31 | printf "NICs: " 32 | linestocsv "$(./nics.sh)" 33 | printf "IPs: " 34 | linestocsv "$(./ipaddrs.sh)" 35 | printf "STORAGEs: " 36 | linestocsv "$(./blockdevs.sh)" 37 | -------------------------------------------------------------------------------- /scripts/hwinfo/nics.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! which lspci > /dev/null 4 | then 5 | echo "[ERROR] You should install lspci" 6 | exit 1 7 | fi 8 | 9 | function ethernets() { 10 | lspci | grep "Ethernet controller" | awk '{ 11 | for (i = 4; i <= NF; i++) { 12 | printf $i 13 | if (i < NF) { 14 | printf " " 15 | } 16 | } 17 | printf "\n" 18 | }' | uniq 19 | } 20 | 21 | IFS=$'\n' 22 | for l in $(ethernets) 23 | do 24 | echo "$l" x "$(lspci | grep -c "$l")" 25 | done 26 | -------------------------------------------------------------------------------- /scripts/hwinfo/nr_cpusocks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo $(( $(grep "^physical id" /proc/cpuinfo | sort | tail -n 1 | \ 4 | awk '{print $4}') + 1)) 5 | -------------------------------------------------------------------------------- /scripts/hwinfo/nr_cpuspersock.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | grep "^core id" /proc/cpuinfo | sort | uniq | wc -l 4 | -------------------------------------------------------------------------------- /scripts/hwinfo/nr_hwthrs.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | grep -c "^processor" /proc/cpuinfo 4 | -------------------------------------------------------------------------------- /scripts/hwinfo/nr_thrspercpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | 5 | pushd "$BINDIR" > /dev/null 6 | 7 | NR_THRS=$(./nr_hwthrs.sh) 8 | NR_CPUS=$(( $(./nr_cpusocks.sh) * $(./nr_cpuspersock.sh))) 9 | 10 | echo "$NR_THRS" "$NR_CPUS" | awk '{print $1 / $2}' 11 | -------------------------------------------------------------------------------- /scripts/hwinfo/sz_mem.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sz=$(grep "^MemTotal:" /proc/meminfo | awk '{print $2}') 4 | unit=KiB 5 | 6 | if [ "$sz" -gt 1024 ] 7 | then 8 | sz=$((sz / 1024)) 9 | unit=MiB 10 | fi 11 | 12 | if [ "$sz" -gt 1024 ] 13 | then 14 | sz=$((sz / 1024)) 15 | unit=GiB 16 | fi 17 | 18 | echo $sz $unit 19 | -------------------------------------------------------------------------------- /scripts/idleof: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -lt 2 ]; 4 | then 5 | echo "Usage: $0 <user to monitor> <delay time>" 6 | echo "" 7 | exit 1 8 | fi 9 | 10 | while true 11 | do 12 | CMD="w | grep -P \"^$1( |\$)\" | awk '{print \$5}'" 13 | echo "By $(date)" 14 | eval "$CMD" 15 | echo "" 16 | sleep "$2" 17 | done 18 | -------------------------------------------------------------------------------- /scripts/kernel_dev/DEPRECATED: -------------------------------------------------------------------------------- 1 | This directory is deprecated. Please use linux_hack/ on the root of this repo 2 | instead. 
#!/bin/bash

# Build (and optionally install) a Linux kernel from <src dir> into
# <build dir>, seeding the config from the running kernel's /boot config.
#
# Fixes: quote "$2" in the realpath call (paths with spaces were split),
# write the canonical "# CONFIG_... is not set" kconfig comment form, and
# correct the "OTION" typo in the usage message.

set -e

pr_usage()
{
	echo "
Usage: $0 [OPTION]... <src dir> <build dir>

OPTION
  --config	Config file to append
  --install	Install built kernel
  --reboot	Reboot after install
"
}

pr_msg_usage_exit()
{
	msg=$1
	exit_code=$2
	echo "$msg"
	pr_usage
	exit "$exit_code"
}

src_dir=""
build_dir=""
additional_config=""
do_install="false"
do_reboot="false"
while [ $# -ne 0 ]
do
	case $1 in
	"--config")
		if [ $# -lt 2 ]
		then
			pr_msg_usage_exit "--config argument is not given" 1
		fi
		additional_config=$2
		shift 2
		continue
		;;
	"--install")
		do_install="true"
		shift 1
		continue
		;;
	"--reboot")
		do_reboot="true"
		shift 1
		continue
		;;
	*)
		if [ $# -lt 2 ]
		then
			pr_msg_usage_exit "src and build dirs not given" 1
		fi
		if [ ! "$src_dir" = "" ]
		then
			pr_msg_usage_exit "more than one src dir given" 1
		fi
		src_dir=$1
		# quote "$2": a build dir path containing spaces would
		# otherwise be word-split
		build_dir=$(realpath "$2")
		shift 2
		;;
	esac
done

bindir=$(dirname "$0")

if [ "$src_dir" = "" ]
then
	pr_msg_usage_exit "src dir not given" 1
fi

sudo apt install -y build-essential libssl-dev bc bison flex libelf-dev

orig_config=$build_dir/.config

if [ ! -d "$build_dir" ]
then
	mkdir "$build_dir"
fi

if [ ! -f "$orig_config" ]
then
	cp "/boot/config-$(uname -r)" "$orig_config"
	# BTF generation needs pahole; disable it using the canonical
	# kconfig "is not set" comment form
	sed -i 's/CONFIG_DEBUG_INFO_BTF=y/# CONFIG_DEBUG_INFO_BTF is not set/' \
		"$build_dir/.config"
	make -C "$src_dir" O="$build_dir" olddefconfig
	make -C "$src_dir" O="$build_dir" localmodconfig
fi

if [ ! "$additional_config" = "" ]
then
	cat "$additional_config" >> "$build_dir/.config"
fi

make -C "$src_dir" O="$build_dir" olddefconfig
make -C "$src_dir" O="$build_dir" -j"$(nproc)"

if [ "$do_install" = "true" ]
then
	sudo make -C "$src_dir" O="$build_dir" modules_install install
	kernelversion=$(make -C "$src_dir" O="$build_dir" -s kernelrelease)
	sudo "$bindir/set_kernel.py" "$kernelversion"
fi

if [ "$do_reboot" = "true" ]
then
	echo "reboot now"
	sudo shutdown -r now
fi
#!/usr/bin/env python3

"""List kernels installed on the system, as known to the bootloader."""

import argparse

GRUBCFG_PATH = "/boot/grub/grub.cfg"

GRUB = "grub"
CUBOX = "cubox"
RASP2 = "rasp2"

def parse_grub_kernels(grubcfg_text):
    """Return kernel names found on 'linux' lines of grub.cfg text.

    Fixed: the original used lstrip('/boot/vmlinuz-'), which strips a
    character *set* rather than the prefix, mangling names such as
    'linux-custom' (to 'custom').
    """
    prefix = '/boot/vmlinuz-'
    kernels = []
    for line in grubcfg_text.split('\n'):
        tokens = line.split()
        if len(tokens) < 2:
            continue
        if tokens[0] == 'linux':
            kernel_position = tokens[1]
            if kernel_position.startswith(prefix):
                kernel_name = kernel_position[len(prefix):]
            else:
                kernel_name = kernel_position
            if not kernel_name in kernels:
                kernels.append(kernel_name)
    return kernels

def grub_kernels():
    """Return kernel names parsed from the system's grub.cfg."""
    with open(GRUBCFG_PATH) as f:
        return parse_grub_kernels(f.read())

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('bootloader', nargs='?', type=str, default='grub',
            choices=[GRUB, CUBOX, RASP2], metavar='bootloader',
            help='bootloader of the system')
    args = parser.parse_args()
    bootloader = args.bootloader

    if bootloader == GRUB:
        kernels = grub_kernels()
        print("\n".join(kernels))
    elif bootloader == CUBOX:
        print(bootloader + " is not supported yet...")
    elif bootloader == RASP2:
        print(bootloader + " is not supported yet...")
    else:
        print("Not supported bootloader %s\n" % bootloader)
"$target_specified" = "true" ] 75 | then 76 | echo "Target kernels to remove are not specified" 77 | pr_usage_exit 1 78 | fi 79 | 80 | current_kernel=$(uname -r) 81 | rm_start=$except_new_nr 82 | rm_end=$((${#kernels[@]} - except_old_nr)) 83 | 84 | for ((i = 0 ; i < ${#kernels[@]} ; i++)) 85 | do 86 | if [ $i -lt $rm_start ] || [ $i -ge $rm_end ] 87 | then 88 | continue 89 | fi 90 | if [ "${kernels[$i]}" = "$current_kernel" ] 91 | then 92 | continue 93 | fi 94 | kernels_to_remove+=("${kernels[$i]}") 95 | done 96 | 97 | for ((i = 0 ; i < ${#kernels_to_remove[@]} ; i++)) 98 | do 99 | if [ "${kernels_to_remove[$i]}" = "$current_kernel" ] 100 | then 101 | unset 'kernels_to_remove[i]' 102 | fi 103 | done 104 | 105 | if [ "$EUID" -ne 0 ] && [ "$dry_run" = "false" ] 106 | then 107 | echo "run as root, please" 108 | exit 1 109 | fi 110 | 111 | for ver in "${kernels_to_remove[@]}" 112 | do 113 | if [ "$dry_run" = "true" ] 114 | then 115 | echo "Remove $ver" 116 | continue 117 | fi 118 | if [ ! -e "/boot/vmlinuz-$ver" ] 119 | then 120 | echo "vmlinuz-$ver not found" 121 | continue 122 | fi 123 | 124 | rm "/boot/vmlinuz-$ver" 125 | rm "/boot/initrd.img-$ver" 126 | rm "/boot/System.map-$ver" 127 | rm "/boot/config-$ver" 128 | rm -fr "/lib/modules/$ver" 129 | rm "/var/lib/initramfs-tools/$ver" 130 | done 131 | if [ "$dry_run" = "true" ] 132 | then 133 | exit 0 134 | fi 135 | 136 | update-grub2 137 | -------------------------------------------------------------------------------- /scripts/kernel_dev/stats/active_maintainers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -lt 1 ] 4 | then 5 | echo "Usage: $0 <linux repo> [activeness threshold in days]" 6 | exit 1 7 | fi 8 | 9 | linux_repo=$1 10 | 11 | if [ $# -ge 2 ] 12 | then 13 | active_thres_days=$2 14 | else 15 | active_thres_days=180 16 | fi 17 | 18 | maintainers=$(git -C "$linux_repo" show HEAD:MAINTAINERS 2> /dev/null | \ 19 | grep '^M:' | sort | uniq | 
awk -F'^M:\t' '{print $2}') 20 | nr_maintainers=$(echo "$maintainers" | wc -l) 21 | 22 | since_date=$(date --date="-$active_thres_days day") 23 | nr_active_maintainers=0 24 | while IFS= read -r author 25 | do 26 | email=$(echo "$author" | awk '{print $NF}') 27 | nr_recent_commits=$(git -C "$linux_repo" \ 28 | log --pretty=%h --since "$since_date" \ 29 | --author "$email" -1 | wc -l) 30 | if [ "$nr_recent_commits" -eq 1 ] 31 | then 32 | echo "$author": active 33 | nr_active_maintainers=$((nr_active_maintainers + 1)) 34 | else 35 | echo "$author": inactive 36 | fi 37 | done <<< "$maintainers" 38 | 39 | echo "$nr_maintainers $nr_active_maintainers" 40 | -------------------------------------------------------------------------------- /scripts/kernel_dev/stats/nr_maintainers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <linux repo>" 6 | exit 1 7 | fi 8 | 9 | linux_repo=$1 10 | 11 | echo "version total new" 12 | 13 | prev_nr_maintainers=0 14 | for major in 2.6 3 4 5 6 15 | do 16 | for minor in {0..40} 17 | do 18 | version="v$major.$minor" 19 | nr_maintainers=$(git -C "$linux_repo" \ 20 | show "$version":MAINTAINERS 2> /dev/null | \ 21 | grep '^M:' | sort | uniq | wc -l) 22 | if [ "$nr_maintainers" = "0" ] 23 | then 24 | continue 25 | fi 26 | 27 | if [ "$prev_nr_maintainers" -eq 0 ] 28 | then 29 | new=0 30 | else 31 | new=$((nr_maintainers - prev_nr_maintainers)) 32 | fi 33 | 34 | echo "$version $nr_maintainers $new" 35 | prev_nr_maintainers=$nr_maintainers 36 | done 37 | done 38 | -------------------------------------------------------------------------------- /scripts/memcg_mspike.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Consume entire memory + <sz spike> periodically 4 | 5 | if [ $# -ne 2 ] 6 | then 7 | echo "Usage: $0 <spike size (MiB)> <interval (seconds)>" 8 | exit 1 9 | fi 10 | 11 | if [ ! 
-f "$HOME/memwalk/memwalk" ] 12 | then 13 | echo "Install memwalk at $HOME/memwalk/memwalk first." 14 | echo "You can clone it from https://github.com/sjp38/memwalk" 15 | exit 1 16 | fi 17 | 18 | BINDIR=$(dirname "$0") 19 | cd $BINDIR 20 | 21 | MWALK=$HOME/memwalk/memwalk 22 | 23 | SZ_SPIKE=$(($1 * 1024 * 1024)) 24 | INTERVAL=$2 25 | 26 | MEMCG=$(grep memory /proc/$$/cgroup | awk -F: '{print $3}') 27 | MEMCG=/sys/fs/cgroup/memory/$MEMCG 28 | MEMLIM=$(cat "$MEMCG"/memory.limit_in_bytes) 29 | 30 | while :; 31 | do 32 | sleep "$INTERVAL" 33 | MEM_IN_USE=$(cat "$MEMCG"/memory.usage_in_bytes) 34 | TO_CONSUME=$((MEMLIM - MEM_IN_USE + SZ_SPIKE)) 35 | echo "consume $TO_CONSUME bytes" 36 | $MWALK $TO_CONSUME 64 0 -q 37 | done 38 | -------------------------------------------------------------------------------- /scripts/memfp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Print out memory footprint of a command in 1 second interval. 4 | 5 | if [ $# -ne 1 ] 6 | then 7 | echo "Usage: $0 <command name>" 8 | exit 1 9 | fi 10 | 11 | BINDIR=$(dirname "$0") 12 | cd "$BINDIR" || exit 13 | 14 | COMM=$1 15 | PID=$(pgrep -f "$COMM" | head -n 1 | awk '{print $1}') 16 | 17 | echo "vsz rss pid cmd" 18 | while true; 19 | do 20 | PIDS=$(./subprocs.py "$PID") 21 | for P in $PIDS 22 | do 23 | ps -o vsz=,rss=,pid=,cmd= --pid "$P" 24 | done 25 | sleep 1 26 | done 27 | -------------------------------------------------------------------------------- /scripts/mysql/backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <backup location>" 6 | exit 1 7 | fi 8 | 9 | BACKUP=$1 10 | MYSQLDATA=/usr/local/mysql/data 11 | 12 | sudo cp -R $MYSQLDATA "$BACKUP" 13 | sync 14 | -------------------------------------------------------------------------------- /scripts/mysql/kill_mysqld.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | 5 | pushd "$BINDIR" > /dev/null 6 | 7 | sudo -u mysql kill -SIGTERM "$(pidof mysqld)" 8 | 9 | ../wait_workof.sh mysqld 10 | 11 | popd > /dev/null 12 | -------------------------------------------------------------------------------- /scripts/mysql/reset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pushd /usr/local/mysql 4 | 5 | sudo rm -fr data 6 | sudo mkdir data 7 | sudo chown -R mysql data 8 | sudo -u mysql ./scripts/mysql_install_db --user=mysql 9 | 10 | popd 11 | -------------------------------------------------------------------------------- /scripts/mysql/restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <backup location>" 6 | exit 1 7 | fi 8 | 9 | BACKUP=$1 10 | MYSQLDATA=/usr/local/mysql/data 11 | 12 | sudo rm -fr $MYSQLDATA 13 | sudo cp -R "$BACKUP" "$MYSQLDATA" 14 | sudo chown -R mysql $MYSQLDATA 15 | sync 16 | -------------------------------------------------------------------------------- /scripts/mysql/start_mysqld.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | 5 | pushd "$BINDIR" > /dev/null 6 | 7 | sudo -u mysql /usr/local/mysql/bin/mysqld_safe --user=mysql & 8 | 9 | ../wait_workof.sh mysqld 10 | 11 | popd > /dev/null 12 | -------------------------------------------------------------------------------- /scripts/nr_thrs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 1 ] 4 | then 5 | echo "Usage: $0 <pid>" 6 | exit 1 7 | fi 8 | 9 | pid=$1 10 | 11 | grep "^Threads" /proc/"$pid"/status | awk '{print $2}' 12 | -------------------------------------------------------------------------------- 
/scripts/perf/.gitignore: -------------------------------------------------------------------------------- 1 | plot.pdf 2 | -------------------------------------------------------------------------------- /scripts/perf/lat_trace.py: -------------------------------------------------------------------------------- 1 | # perf script event handlers, generated by perf script -g python 2 | # Licensed under the terms of the GNU GPL License version 2 3 | 4 | # The common_* event handler fields are the most useful fields common to 5 | # all events. They don't necessarily correspond to the 'common_*' fields 6 | # in the format files. Those fields not available as handler params can 7 | # be retrieved using Python functions of the form common_*(context). 8 | # See the perf-trace-python Documentation for the list of available functions. 9 | 10 | import os 11 | import sys 12 | 13 | import lbperfutil 14 | 15 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ 16 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') 17 | 18 | from perf_trace_context import * 19 | from Core import * 20 | 21 | import getopt 22 | 23 | USAGE = sys.argv[0] + "<orders>" 24 | 25 | target_orders = [] 26 | 27 | if len(sys.argv) > 1: 28 | target_orders = [int(o) for o in sys.argv[1].split(',')] 29 | 30 | 31 | def trace_begin(): 32 | pass 33 | 34 | def trace_end(): 35 | secs = sorted(latencies.keys()) 36 | for s in secs: 37 | for l in latencies[s]: 38 | print "%d %d" % (s, l) 39 | 40 | start_events = autodict() 41 | latencies = autodict() 42 | 43 | def sj__alloc_nodemask(event_name, context, common_cpu, 44 | common_secs, common_nsecs, common_pid, common_comm, 45 | common_callchain, __probe_ip, order): 46 | if target_orders and order not in target_orders: 47 | return 48 | 49 | try: 50 | start_events[common_cpu][order] += [common_secs * 10**9 + common_nsecs] 51 | except TypeError: 52 | start_events[common_cpu][order] = [common_secs * 10**9 + common_nsecs] 53 | 54 | def sj__alloc_nodemask_ret(event_name, context, 
#!/usr/bin/env python3

"""Print a CDF of latency values read from stdin.

Input lines are '<start time> <latency>' pairs (e.g. output of
lat_trace.py); only the second field is used.  Each output line is
'<latency> <percentile>'.
"""

import sys

def cdf_points(latencies, precision):
    """Return (value, percentile) CDF points for the given samples.

    latencies: iterable of numeric samples (need not be sorted, non-empty)
    precision: number of percentile steps to emit
    """
    data = sorted(latencies)
    points = []
    for i in range(precision):
        # Bug fix: this used '/', which is float division in Python 3,
        # so 'data[idx]' raised TypeError on the very first iteration.
        idx = len(data) * i // precision
        if idx >= len(data):
            break
        points.append((data[idx], 100.0 / precision * i))
    # The final point is always the maximum at the 100th percentile.
    points.append((data[-1], 100))
    return points

def main():
    precision = 100
    if len(sys.argv) > 1:
        precision = int(sys.argv[1])

    data = [int(line.split()[1]) for line in sys.stdin]
    if not data:
        exit(0)

    points = cdf_points(data, precision)
    for value, percentile in points[:-1]:
        print("%d %.3f" % (value, percentile))
    print("%d %d" % (points[-1][0], points[-1][1]))

if __name__ == '__main__':
    main()
| 2 | import os 3 | import sys 4 | 5 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ 6 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') 7 | 8 | from perf_trace_context import * 9 | from Core import * 10 | 11 | 12 | ev_per_time = autodict() 13 | 14 | def pr_evcnts_in_time(evnames=[]): 15 | if not evnames: 16 | evnames = sorted(ev_per_time.keys()) 17 | 18 | title = " time" 19 | if len(evnames) == 0: 20 | return 21 | for n in evnames: 22 | title += ", %s" % n 23 | print title 24 | 25 | secs = sorted(ev_per_time[n]) 26 | for s in secs: 27 | line = "%6s" % (s - secs[0]) 28 | for n in evnames: 29 | count = 0 30 | if s in ev_per_time[n]: 31 | count = ev_per_time[n][s] 32 | line += ",%13s" % count 33 | print line 34 | 35 | def nr_total_event(event): 36 | ret = 0 37 | for s in ev_per_time[event]: 38 | ret += ev_per_time[event][s] 39 | return ret 40 | 41 | def event_names(): 42 | return ev_per_time.keys() 43 | 44 | def count_event(name, time, count): 45 | try: 46 | ev_per_time[name][time] += count 47 | except TypeError: 48 | ev_per_time[name][time] = count 49 | 50 | 51 | start_events = autodict() 52 | latencies = autodict() 53 | def latency_start(id_, time): 54 | try: 55 | start_events[id_] += [time] 56 | except TypeError: 57 | start_events[id_] = [time] 58 | 59 | def latency_end(id_, time): 60 | start = start_events[id_][0] 61 | try: 62 | start_events[id_] = start_events[id_][1:] 63 | except TypeError: 64 | return 65 | endtime = time 66 | try: 67 | latencies[start] += [endtime - start] 68 | except TypeError: 69 | latencies[start] = [endtime - start] 70 | -------------------------------------------------------------------------------- /scripts/perf/nr_evt.py: -------------------------------------------------------------------------------- 1 | # perf script event handlers, generated by perf script -g python 2 | # Licensed under the terms of the GNU GPL License version 2 3 | 4 | # The common_* event handler fields are the most useful fields common to 5 | # all events. 
They don't necessarily correspond to the 'common_*' fields 6 | # in the format files. Those fields not available as handler params can 7 | # be retrieved using Python functions of the form common_*(context). 8 | # See the perf-trace-python Documentation for the list of available functions. 9 | 10 | import os 11 | import sys 12 | import lbperfutil 13 | 14 | def trace_begin(): 15 | pass 16 | 17 | def trace_end(): 18 | print "Every Events per Second" 19 | print "=======================" 20 | print "" 21 | lbperfutil.pr_evcnts_in_time() 22 | 23 | """ 24 | print "\n" 25 | print "Event per second" 26 | print "================" 27 | print "" 28 | for ev in sorted(lbperfutil.event_names()): 29 | lbperfutil.pr_evcnts_in_time([ev]) 30 | print "" 31 | """ 32 | 33 | print "\n" 34 | print "Total Events Count" 35 | print "==================" 36 | print "" 37 | for ev in sorted(lbperfutil.event_names()): 38 | print ev, ": ", lbperfutil.nr_total_event(ev) 39 | 40 | # pd is for parameters dict 41 | # keys of pd: attr, symbol, sample, dso, comm, ev_name, raw_buf, callchain 42 | # keys of pd['sample']: ip, pid, period, time, tid, cpu 43 | def process_event(pd): 44 | name = pd["ev_name"] 45 | count = pd["sample"]["period"] 46 | t = pd["sample"]["time"] / (1000*1000*1000) 47 | lbperfutil.count_event(name, t, count) 48 | 49 | #def trace_unhandled(event_name, context, event_fields_dict): 50 | # print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) 51 | 52 | #def print_header(event_name, cpu, secs, nsecs, pid, comm): 53 | # print "%-20s %5u %05u.%09u %8u %-20s " % \ 54 | # (event_name, cpu, secs, nsecs, pid, comm), 55 | -------------------------------------------------------------------------------- /scripts/perf/nr_trace.py: -------------------------------------------------------------------------------- 1 | # perf script event handlers, generated by perf script -g python 2 | # Licensed under the terms of the GNU GPL License version 2 3 | 4 | # The common_* 
event handler fields are the most useful fields common to 5 | # all events. They don't necessarily correspond to the 'common_*' fields 6 | # in the format files. Those fields not available as handler params can 7 | # be retrieved using Python functions of the form common_*(context). 8 | # See the perf-trace-python Documentation for the list of available functions. 9 | 10 | import os 11 | import sys 12 | 13 | import lbperfutil 14 | 15 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ 16 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') 17 | 18 | from perf_trace_context import * 19 | from Core import * 20 | 21 | 22 | def trace_begin(): 23 | pass 24 | 25 | def trace_end(): 26 | lbperfutil.pr_evcnts_in_time() 27 | 28 | # Add callback for your tracepoint as below. This script assumes the perf.data 29 | # was recorded with perf probe, sj:alloc_nodemask and sj:alloc_nodemask_ret, 30 | # which is added by command below: 31 | # ``` 32 | # $ sudo perf probe --add 'sj:alloc_nodemask=__alloc_pages_nodemask order' 33 | # $ sudo perf probe --add 'sj:alloc_nodemask_ret=__alloc_pages_nodemask:105 order' 34 | # ``` 35 | # 36 | # Please note that the line of sj:alloc_nodemask_ret can be irrelevant for 37 | # specific kernel version. 
#!/usr/bin/env python3

"""Pick one trace field from `perf script` output.

For every input line carrying the requested '<field>=<value>' pair,
prints '<time in ns> <value>'.
"""

import argparse
import sys

# example input line is output of `$ perf script`. It may look as below.
# command, pid, tid, timestamp, tracepoint name, and trace
#
#    memwalk  3837 [012]   383.632199: kmem:mm_page_alloc: \
#            page=0x2f95b76 pfn=49896310 order=0 migratetype=0 \
#            gfp_flags=GFP_NOWAIT|__GFP_NOWARN

def parse_line(line, wanted):
    """Return (time_ns, value) for the wanted field of 'line', or None.

    'line' is one line of `perf script` output (format above).  Lines
    without a valid timestamp or without the wanted key are skipped.
    """
    tokens = line.split()
    try:
        # tokens[3] is the timestamp like '383.632199:'; strip the
        # trailing colon and convert to nanoseconds.  Also catch
        # IndexError so short header/comment lines do not crash us
        # (the original only caught ValueError).
        time_ns = int(float(tokens[3][:-1]) * 1000 * 1000 * 1000)
    except (ValueError, IndexError):
        # not a sample line; ignore
        return None
    for token in tokens[5:]:
        # Split on the first '=' only and skip tokens without one
        # (the original 'split("=")' raised ValueError on such tokens).
        key, sep, value = token.partition('=')
        if sep and key == wanted:
            # duplicate keys on one line are not expected; first wins
            return (time_ns, value)
    return None

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('field', metavar='<field>',
            help='field you want to pick')
    args = parser.parse_args()

    for line in sys.stdin:
        parsed = parse_line(line, args.field)
        if parsed:
            print("%d %s" % parsed)

if __name__ == '__main__':
    main()
Shrink to $MAX_SAMPLES data" 13 | "$BINDIR"/sample.py $(("$NR_SAMPLES" / "$MAX_SAMPLES")) < $MIDFILE \ 14 | > $MIDFILE.tmp 15 | mv $MIDFILE.tmp $MIDFILE 16 | fi 17 | 18 | parse_perfout() { 19 | echo "Swpin" 20 | grep swpin "$MIDFILE" | sed -e 's/://' | sed -e 's/page=//' | 21 | awk '{print $4 " " $7}' 22 | echo 23 | echo 24 | echo "swpout" 25 | grep swpout "$MIDFILE" | sed -e 's/://' | sed -e 's/page=//' | 26 | awk '{print $4 " " $7}' 27 | } 28 | 29 | parse_perfout | 30 | "$BINDIR"/../../gnuplot/plot_stdin.sh scatter-dot \ 31 | "Time (seconds)" "Page Frame" 32 | -------------------------------------------------------------------------------- /scripts/perf/pr_evt.py: -------------------------------------------------------------------------------- 1 | # perf script event handlers, generated by perf script -g python 2 | # Licensed under the terms of the GNU GPL License version 2 3 | 4 | # The common_* event handler fields are the most useful fields common to 5 | # all events. They don't necessarily correspond to the 'common_*' fields 6 | # in the format files. Those fields not available as handler params can 7 | # be retrieved using Python functions of the form common_*(context). 8 | # See the perf-trace-python Documentation for the list of available functions. 
#!/usr/bin/env python3

"""Print avg/min/max/count statistics of one trace field.

Reads `perf script` output from stdin and collects every integer value
of the requested '<field>=<value>' pair.
"""

import argparse
import sys

# example input line is output of `$ perf script`. It may look as below.
# command, pid, tid, timestamp, tracepoint name, and trace
#
#    memwalk  3837 [012]   383.632199: kmem:mm_page_alloc: \
#            page=0x2f95b76 pfn=49896310 order=0 migratetype=0 \
#            gfp_flags=GFP_NOWAIT|__GFP_NOWARN

def stats_of(data):
    """Return (average, min, max, count) of 'data'.

    Empty input yields the -1 placeholders the original printed.
    """
    if not data:
        return (-1, -1, -1, 0)
    return (sum(data) / float(len(data)), min(data), max(data), len(data))

def main():
    # Bug fix: this was 'arg.parse.ArgumentParser()', a NameError that
    # made the script crash on every invocation.  Also fixed the
    # '<filed name>' metavar typo.
    parser = argparse.ArgumentParser()
    parser.add_argument('field_name', metavar='<field name>',
            help='name of the field')
    args = parser.parse_args()
    wanted = args.field_name

    data = []
    for line in sys.stdin:
        tokens = line.split()
        try:
            # validate the timestamp field so non-sample lines are skipped
            float(tokens[3][:-1])
        except (ValueError, IndexError):
            # ignore
            continue
        for token in tokens[5:]:
            # split on the first '=' only; skip tokens without one
            key, sep, value = token.partition('=')
            if sep and key == wanted:
                data.append(int(value))

    average, min_, max_, count = stats_of(data)
    print("avg: %.3f\nmin: %d\nmax: %d\ncount: %d" % (
        average, min_, max_, count))

if __name__ == '__main__':
    main()
perf probe --list | grep lb:swpin > /dev/null 9 | then 10 | echo "Add swpin probe" 11 | perf probe --add lb:swpin='swap_readpage page' &> /dev/null 12 | fi 13 | 14 | echo "Start tracing" 15 | if [ $# -eq 1 ] 16 | then 17 | additionaloption=$1 18 | fi 19 | perf record -e lb:swpin -e lb:swpout "$additionaloption" 20 | -------------------------------------------------------------------------------- /scripts/ply/.gitignore: -------------------------------------------------------------------------------- 1 | ply 2 | -------------------------------------------------------------------------------- /scripts/ply/README: -------------------------------------------------------------------------------- 1 | This directory contains scripts using ply[1]. 2 | 3 | 4 | Pre-requisites 5 | -------------- 6 | 7 | To use the scripts, 'ply' should be installed on the machine. Also, the kernel 8 | should be configured to support the 'ply'. 9 | 10 | 11 | [1] https://wkz.github.io/ply/ 12 | -------------------------------------------------------------------------------- /scripts/ply/TODO: -------------------------------------------------------------------------------- 1 | - More clean output format 2 | - Interval and counts 3 | -------------------------------------------------------------------------------- /scripts/ply/callstack_kprobe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Print callstacks and number of fires of a kprobe point 4 | 5 | if [ $# -eq 0 ] 6 | then 7 | echo "Usage: $0 <kernel function name>..." 
#!/bin/bash

# Install ply

BINDIR=$(dirname "$0")
pushd "$BINDIR" > /dev/null || exit 1

# Bug fix: abort if the clone or cd fails.  Without these checks a
# failed clone left us in BINDIR, and 'git checkout 2.1.1' ran against
# the enclosing repository instead of the ply checkout.
git clone https://github.com/iovisor/ply.git || exit 1
cd ply || exit 1
git checkout 2.1.1

# Restore the caller's directory, matching the sibling scripts.
popd > /dev/null
8 | exit 1 9 | fi 10 | 11 | plycmd="" 12 | for fn in "${@:1}" 13 | do 14 | plycmd+="kprobe:$fn {@[caller] = count();} " 15 | done 16 | 17 | cmd="sudo ply '$plycmd'" 18 | eval "$cmd" 19 | -------------------------------------------------------------------------------- /scripts/ply/xtime_kprobe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Print per-process execution time of a process context function having kprobe 4 | # and kretprobe 5 | 6 | if [ $# -eq 0 ] 7 | then 8 | echo "Usage: $0 <function name> ..." 9 | exit 1 10 | fi 11 | 12 | plycmd="" 13 | for fn in "${@:1}" 14 | do 15 | plycmd+="kprobe:$fn {start[kpid] = time;}" 16 | plycmd+=" 17 | kretprobe:$fn / start[kpid] / { 18 | latency[kpid] = time - start[kpid]; 19 | if ($fn[kpid]) 20 | $fn[kpid] = $fn[kpid] + latency[kpid]; 21 | else 22 | $fn[kpid] = latency[kpid]; 23 | delete latency[kpid]; 24 | delete start[kpid]; 25 | } " 26 | done 27 | 28 | cmd="sudo ply '$plycmd'" 29 | eval "$cmd" 30 | -------------------------------------------------------------------------------- /scripts/repeat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 3 ] 4 | then 5 | echo "Usage: $0 <delay> <count> <command>" 6 | exit 1 7 | fi 8 | 9 | delay=$1 10 | count=$2 11 | cmd=$3 12 | 13 | iter=0 14 | 15 | while [ $iter -lt "$count" ] || [ "$count" -eq -1 ] 16 | do 17 | $cmd 18 | sleep "$delay" 19 | iter=$((iter + 1)) 20 | done 21 | -------------------------------------------------------------------------------- /scripts/report/fmt_tbl.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Format table in easy-to-read format 5 | """ 6 | 7 | import argparse 8 | import sys 9 | 10 | def fmt_pr_tbl(rows, field_lengths): 11 | seperator = ' ' * nr_min_spaces 12 | for row in rows: 13 | formatted_fields = [] 14 | for idx, field in 
enumerate(row): 15 | field_len = field_lengths[idx] 16 | if idx < len(row) - 1: 17 | spaces = ' ' * (field_len - len(field)) 18 | else: 19 | spaces = '' 20 | formatted_fields.append('%s%s' % (field, spaces)) 21 | 22 | print(seperator.join(formatted_fields)) 23 | 24 | def fmt_tbl(lines): 25 | rows = [] 26 | field_lengths = [] 27 | 28 | for line in lines: 29 | line = line.strip() 30 | if line == '': 31 | fmt_pr_tbl(rows, field_lengths) 32 | print('') 33 | rows = [] 34 | field_lengths = [] 35 | continue 36 | if line.startswith('#'): 37 | continue 38 | fields = line.split() 39 | rows.append(fields) 40 | if field_lengths and len(field_lengths) != len(fields): 41 | print('Wrong table input: %s' % line) 42 | exit(1) 43 | for idx, field in enumerate(fields): 44 | if len(field_lengths) <= idx: 45 | field_lengths.append(len(field)) 46 | continue 47 | if len(field) > field_lengths[idx]: 48 | field_lengths[idx] = len(field) 49 | fmt_pr_tbl(rows, field_lengths) 50 | 51 | def main(): 52 | global nr_min_spaces 53 | 54 | parser = argparse.ArgumentParser() 55 | parser.add_argument('file', metavar='<file>', nargs='?', 56 | help='input file') 57 | parser.add_argument('--example', action='store_true', help='show example') 58 | parser.add_argument('--spaces', type=int, default=1, 59 | help='minimum number of spaces between fields') 60 | 61 | args = parser.parse_args() 62 | 63 | nr_min_spaces = args.spaces 64 | 65 | if args.example: 66 | test(args.spaces) 67 | return 68 | 69 | if args.file: 70 | with open(args.file, 'r') as f: 71 | lines = f.read().split('\n') 72 | else: 73 | lines = sys.stdin 74 | fmt_tbl(lines) 75 | 76 | def test(): 77 | test_input = """ 78 | 1 2 3 79 | 4 5 6 80 | 7 8 9 81 | abc de 10 82 | 83 | 84 | 12 345 89 85 | # comment 86 | 1 2 3""" 87 | fmt_tbl(test_input.split('\n')) 88 | 89 | if __name__ == '__main__': 90 | main() 91 | -------------------------------------------------------------------------------- /scripts/report/memfree_to_used.py: 
#!/usr/bin/env python3

"""
Convert records to table

For example, converts

    name1
    key1 val11
    key2 val21


    name2
    key1 val12
    key2 val22

to

    xxx     name1   name2
    key1    val11   val12
    key2    val21   val22
"""

import sys

def parse_records(lines):
    """Parse record-format 'lines'.

    Returns (rec_names, keys, recs): record names in input order, the
    union of keys in first-seen order, and one {key: value} dict per
    record.  Keys may carry a trailing ':' which is stripped.
    """
    rec_names = []
    keys = []
    recs = []
    rec = {}
    rec_name = ''
    for line in lines:
        line = line.strip()
        if line == '':
            if rec:
                recs.append(rec)
                rec_names.append(rec_name)
                rec = {}
                # Bug fix: this reset was 'rec_name = []' (a list), while
                # the name is a string everywhere else.
                rec_name = ''
            continue
        if not rec_name:
            rec_name = line
            continue
        # Split on the first whitespace only, so values may contain
        # spaces (plain split() raised ValueError on such lines).
        key, val = line.split(maxsplit=1)
        if key[-1] == ':':
            key = key[:-1]
        if key not in keys:
            keys.append(key)
        rec[key] = val
    if rec:
        recs.append(rec)
        rec_names.append(rec_name)
    return rec_names, keys, recs

def main():
    rec_names, keys, recs = parse_records(sys.stdin)

    print('xxx\t%s' % '\t'.join(rec_names))
    for key in keys:
        fields = []
        for idx, rec in enumerate(recs):
            if idx == 0:
                fields.append(key)
            # '-' marks a key this record does not have
            fields.append(rec.get(key, '-'))
        print('\t'.join(fields))

if __name__ == '__main__':
    main()
#!/usr/bin/env python3

"""
Transfer table to records

For example, converts

    <...> name1 name2
    key1 val11 val12
    key2 val21 val22
    ...

to

    name1
    key1 val11
    key2 val21


    name2
    key1 val12
    key2 val22
"""

import sys

def main():
    record_names = None
    key_order = []
    records = []
    for raw in sys.stdin:
        line = raw.strip()
        # lines starting with '#' are comments
        if line.startswith('#'):
            continue
        if not record_names:
            # header row: first field is a placeholder, the rest name
            # the records (columns)
            record_names = line.split()[1:]
            continue
        for col, field in enumerate(line.split()):
            if col == 0:
                # first field of a data row is the key
                key = field
                key_order.append(key)
            elif len(records) < col:
                # first data row: allocate one dict per record column
                records.append({key: field})
            else:
                records[col - 1][key] = field

    for pos, record in enumerate(records):
        print(record_names[pos])
        for key in key_order:
            print('%s\t%s' % (key, record[key]))
        # two blank lines between records
        if pos != len(records) - 1:
            print('\n')

if __name__ == '__main__':
    main()
#!/usr/bin/env python3

"""Filter two-column stdin lines to those whose y value is in (min, max)."""

import argparse
import sys

def yval_of(line):
    """Return the second whitespace field of 'line' as an int.

    Base 0 auto-detects the prefix, so hexadecimal with prefix '0x' is
    supported as well as plain decimal.
    """
    return int(line.split()[1], 0)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('min', metavar='<min>', type=int, help='minimal y value')
    parser.add_argument('max', metavar='<max>', type=int, help='maximum y value')
    args = parser.parse_args()

    # NOTE: both bounds are exclusive, matching the original behavior.
    for line in sys.stdin:
        if args.min < yval_of(line) < args.max:
            print(line.strip())

# Idiom fix: the original ran everything at module level, consuming
# sys.argv and stdin at import time; guard the entry point instead.
if __name__ == '__main__':
    main()
#!/bin/bash

# Set the transparent hugepage mode via sysfs, or show usage and the
# current mode when invoked without the single expected argument.

thp_enabled_file=/sys/kernel/mm/transparent_hugepage/enabled

if [ $# -ne 1 ]
then
	echo "Usage: $0 <always|madvise|never>"
	echo ""
	echo "Current status: $(cat $thp_enabled_file)"
	echo ""
	exit 1
fi

echo "$1" > $thp_enabled_file
class Zone:
    """Free-page statistics of one /proc/buddyinfo zone.

    free_pages[i] holds the number of free blocks of order i, so a block
    at index i accounts for 2**i base pages.
    """
    # NOTE: the former class-level attributes ('name = None',
    # 'free_pages = []') were removed; a class-level mutable list is a
    # shared-state hazard and both were unconditionally overwritten in
    # __init__ anyway.

    def __init__(self, name, free_pages):
        self.name = name              # e.g. 'Node 0, zone Normal'
        self.free_pages = free_pages  # per-order free block counts

    def nr_free_pages(self):
        """Return the total number of free base pages in this zone."""
        return sum(2**order * nr_blocks
                   for order, nr_blocks in enumerate(self.free_pages))

    def nr_usable_pages(self, order):
        """Return free base pages usable for an allocation of 'order'.

        Only blocks of 'order' or higher can satisfy such an allocation.
        """
        return sum(2**(order + idx) * nr_blocks
                   for idx, nr_blocks in enumerate(self.free_pages[order:]))
parser.parse_args() 64 | order = args.order 65 | 66 | with open('/proc/buddyinfo', 'r') as f: 67 | binfo = f.read() 68 | """ 69 | binfo is in below format: 70 | 71 | Node 0, zone DMA 1 1 0 0 2 1 1 72 | Node 0, zone DMA32 3986 3751 3348 2811 2044 1233 760 73 | Node 0, zone Normal 2380 928 1518 7668 12639 12078 11520 74 | Node 1, zone Normal 681 2489 1869 12689 23714 23179 22081 75 | """ 76 | 77 | zones = [] 78 | free_bdpages = [] 79 | for line in binfo.strip('\n').split('\n'): 80 | fields = line.split() 81 | zone = Zone(' '.join(fields[:4]), [int(x) for x in fields[4:]]) 82 | zones.append(zone) 83 | 84 | SZ_PAGE = 4096 85 | 86 | for zone in zones: 87 | usable = zone.nr_usable_pages(order) * SZ_PAGE 88 | total = zone.nr_free_pages() * SZ_PAGE 89 | ufsi = (total - usable) / total if total > 0 else 1.0 90 | print("%s: %f (total %s, usable %s)" % (zone.name, 91 | ufsi, hrsf(total), hrsf(usable))) 92 | 93 | usable = sum([z.nr_usable_pages(order) for z in zones]) * SZ_PAGE 94 | total = sum([z.nr_free_pages() for z in zones]) * SZ_PAGE 95 | ufsi = (total - usable) / total if total > 0 else 1.0 96 | print("Total: %f (total %s, usable %s)" % ( 97 | ufsi, hrsf(total), hrsf(usable))) 98 | 99 | if __name__ == '__main__': 100 | main() 101 | -------------------------------------------------------------------------------- /scripts/vmastat.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import subprocess 5 | 6 | parser = argparse.ArgumentParser() 7 | parser.add_argument('--target', nargs='+', 8 | help='specify target pids or commands') 9 | parser.add_argument('--verbose', '-v', action='store_true', help='verbose output') 10 | args = parser.parse_args() 11 | target = args.target 12 | verbose = args.verbose 13 | 14 | res = subprocess.check_output("ps --no-headers -e -o pid,cmd".split()) 15 | res = res.decode('utf-8') 16 | procs = [] 17 | for l in res.split('\n'): 18 | fields = l.split() 19 | 
if len(fields) == 0: 20 | continue 21 | pid = fields[0] 22 | cmd = "" 23 | if len(fields) > 1: 24 | cmd = fields[1] 25 | if target and not pid in target and not cmd.split('/')[-1] in target: 26 | continue 27 | procs.append([pid, cmd]) 28 | 29 | nr_anon_vmas = 0 30 | nr_file_vmas = 0 31 | 32 | nr_vmas_map = {} 33 | for p in procs: 34 | pid = p[0] 35 | try: 36 | with open("/proc/%s/maps" % pid, 'r') as f: 37 | nr_vmas = 0 38 | for l in f: 39 | nr_vmas += 1 40 | fields = l.split() 41 | if len(fields) < 6: 42 | nr_anon_vmas += 1 43 | continue 44 | if not fields[5] in ['[stack]', '[vvar]', '[vdso]', 45 | '[vsyscall]']: 46 | nr_file_vmas += 1 47 | nr_vmas_map["%s (%s)" % (p[0], p[1])] = nr_vmas 48 | except: 49 | pass 50 | 51 | if verbose: 52 | print("proc\tnr_vmas") 53 | for p, n in sorted(nr_vmas_map.iteritems(), key=lambda k,v: (v,k)): 54 | print("%s\t%d" % (p, nr_vmas_map[p])) 55 | print('') 56 | 57 | nr_vmas_sorted = sorted(nr_vmas_map.values()) 58 | l = len(nr_vmas_sorted) 59 | print("nr_procs: %d" % l) 60 | print("nr_total_vmas: %d" % sum(nr_vmas_sorted)) 61 | print("nr_anon_vmas: %d" % nr_anon_vmas) 62 | print("nr_file_vmas: %d" % nr_file_vmas) 63 | if len(procs) <= 1: 64 | exit(0) 65 | print("average_nr_vmas: %d" % (sum(nr_vmas_sorted) / l)) 66 | print("min\t25th\t50th\t75th\tmax") 67 | print("%d\t%d\t%d\t%d\t%d" % (nr_vmas_sorted[0], nr_vmas_sorted[l // 4], 68 | nr_vmas_sorted[l // 4], nr_vmas_sorted[l // 4 * 3], nr_vmas_sorted[-1])) 69 | -------------------------------------------------------------------------------- /scripts/wait_machine.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 3 ] 4 | then 5 | echo "Usage: $0 <on|off> <target machine ip> <timeout>" 6 | exit 1 7 | fi 8 | 9 | ONOFF=$1 10 | ADDR=$2 11 | TIMEOUT=$3 12 | 13 | while true; 14 | do 15 | if [ $SECONDS -gt "$TIMEOUT" ] 16 | then 17 | echo "Timeout! 
#!/bin/bash
# Configure a zram block device of the given size and make it the only
# active swap space.  Must run as root (writes sysfs, runs swapon).

if [ $# -ne 1 ]
then
	echo "Usage: $0 <size of zram device>"
	exit 1
fi

ZRAM_SIZE=$1
# One compression stream per hardware thread.
NR_CPUS=$(grep -c processor /proc/cpuinfo)

modprobe zram
# Reset zram0 in case it was configured by a previous run.
echo 1 > /sys/block/zram0/reset
echo "$NR_CPUS" > /sys/block/zram0/max_comp_streams
echo "$ZRAM_SIZE" > /sys/block/zram0/disksize

# Replace all existing swap devices with the zram device.
swapoff -a
mkswap /dev/zram0
swapon /dev/zram0
echo "zram swap ($ZRAM_SIZE) enabled"
USAGE="%s <user name> <target> <ssh port> [password]" % sys.argv[0]

def parse_input(custom_usage=USAGE):
    """Read <user> <target> <port> [password] from sys.argv.

    Exits with the usage message when the three mandatory arguments are
    missing; prompts interactively for the password when it is not given
    on the command line.  Returns (user, target, port, password).
    """
    if len(sys.argv) < 4:
        print("usage: ", custom_usage)
        print("")
        exit(1)

    user, target, port = sys.argv[1:4]
    if len(sys.argv) > 4:
        password = sys.argv[4]
    else:
        password = getpass.getpass("password for %s at %s: " % (user, target))
    return user, target, port, password
6 | -------------------------------------------------------------------------------- /test_run_exps/hello: -------------------------------------------------------------------------------- 1 | main echo "hello" 2 | -------------------------------------------------------------------------------- /test_run_exps/infini_root_test.exps: -------------------------------------------------------------------------------- 1 | # first experiment 2 | start echo "start" 3 | main ./test_run_exps/run_secs.py 6 4 | back while true; do ./test_run_exps/run_secs.py 2; sleep 1; done 5 | end echo 'end!' 6 | -------------------------------------------------------------------------------- /test_run_exps/multiple_lines.exps: -------------------------------------------------------------------------------- 1 | # first experiment 2 | start echo "start" 3 | main ./test_run_exps/run_secs.py \ 4 | 6 5 | back while true; \ 6 | do ./test_run_exps/run_secs.py 2; \ 7 | sleep 1; \ 8 | done 9 | end echo 'end!' 10 | -------------------------------------------------------------------------------- /test_run_exps/multiple_mains.exps: -------------------------------------------------------------------------------- 1 | # first experiment 2 | start echo '6 and 2' 3 | main for i in `seq 1 6`; \ 4 | do \ 5 | echo 'main 6: '$i; \ 6 | sleep 1; \ 7 | done 8 | main for i in `seq 1 2`; \ 9 | do \ 10 | echo 'main 2: '$i; \ 11 | sleep 1; \ 12 | done 13 | end echo 'end!' 14 | 15 | start echo '3 and 8' 16 | main for i in `seq 1 3`; \ 17 | do \ 18 | echo 'main 3: '$i; \ 19 | sleep 1; \ 20 | done 21 | main for i in `seq 1 8`; \ 22 | do \ 23 | echo 'main 8: '$i; \ 24 | sleep 1; \ 25 | done 26 | end echo 'end!' 
#!/usr/bin/env python3

"""Recursively spawn a chain of processes, then sleep forever.

Used by the run_exps.py tests to verify that a whole tree of descendant
processes created by a background command gets cleaned up.
"""

import subprocess
import sys
import time

# Number of further descendants this process should create below itself.
remaining_spawns = int(sys.argv[1]) - 1

if remaining_spawns > 0:
    print('will spawn %d more child' % remaining_spawns)
    cmd = '%s %d' % (__file__, remaining_spawns)
    # subprocess.call() blocks until the child exits; since every child
    # ends in the infinite sleep loop below, each ancestor waits here
    # forever, keeping the whole chain alive.
    subprocess.call(cmd, shell=True)

while True:
    time.sleep(3)
print() 12 | collected_lines = [] 13 | continue 14 | collected_lines.append(line) 15 | print(' '.join(collected_lines)) 16 | 17 | if __name__ == '__main__': 18 | main() 19 | -------------------------------------------------------------------------------- /workloads/blogbench/blogbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WORKING_DIR=$PWD/$(dirname "$0") 4 | RUN_DIR="run_dir" 5 | 6 | pushd "$WORKING_DIR" 7 | 8 | uname -a 9 | 10 | if ! which blogbench 11 | then 12 | echo "[error] blogbench not installed" 13 | popd 14 | exit 1 15 | fi 16 | 17 | if [ ! -d $RUN_DIR ] 18 | then 19 | mkdir $RUN_DIR 20 | fi 21 | rm -fr "${RUN_DIR:?}"/* 22 | 23 | blogbench -d "$WORKING_DIR/$RUN_DIR" 24 | 25 | popd 26 | -------------------------------------------------------------------------------- /workloads/cloudsuite/datacaching/average_60results.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | 5 | nr_sample = 60 6 | 7 | legend = [] 8 | data = [] 9 | thenext = False 10 | for line in sys.stdin: 11 | splt = line.split() 12 | if len(splt) == 15 and splt[0] == 'timeDiff,': 13 | if len(legend) == 0: 14 | legend = splt 15 | thenext = True 16 | continue 17 | elif not thenext: 18 | continue 19 | thenext = False 20 | data.append([float(x) for x in line.split(',')]) 21 | 22 | if len(data) < 1: 23 | exit(1) 24 | 25 | if len(data[0]) < 1: 26 | exit(1) 27 | 28 | if len(data) > 60: 29 | data = data[-60:] 30 | 31 | out = [] 32 | for i in range(len(data[0])): 33 | sum_ = 0 34 | for j in range(len(data)): 35 | sum_ += data[j][i] 36 | out.append(str(sum_ / len(data))) 37 | print(" ".join(legend)) 38 | print(", ".join(out)) 39 | -------------------------------------------------------------------------------- /workloads/cloudsuite/datacaching/build_dockerimages.sh: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash
# Run the cloudsuite data-caching benchmark: restart the server, start an
# idle client container, warm the cache up, then run the measured load.

BINDIR=`dirname $0`
pushd $BINDIR

./cleanup.sh
./startserver.sh

# Keep the client container alive with a sleep loop so we can 'docker
# exec' the loader into it below.
docker run -d --name dc-client --net caching_network \
	lb-cloudsuite/data-caching:client \
	/bin/sh -c "while true; do sleep 3; done"

WORKDIR=/usr/src/memcached/memcached_client
DATADIR=/usr/src/memcached/twitter_dataset

# Tell the loader where the memcached server listens.
docker exec dc-client \
	/bin/bash -c "echo 'dc-server, 11211' > $WORKDIR/docker_servers.txt"

# warmup
docker exec dc-client \
	$WORKDIR/loader \
	-a $DATADIR/twitter_dataset_unscaled \
	-o $DATADIR/twitter_dataset_100x \
	-s $WORKDIR/docker_servers.txt -w 4 -S 100 -D 10240 -j -T 2

# run
docker exec dc-client \
	$WORKDIR/loader \
	-a $DATADIR/twitter_dataset_100x \
	-s $WORKDIR/docker_servers.txt -g 0.8 -T 1 -c 20 -w 6 -t 180


popd
#!/bin/bash
# Fetch the pinned revision of our cloudsuite fork into ./cloudsuite.
# If the directory already holds exactly that revision, do nothing; if it
# holds anything else, ask the user to remove it first.

# Because official cloudsuite is updated often, we use a fork.
REPO="https://github.com/sjp38/cloudsuite-personal.git"
TARGET_DIR="cloudsuite"
TARGET_REV="e48bc2434bc2d15fcec7f496245726a6a35ca000"

if ! which git &> /dev/null
then
	echo "[Error] git not found. Please install it."
	exit 1
fi

if [ -d $TARGET_DIR ]
then
	# Fixed: 'cd dir || (echo; exit 1)' only exits the subshell, so the
	# script kept running in the wrong directory after a failed cd.
	cd $TARGET_DIR || { echo "cd $TARGET_DIR failed"; exit 1; }
	if [ $TARGET_REV == "$(git rev-parse HEAD)" ]
	then
		echo "Already fetched. Nothing to do."
		exit 0
	else
		echo "$TARGET_DIR directory should be removed first."
		exit 1
	fi
fi

git clone $REPO $TARGET_DIR
cd $TARGET_DIR || { echo "cd $TARGET_DIR failed"; exit 1; }
git checkout $TARGET_REV
#!/usr/bin/env python3

"""
Receive summary.xml from stdin and print out essential fields: number of
users, total/throughput operation counts, and the per-operation response
time statistics.
"""

import sys
import xml.etree.ElementTree as ET


def walk_elem(elem, level=0):
    """Debugging helper: recursively pretty-print an element subtree."""
    outindent = " " * level
    indent = " " * (level + 1)
    print(outindent, "{")
    print(indent, "tag: ", elem.tag)
    print(indent, "attr: ", elem.attrib)
    print(indent, "text: ", elem.text)
    for child in elem:
        walk_elem(child, level + 1)
    print(outindent, "}")


def main():
    # Moved out of module scope: the old top-level code consumed stdin
    # and parsed it at import time, making the script unimportable.
    xmldat = "".join(sys.stdin)
    root = ET.fromstring(xmldat)
    #walk_elem(root, 0)

    print("users:\t", root.find("./driverSummary/users").text)
    print("nr_ops:\t", root.find("./driverSummary/totalOps").text)
    print("ops:\t", root.find("./benchSummary/metric").text)

    response_times = root.findall("./driverSummary/responseTimes/operation")
    for op in response_times:
        for lat in op:
            if lat.tag == "passed":
                continue
            if lat.tag == "percentile":
                # e.g. nth='90', suffix='th' -> '90th percentile'
                lat.tag = "%s%s %s" % (lat.attrib["nth"],
                        lat.attrib["suffix"], lat.tag)
            print("%s %s: %s" % (op.attrib["name"], lat.tag, lat.text))


if __name__ == '__main__':
    main()
#!/bin/bash
# Run the cloudsuite web-serving benchmark end to end: remove leftover
# containers, start the server-side containers, wait for mysql, then run
# the faban client.  $1 is forwarded to run-webserving.sh.

FILEDIR=`dirname $0`

pushd $FILEDIR
./cleanup-webserving.sh
./setup-webserving.sh

# Wait for mysql startup
# TODO: Wait more gracefully
echo "Wait mysql startup..."
for i in {1..5}
do
	printf "%d " $i
	sleep 1
done
printf "\n"

./run-webserving.sh $1
popd
#!/bin/bash
# Measure CMA allocation latency via the cma_eval debugfs interface.
#
# For <number of iteration> rounds, writes each requested page count to
# the eval file (each write triggers one measured CMA allocation), then
# prints the recorded latencies, a latency histogram, and gcma stats
# taken before and after the test.

if [ $# -lt 2 ];
then
	echo "Usage: $0 <number of iteration> <count of pages to be alloced>"
	echo ""
	exit 1
fi

WORKING_DIR=$(dirname "$0")

DEBUGFS_ROOT="/sys/kernel/debug"
# Fixed: this previously expanded the undefined $DEBUGFS (yielding just
# "/cma_eval") instead of $DEBUGFS_ROOT, and the paths below spelled the
# directory out by hand instead of using the variable.
DEBUGFS=$DEBUGFS_ROOT"/cma_eval"
NR_CMA_REQ=$1
NR_ALLOC_PAGES=$2

echo "[[ Test Start ]]"
echo "kernel version: $(uname -r)"
echo "kernel param: \"$(cat /proc/cmdline)\""
echo "date: $(date)"
echo ""

pushd "$WORKING_DIR"

echo "[before stat]"
./gcma_stat.sh

for i in $(seq 1 "$NR_CMA_REQ")
do
	for NR in $NR_ALLOC_PAGES
	do
		sleep 2
		echo "$NR" > "$DEBUGFS"/eval
	done
done

echo ""
echo "[latency]"
cat "$DEBUGFS"/res

echo ""
echo "[histogram]"
cat "$DEBUGFS"/res.hist

echo ""
echo "[after stat]"
"$WORKING_DIR"/gcma_stat.sh

popd

echo "[[ Test Done ]]"
-d $DEBUGFS_DIR ]; then 8 | echo "gcma debugfs not exist" 9 | exit 1 10 | fi 11 | 12 | ls $DEBUGFS_DIR > $TMP_DIR/$FILES_FILE 13 | 14 | while read FILE; 15 | do 16 | echo $FILE 17 | cat $DEBUGFS_DIR/$FILE 18 | echo "" 19 | done <$TMP_DIR/$FILES_FILE 20 | 21 | rm $TMP_DIR/$FILES_FILE 22 | -------------------------------------------------------------------------------- /workloads/kbuild/kbuild.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WORKING_DIR=$(dirname "$0") 4 | 5 | echo "[[ kernbuild start ]]" 6 | echo "kernel version: $(uname -r)" 7 | echo "kernel param: \"$(cat /proc/cmdline)\"" 8 | echo "date: $(date)" 9 | echo "" 10 | 11 | pushd "$WORKING_DIR" 12 | 13 | KSRC_FTP_DIR="http://ftp.kernel.org/pub/linux/kernel/v4.x/" 14 | KVER="linux-4.0" 15 | KSRC_FILE=$KVER".tar.xz" 16 | KSRC_FTP=$KSRC_FTP_DIR$KSRC_FILE 17 | if [ ! -f $KSRC_FILE ] 18 | then 19 | echo "curl $KSRC_FTP > $KSRC_FILE" 20 | curl "$KSRC_FTP" > "$KSRC_FILE" 21 | echo "curled!" 22 | fi 23 | 24 | if [ ! 
-d build_dir ] 25 | then 26 | mkdir build_dir 27 | tar Jxf $KSRC_FILE -C build_dir/ 28 | fi 29 | 30 | NR_CPU=$(grep -c "processor" /proc/cpuinfo) 31 | 32 | pushd build_dir/$KVER 33 | 34 | make distclean 35 | make defconfig 36 | echo 3 > /proc/sys/vm/drop_caches 37 | ( /usr/bin/time make -sj $((NR_CPU * 4)) ) 2>&1 38 | 39 | popd 40 | popd 41 | echo "[[ Test Done ]]" 42 | -------------------------------------------------------------------------------- /workloads/mem_stress/Makefile: -------------------------------------------------------------------------------- 1 | .SUFFIXES: .c .o 2 | 3 | CFLAGS := -Wall -Werror -g 4 | CC := gcc 5 | LIBS := -lpthread 6 | 7 | OBJS := stress.o 8 | TARGET := stress 9 | 10 | ${TARGET} : ${OBJS} 11 | ${CC} ${CFLAGS} -o $@ ${OBJS} ${LIBS} 12 | 13 | .c.o : 14 | ${CC} ${CFLAGS} -c $< -o $@ ${LIBS} 15 | 16 | clean: 17 | rm -f ${OBJS} ${TARGET} core 18 | 19 | APPS := stress 20 | -------------------------------------------------------------------------------- /workloads/mem_stress/README: -------------------------------------------------------------------------------- 1 | Stress memory 2 | 3 | Workload 4 | ======== 5 | 6 | Create a huge file and randomly read / write the file using multiple threads. 7 | 8 | 9 | Usage 10 | ===== 11 | 12 | ./mem_stress.sh 13 | -------------------------------------------------------------------------------- /workloads/mem_stress/mem_stress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WORKING_DIR=$(dirname "$0") 4 | 5 | pushd "$WORKING_DIR" 6 | 7 | DATA_FILE="2000MiB_file" 8 | 9 | if [ ! -f $DATA_FILE ] 10 | then 11 | echo "$DATA_FILE not found. create it." 
/**
 * stress memory
 *
 * Spawns NR_WORKER threads that, for RUNTIME seconds, read random
 * RW_SIZE chunks of a pre-created 2000 MiB file and write roughly half
 * of the chunks back, creating page cache / memory pressure.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>

#define FILEPATH "2000MiB_file"
#define FILE_SIZE ((unsigned long)2 * 1000 * 1024 * 1024) /* 2000 MiB */

#define NR_WORKER 20

#define RW_SIZE (43 * 1024 * 1024)

FILE *fp;
/* Written by main, polled by workers; volatile keeps the load in the loop. */
volatile int stop_stress = 0;
int RUNTIME = 10;

/* Worker thread: random reads, and writes back about half of the time. */
static void *random_rw(void *arg)
{
	long offset;
	int read_only, value;
	char *buffer;

	buffer = malloc(RW_SIZE);
	if (!buffer) {
		fprintf(stderr, "failed to allocate rw buffer!\n");
		return 0;
	}

	printf("[worker] start random rw\n");
	while (!stop_stress) {
		offset = rand() % (FILE_SIZE - RW_SIZE);
		fseek(fp, offset, SEEK_SET);

		/* Retry from a new offset on a short read. */
		if (fread(buffer, sizeof(char), RW_SIZE, fp) != RW_SIZE)
			continue;

		/* Fixed: 'rand() % 1' is always 0, so every iteration wrote;
		 * '% 2' gives the intended ~50/50 read/write mix. */
		read_only = rand() % 2;
		if (!read_only) {
			value = rand();
			memset(buffer, value, RW_SIZE);
			fseek(fp, offset, SEEK_SET);
			fwrite(buffer, sizeof(char), RW_SIZE, fp);
		}
	}
	free(buffer);
	printf("[worker] done...\n");
	return 0;
}

int main(int argc, char **argv)
{
	pthread_t worker[NR_WORKER];
	int i;

	if (argc > 1)
		RUNTIME = atoi(argv[1]);

	fp = fopen(FILEPATH, "r+");
	if (!fp) {
		/* Fixed: fp was used without checking for fopen() failure. */
		perror("failed to open " FILEPATH);
		exit(1);
	}

	srand(time(NULL));
	for (i = 0; i < NR_WORKER; i++) {
		if (pthread_create(&worker[i], NULL, random_rw, NULL)) {
			fprintf(stderr, "failed to create worker thread!\n");
			exit(1);
		}
	}
	sleep(RUNTIME);
	stop_stress = 1;
	for (i = 0; i < NR_WORKER; i++)
		pthread_join(worker[i], NULL);

	fclose(fp);
	return 0;
}
#!/bin/bash
# Fetch the mosbench source into ./mosbench, pinned to a known revision.

if [ -d mosbench ]
then
	echo "mosbench directory already exists!"
	exit 1
fi

git clone https://github.com/sjp38/mosbench
cd mosbench || exit 1
# Check out latest version as of 19 Dec 2018.
git checkout c250c395fab356ab83413db43bf9844cb4f63d4f # Mar 4 2013
9 | exit 1 10 | fi 11 | 12 | if [ $# -ne 1 ] 13 | then 14 | echo "Usage: $0 <number of cores>" 15 | exit 1 16 | fi 17 | 18 | NR_CORES=$1 19 | 20 | (time ../mosbench/metis/obj/app/wrmem -p "$NR_CORES") 2>&1 21 | -------------------------------------------------------------------------------- /workloads/mosbench/psearchy/.gitignore: -------------------------------------------------------------------------------- 1 | linux-4.19.10* 2 | -------------------------------------------------------------------------------- /workloads/mosbench/psearchy/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | cd "$BINDIR" || exit 1 5 | 6 | if [ ! -d ../mosbench/psearchy ] 7 | then 8 | echo "psearchy source code directory not found." 9 | exit 1 10 | fi 11 | 12 | if ! dpkg -l | grep -q libdb-dev 13 | then 14 | echo "libdb-dev package is not installed" 15 | exit 1 16 | fi 17 | 18 | make -C ../mosbench/psearchy/mkdb all 19 | -------------------------------------------------------------------------------- /workloads/mosbench/psearchy/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | cd "$BINDIR" || exit 1 5 | 6 | PEDSORT=../mosbench/psearchy/mkdb/pedsort 7 | 8 | if [ ! -f $PEDSORT ] 9 | then 10 | echo "The execution file is not found." 11 | exit 1 12 | fi 13 | 14 | if [ $# -ne 1 ] 15 | then 16 | echo "Usage: $0 <number of cores>" 17 | exit 1 18 | fi 19 | 20 | NR_CORES=$1 21 | 22 | LNXSRC=linux-4.19.10 23 | TMPD=./tmp 24 | TARGET_DIR=$TMPD/$LNXSRC 25 | if [ ! -d $TARGET_DIR ] 26 | then 27 | mkdir -p $TMPD 28 | sudo mount -t tmpfs -o rw,size=10G tmpfs $TMPD 29 | sudo chown "$USER" "$TMPD" 30 | 31 | if [ ! 
-f linux-4.19.10.tar.xz ] 32 | then 33 | wget https://cdn.kernel.org/pub/linux/kernel/v4.x/$LNXSRC.tar.xz 34 | fi 35 | tar -C $TMPD -xvf linux-4.19.10.tar.xz 36 | fi 37 | 38 | rm -fr "$TMPD/db" 39 | for i in $(seq 0 $((NR_CORES - 1))) 40 | do 41 | mkdir -p "$TMPD/db/db$i" 42 | done 43 | 44 | sync 45 | 46 | SZ_HASHTABLE=1024 47 | 48 | find $TARGET_DIR -type f | \ 49 | $PEDSORT -t "$TMPD/db/db" -c "$NR_CORES" -m "$SZ_HASHTABLE" 50 | -------------------------------------------------------------------------------- /workloads/oltpbench/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BINDIR=$(dirname "$0") 4 | pushd "$BINDIR" 5 | 6 | if [ ! -d src ] 7 | then 8 | echo "Source code is not exists. Fetch it..." 9 | ./fetch-src.sh 10 | fi 11 | 12 | cd src/ || exit 1 13 | ant 14 | 15 | popd 16 | -------------------------------------------------------------------------------- /workloads/oltpbench/fetch-src.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | git clone https://github.com/oltpbenchmark/oltpbench src 4 | -------------------------------------------------------------------------------- /workloads/raspistill/raspistill.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import time 6 | 7 | repeat = 10 8 | warmup = 10 9 | 10 | USAGE = "%s <repeat count> <warmup seconds> <output image path>" 11 | 12 | if len(sys.argv) < 4: 13 | print("Usage: ", USAGE) 14 | exit(1) 15 | 16 | repeat = int(sys.argv[1]) 17 | warmup = float(sys.argv[2]) 18 | img_path = sys.argv[3] 19 | 20 | os.system("uname -a") 21 | 22 | os.system("dmesg -c > /dev/null") 23 | for i in range(repeat): 24 | cmd = "/usr/bin/time " 25 | cmd += "raspistill -t 1 -q 1 -o %s 2>&1" % img_path 26 | os.system(cmd) 27 | time.sleep(warmup) 28 | os.system("dmesg | grep cma") 29 | os.system("dmesg -c > 
/dev/null") 30 | time.sleep(2) 31 | print("") 32 | --------------------------------------------------------------------------------