├── .gitignore ├── Dockerfile ├── gen_output.sh ├── README.md ├── run_fio.sh ├── parse_var_bs.py ├── plot_stat.py ├── parse_rwmixread_bw.py ├── parse_latency.py ├── parse_bw.py └── parse_stat.py /.gitignore: -------------------------------------------------------------------------------- 1 | log 2 | test 3 | output 4 | __pycache__ 5 | *.pyc -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:7 2 | 3 | RUN yum update -y \ 4 | && yum install -y fio libaio-devel \ 5 | && yum clean all 6 | 7 | RUN mkdir /fio 8 | 9 | ADD . /fio 10 | 11 | VOLUME /fio -------------------------------------------------------------------------------- /gen_output.sh: -------------------------------------------------------------------------------- 1 | com_args="" 2 | var_bs_args="" 3 | mix_read_args="" 4 | for arg in "$@" 5 | do 6 | com_args+=" -i ${arg}/com" 7 | var_bs_args+=" -i ${arg}/var-bs" 8 | mix_read_args+=" -i ${arg}/mixread" 9 | done 10 | 11 | function generate { 12 | cmd="python $@" 13 | echo $cmd 14 | eval $cmd 15 | 16 | ret=$? 
17 | if [ $ret -ne 0 ] 18 | then 19 | exit $ret 20 | fi 21 | } 22 | 23 | generate parse_latency.py ${com_args} 24 | generate parse_bw.py ${com_args} 25 | generate parse_var_bs.py ${var_bs_args} 26 | generate parse_rwmixread_bw.py ${mix_read_args} 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # A simple tool to run FIO and parse the result 2 | 3 | ## Usage 4 | 5 | ### FIO 6 | 7 | Use FIO on different devices and collect the FIO results to OUTPUT 8 | 9 | ``` 10 | DIR=/data1 OUTPUT=./log1 ./run_fio.sh 11 | DIR=/data2 OUTPUT=./log2 ./run_fio.sh 12 | ``` 13 | 14 | ## Collection 15 | 16 | ### Latency 17 | 18 | 19 | ```bash 20 | python parse_latency.py -i ./log1/com -i ./log2/com 21 | ``` 22 | 23 | It will generate the following files in the directory './output' 24 | 25 | ```bash 26 | read100-lat-mean.csv 27 | read100-lat-p99.csv 28 | read70-lat-mean.csv 29 | read70-lat-p99.csv 30 | write100-lat-mean.csv 31 | write100-lat-p99.csv 32 | write30-lat-mean.csv 33 | write30-lat-p99.csv 34 | ``` 35 | 36 | `read100` means 100% read, `read70` means 70% read + 30% write, and so on. 
37 | 38 | ### Bandwidth 39 | 40 | ```bash 41 | python parse_bw.py -i ./log1/com -i ./log2/com 42 | ``` 43 | 44 | It will generate the following files in the directory './output' 45 | 46 | ```bash 47 | read100-bandwidth.csv 48 | read70-bandwidth.csv 49 | write100-bandwidth.csv 50 | write30-bandwidth.csv 51 | ``` 52 | 53 | ### Read Latency with different block size 54 | 55 | ```bash 56 | python parse_var_bs.py -i ./log1/var-bs -i ./log2/var-bs 57 | ``` 58 | 59 | It will generate the following files in the directory './output' 60 | 61 | ```bash 62 | read100-bs-lat-mean.csv 63 | read100-bs-lat-p99.csv 64 | ``` 65 | 66 | ### TL;DR 67 | 68 | You can use `./gen_output.sh` to generate all the CSV files directly, like: 69 | 70 | ```bash 71 | ./gen_output.sh ./log1 ./log2 72 | ``` 73 | 74 | After you generate all CSV files, you can use many tools to convert them to a Markdown table, or paste them into Excel. 75 | 76 | ## TODO 77 | 78 | - Use pyplot to generate the chart -------------------------------------------------------------------------------- /run_fio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR=${DIR=./} 4 | OUTPUT=${OUTPUT=./log} 5 | SIZE=${SIZE=1g} 6 | 7 | mkdir -p ${OUTPUT}/com ${OUTPUT}/mixread ${OUTPUT}/var-bs 8 | 9 | args=" 10 | --directory ${DIR} \ 11 | --output-format json \ 12 | -filename=test \ 13 | -filesize=${SIZE} \ 14 | -thread \ 15 | -runtime=60 \ 16 | -numjobs=4 \ 17 | -ioengine=libaio \ 18 | -direct=1 \ 19 | -name=\"FIO test\"" 20 | 21 | function run_fio { 22 | cmd="fio ${args} $1" 23 | echo $cmd 24 | eval $cmd 25 | 26 | ret=$? 
27 | if [ $ret -ne 0 ] 28 | then 29 | exit $ret 30 | fi 31 | } 32 | 33 | for iodepth in 1 2 4 8 16 32 34 | do 35 | echo "100% write 4k with different iodepth" ${iodepth} 36 | run_fio "-rw=write --output ${OUTPUT}/com/write-4k-iodepth${iodepth} -bs=4k -iodepth=${iodepth}" 37 | 38 | echo "100% read 4k with different iodepth" ${iodepth} 39 | run_fio "-rw=read --output ${OUTPUT}/com/read-4k-iodepth${iodepth} -bs=4k -iodepth=${iodepth}" 40 | 41 | echo "70% read 4k and 30% write 4k with different iodepth" ${iodepth} 42 | run_fio "-rw=readwrite --output ${OUTPUT}/com/read70-write-4k-iodepth${iodepth} -bs=4k -iodepth=${iodepth} -rwmixread=70" 43 | done 44 | 45 | for mixread in 100 90 80 70 60 50 46 | do 47 | mixwrite=$((100 - ${mixread})) 48 | echo ${mixread}% "read 4k and" ${mixwrite}% "write 4k with 32 iodepth" 49 | run_fio "-rw=readwrite --output ${OUTPUT}/mixread/read${mixread}-4k-iodepth32 -iodepth=32 -bs=4k -rwmixread=${mixread}" 50 | 51 | echo ${mixread}% "read 4k and" ${mixwrite}% "write 128k with 32 iodepth" 52 | run_fio "-rw=readwrite --output ${OUTPUT}/mixread/read${mixread}-4k-write-128k-iodepth32 -iodepth=32 -bs=4k,128k -rwmixread=${mixread}" 53 | done 54 | 55 | for iodepth in 1 2 4 8 56 | do 57 | for bs in 4k 8k 32k 64k 58 | do 59 | echo "write" ${bs} "with iodepth" ${iodepth} 60 | run_fio "-rw=write --output ${OUTPUT}/var-bs/write-${bs}-iodepth${iodepth} -bs=${bs} -iodepth=${iodepth}" 61 | 62 | echo "read" ${bs} "with iodepth" ${iodepth} 63 | run_fio "-rw=read --output ${OUTPUT}/var-bs/read-${bs}-iodepth${iodepth} -bs=${bs} -iodepth=${iodepth}" 64 | done 65 | done -------------------------------------------------------------------------------- /parse_var_bs.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import getopt 3 | import operator 4 | 5 | from parse_stat import * 6 | 7 | def usage(): 8 | print("Usage %s [-i|-o|-h] [--help|--input|--output|]" % sys.argv[0]) 9 | 10 | def filter_stats(stats, f): 11 
| seen = set() 12 | stats = [stat for stat in filter(f, stats) if not (stat.iodepth in seen or seen.add(stat.iodepth))] 13 | stats.sort(key=lambda stat: stat.iodepth) 14 | return stats 15 | 16 | def main(): 17 | try: 18 | opts, args = getopt.getopt(sys.argv[1:], "hi:o", ["help", "input=", "output"]) 19 | except getopt.GetoptError as err: 20 | print(str(err)) 21 | usage() 22 | sys.exit(2) 23 | 24 | inputs = [] 25 | output = "./output" 26 | for opt, arg in opts: 27 | if opt in ("-h", "--help"): 28 | usage() 29 | sys.exit(1) 30 | elif opt in ("-i", "--input"): 31 | inputs.append(arg) 32 | elif opt in ("-o", "--output"): 33 | output = arg 34 | 35 | iodepts = [1, 2, 4, 8] 36 | 37 | lats = [] 38 | for log_dir in inputs: 39 | stats = parse_dir(log_dir, lambda stat: True) 40 | 41 | lats.append(stats) 42 | 43 | 44 | with Output(output, "read100-bs-lat-mean") as f: 45 | f.write_head("label,1,2,4,8") 46 | 47 | 48 | for bs in ["4k", "8k", "32k", "64k"]: 49 | for stats in lats: 50 | stats = filter_stats(stats, 51 | lambda stat: stat.bs == bs and stat.rwmixread == 100 and stat.iodepth in iodepts) 52 | 53 | if len(stats) == 0: 54 | continue 55 | 56 | f.write_stats("%s-%s" % (stats[0].disk_name, bs), stats, lambda stat: "%.1f" % (stat.read.lat_mean)) 57 | 58 | 59 | with Output(output, "read100-bs-lat-p99") as f: 60 | f.write_head("label,1,2,4,8") 61 | 62 | 63 | for bs in ["4k", "8k", "32k", "64k"]: 64 | for stats in lats: 65 | stats = filter_stats(stats, 66 | lambda stat: stat.bs == bs and stat.rwmixread == 100 and stat.iodepth in iodepts) 67 | 68 | if len(stats) == 0: 69 | continue 70 | 71 | f.write_stats("%s-%s" % (stats[0].disk_name, bs), stats, lambda stat: "%.1f" % (stat.read.lat_p99)) 72 | 73 | 74 | 75 | if __name__ == "__main__": 76 | main() -------------------------------------------------------------------------------- /plot_stat.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import 
matplotlib.ticker as ticker 3 | 4 | 5 | class Row: 6 | label = "" 7 | cols = [] 8 | 9 | def __init__(self, label, cols): 10 | self.label = label 11 | self.cols = cols 12 | 13 | def draw_lat_with_iodepth(y_label, iodepths, rows): 14 | for row in rows: 15 | plt.plot(iodepths, row.cols, label = row.label) 16 | 17 | ax = plt.axes() 18 | plt.xscale('log', basex=2) 19 | ax.xaxis.set_major_formatter(ticker.ScalarFormatter()) 20 | ax.xaxis.set_minor_formatter(ticker.NullFormatter()) 21 | ax.xaxis.set_minor_locator(ticker.NullLocator()) 22 | 23 | plt.yscale('log') 24 | ax.yaxis.set_major_formatter(ticker.ScalarFormatter()) 25 | ax.yaxis.set_minor_formatter(ticker.NullFormatter()) 26 | ax.yaxis.set_minor_locator(ticker.NullLocator()) 27 | ax.yaxis.grid(True) 28 | 29 | plt.xlabel('Queue Depth') 30 | plt.ylabel(y_label) 31 | plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2, fancybox=False, shadow=True) 32 | 33 | 34 | def draw_bw_with_iodepth(y_label, iodepths, rows): 35 | for row in rows: 36 | plt.plot(iodepths, row.cols, label = row.label) 37 | 38 | ax = plt.axes() 39 | plt.xscale('log', basex=2) 40 | ax.xaxis.set_major_formatter(ticker.ScalarFormatter()) 41 | ax.xaxis.set_minor_formatter(ticker.NullFormatter()) 42 | ax.xaxis.set_minor_locator(ticker.NullLocator()) 43 | 44 | # Use proper formatter and locator for Y axis later. 45 | ax.yaxis.grid(True) 46 | 47 | plt.xlabel('Queue Depth') 48 | plt.ylabel(y_label) 49 | plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2, fancybox=False, shadow=True) 50 | 51 | def draw_bw_with_read_ratio(y_label, read_ratios, rows): 52 | for row in rows: 53 | plt.plot(read_ratios, row.cols, label = row.label) 54 | 55 | ax = plt.axes() 56 | ax.set_xlim(100, 50) 57 | ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d%%')) 58 | 59 | # Use proper formatter and locator for Y axis later. 
60 | ax.yaxis.grid(True) 61 | 62 | plt.xlabel('Read percentage') 63 | plt.ylabel(y_label) 64 | plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2, fancybox=False, shadow=True) 65 | 66 | 67 | if __name__ == "__main__": 68 | # rows = [] 69 | 70 | # rows.append(Row("a", [1, 30, 40, 50, 110, 800])) 71 | # rows.append(Row("b", [100, 300, 400, 500, 600, 900])) 72 | 73 | # # draw_lat_with_iodepth("Mean Read Latency (us)\n(Read Only)", [1, 2, 4, 8, 16, 32], rows) 74 | 75 | # draw_bw_with_iodepth("Bandwidth (MB/s)\n(Read Only)", [1, 2, 4, 8, 16, 32], rows) 76 | 77 | rows = [] 78 | rows.append(Row("a", [1000, 900, 800, 700, 600, 200])) 79 | rows.append(Row("b", [100, 300, 400, 500, 600, 900])) 80 | 81 | draw_bw_with_read_ratio("Bandwidth (MB/s)", [100, 90, 80, 70, 60, 50], rows) 82 | 83 | plt.show() -------------------------------------------------------------------------------- /parse_rwmixread_bw.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import getopt 3 | import operator 4 | 5 | from parse_stat import * 6 | 7 | def usage(): 8 | print("Usage %s [-i|-o|-h] [--help|--input|--output|]" % sys.argv[0]) 9 | 10 | 11 | def filter_stats_by_rwmixread(stats, f): 12 | seen = set() 13 | stats = [stat for stat in filter(f, stats) if not (stat.rwmixread in seen or seen.add(stat.rwmixread))] 14 | stats.sort(key=lambda stat: stat.rwmixread, reverse=True) 15 | return stats 16 | 17 | def main(): 18 | try: 19 | opts, args = getopt.getopt(sys.argv[1:], "hi:o", ["help", "input=", "output"]) 20 | except getopt.GetoptError as err: 21 | print(str(err)) 22 | usage() 23 | sys.exit(2) 24 | 25 | inputs = [] 26 | output = "./output" 27 | for opt, arg in opts: 28 | if opt in ("-h", "--help"): 29 | usage() 30 | sys.exit(1) 31 | elif opt in ("-i", "--input"): 32 | inputs.append(arg) 33 | elif opt in ("-o", "--output"): 34 | output = arg 35 | 36 | iodepts = [1, 2, 4, 8, 16, 32] 37 | 38 | lats = [] 39 | for log_dir in inputs: 40 | 
stats = parse_dir(log_dir, lambda stat: True) 41 | 42 | lats.append(stats) 43 | 44 | 45 | # Define customized filter here 46 | rwmixreads = [100, 90, 80, 70, 60, 50] 47 | def f(stat): 48 | return stat.bs == "4k" and stat.rwmixread in rwmixreads and stat.iodepth == 32 49 | 50 | with Output(output, "mix-read-bandwidth") as f: 51 | f.write_head("label,100%,90%,80%,70%,60%,50%") 52 | 53 | 54 | for stats in lats: 55 | stats = filter_stats_by_rwmixread(stats, 56 | lambda stat: stat.bs == "4k" and stat.rwmixread in rwmixreads and stat.iodepth == 32) 57 | if len(stats) == 0: 58 | continue 59 | 60 | disk_name = stats[0].disk_name 61 | f.write_stats("%s-Reads" % disk_name, stats, lambda stat: "%.1f" % (stat.read.bw)) 62 | f.write_stats("%s-Writes" % disk_name, stats, lambda stat: "%.1f" % (stat.write.bw)) 63 | f.write_stats("%s-Combines" % disk_name, stats, lambda stat: "%.1f" % (stat.read.bw + stat.write.bw)) 64 | 65 | with Output(output, "mix-read-var-write-bandwidth") as f: 66 | f.write_head("label,100%,90%,80%,70%,60%,50%") 67 | 68 | 69 | for stats in lats: 70 | stats = filter_stats_by_rwmixread(stats, 71 | lambda stat: stat.bs == "4k" and stat.rwmixread in rwmixreads and stat.iodepth == 32) 72 | if len(stats) == 0: 73 | continue 74 | 75 | f.write_stats("%s-4KB Write" % stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.read.bw + stat.write.bw)) 76 | 77 | for stats in lats: 78 | stats = filter_stats_by_rwmixread(stats, 79 | lambda stat: stat.bs == "4k,128k" and stat.rwmixread in rwmixreads and stat.iodepth == 32) 80 | if len(stats) == 0: 81 | continue 82 | 83 | f.write_stats("%s-128KB Write" % stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.read.bw + stat.write.bw)) 84 | 85 | 86 | 87 | if __name__ == "__main__": 88 | main() -------------------------------------------------------------------------------- /parse_latency.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import getopt 3 | import operator 4 | 5 
| from parse_stat import * 6 | 7 | def usage(): 8 | print("Usage %s [-i|-o|-h] [--help|--input|--output|]" % sys.argv[0]) 9 | 10 | def filter_stats(stats, f): 11 | seen = set() 12 | stats = [stat for stat in filter(f, stats) if not (stat.iodepth in seen or seen.add(stat.iodepth))] 13 | stats.sort(key=lambda stat: stat.iodepth) 14 | return stats 15 | 16 | def main(): 17 | try: 18 | opts, args = getopt.getopt(sys.argv[1:], "hi:o", ["help", "input=", "output"]) 19 | except getopt.GetoptError as err: 20 | print(str(err)) 21 | usage() 22 | sys.exit(2) 23 | 24 | inputs = [] 25 | output = "./output" 26 | for opt, arg in opts: 27 | if opt in ("-h", "--help"): 28 | usage() 29 | sys.exit(1) 30 | elif opt in ("-i", "--input"): 31 | inputs.append(arg) 32 | elif opt in ("-o", "--output"): 33 | output = arg 34 | 35 | iodepts = [1, 2, 4, 8, 16, 32] 36 | 37 | lats = [] 38 | for log_dir in inputs: 39 | stats = parse_dir(log_dir, lambda stat: True) 40 | 41 | lats.append(stats) 42 | 43 | 44 | for rwmixread in [100, 70]: 45 | def filter_stat(stat): 46 | return stat.bs == "4k" and stat.rwmixread == rwmixread and stat.iodepth in iodepts 47 | 48 | with Output(output, "read%d-lat-mean" % rwmixread) as f: 49 | f.write_head("label,1,2,4,8,16,32") 50 | 51 | for stats in lats: 52 | stats = filter_stats(stats, filter_stat) 53 | 54 | if len(stats) == 0: 55 | continue 56 | 57 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.read.lat_mean)) 58 | 59 | with Output(output, "read%d-lat-p99" % rwmixread) as f: 60 | f.write_head("label,1,2,4,8,16,32") 61 | 62 | for stats in lats: 63 | stats = filter_stats(stats, filter_stat) 64 | 65 | if len(stats) == 0: 66 | continue 67 | 68 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.read.lat_p99)) 69 | 70 | for rwmixwrite in [100, 30]: 71 | def filter_stat(stat): 72 | return stat.bs == "4k" and stat.rwmixread == 100 - rwmixwrite and stat.iodepth in iodepts 73 | 74 | with Output(output, "write%d-lat-mean" % rwmixwrite) 
as f: 75 | f.write_head("label,1,2,4,8,16,32") 76 | 77 | for stats in lats: 78 | stats = filter_stats(stats, filter_stat) 79 | 80 | if len(stats) == 0: 81 | continue 82 | 83 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.write.lat_mean)) 84 | 85 | with Output(output, "write%d-lat-p99" % rwmixwrite) as f: 86 | f.write_head("label,1,2,4,8,16,32") 87 | 88 | for stats in lats: 89 | stats = filter_stats(stats, filter_stat) 90 | 91 | if len(stats) == 0: 92 | continue 93 | 94 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.write.lat_p99)) 95 | 96 | 97 | if __name__ == "__main__": 98 | main() -------------------------------------------------------------------------------- /parse_bw.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import getopt 3 | import operator 4 | 5 | from parse_stat import * 6 | 7 | def usage(): 8 | print("Usage %s [-i|-o|-h] [--help|--input|--output|]" % sys.argv[0]) 9 | 10 | def filter_stats(stats, f): 11 | seen = set() 12 | stats = [stat for stat in filter(f, stats) if not (stat.iodepth in seen or seen.add(stat.iodepth))] 13 | stats.sort(key=lambda stat: stat.iodepth) 14 | return stats 15 | 16 | def filter_stats_by_rwmixread(stats, f): 17 | seen = set() 18 | stats = [stat for stat in filter(f, stats) if not (stat.rwmixread in seen or seen.add(stat.rwmixread))] 19 | stats.sort(key=lambda stat: stat.rwmixread, reverse=True) 20 | return stats 21 | 22 | def main(): 23 | try: 24 | opts, args = getopt.getopt(sys.argv[1:], "hi:o", ["help", "input=", "output"]) 25 | except getopt.GetoptError as err: 26 | print(str(err)) 27 | usage() 28 | sys.exit(2) 29 | 30 | inputs = [] 31 | output = "./output" 32 | for opt, arg in opts: 33 | if opt in ("-h", "--help"): 34 | usage() 35 | sys.exit(1) 36 | elif opt in ("-i", "--input"): 37 | inputs.append(arg) 38 | elif opt in ("-o", "--output"): 39 | output = arg 40 | 41 | iodepts = [1, 2, 4, 8, 16, 32] 42 | 43 | lats = [] 
44 | for log_dir in inputs: 45 | stats = parse_dir(log_dir, lambda stat: True) 46 | 47 | lats.append(stats) 48 | 49 | 50 | with Output(output, "read100-bandwidth") as f: 51 | f.write_head("label,1,2,4,8,16,32") 52 | 53 | 54 | for stats in lats: 55 | stats = filter_stats(stats, 56 | lambda stat: stat.bs == "4k" and stat.rwmixread == 100 and stat.iodepth in iodepts) 57 | if len(stats) == 0: 58 | continue 59 | 60 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.read.bw)) 61 | 62 | 63 | with Output(output, "read70-bandwidth") as f: 64 | f.write_head("label,1,2,4,8,16,32") 65 | 66 | for stats in lats: 67 | stats = filter_stats(stats, lambda stat: stat.bs == "4k" and stat.rwmixread == 70 and stat.iodepth in iodepts) 68 | if len(stats) == 0: 69 | continue 70 | 71 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.read.bw)) 72 | 73 | with Output(output, "write100-bandwidth") as f: 74 | f.write_head("label,1,2,4,8,16,32") 75 | 76 | 77 | for stats in lats: 78 | stats = filter_stats(stats, 79 | lambda stat: stat.bs == "4k" and stat.rwmixread == 0 and stat.iodepth in iodepts) 80 | if len(stats) == 0: 81 | continue 82 | 83 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.write.bw)) 84 | 85 | 86 | with Output(output, "write30-bandwidth") as f: 87 | f.write_head("label,1,2,4,8,16,32") 88 | 89 | for stats in lats: 90 | stats = filter_stats(stats, lambda stat: stat.bs == "4k" and stat.rwmixread == 70 and stat.iodepth in iodepts) 91 | if len(stats) == 0: 92 | continue 93 | 94 | f.write_stats(stats[0].disk_name, stats, lambda stat: "%.1f" % (stat.write.bw)) 95 | 96 | 97 | if __name__ == "__main__": 98 | main() -------------------------------------------------------------------------------- /parse_stat.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os, errno 3 | import json 4 | 5 | class SubJobStat: 6 | bw = 0.0 7 | iops = 0.0 8 | lat_mean = 0.0 9 | lat_p99 = 
0.0 10 | 11 | def parse(self, job): 12 | self.bw += job["bw"] / 1024.0 13 | self.iops += job["iops"] 14 | self.lat_mean += job["clat_ns"]["mean"] / 1000.0 15 | self.lat_p99 += job["clat_ns"]["percentile"]["99.000000"] / 1000.0 16 | 17 | def adjust(self, job_count): 18 | self.lat_mean /= job_count 19 | self.lat_p99 /= job_count 20 | 21 | def __str__(self): 22 | return "bw: %0.1fM/s, iops: %0.1f, lat mean: %0.1fus, lat p99: %0.1fus" % (self.bw, self.iops, self.lat_mean, self.lat_p99) 23 | 24 | class JobStat: 25 | disk_name = "" 26 | bs = "4k" 27 | ioengine = "libaio" 28 | iodepth = 1 29 | rwmixread = 100 30 | read = None 31 | write = None 32 | 33 | def has_read(self): 34 | return self.rwmixread > 0 35 | 36 | def has_write(self): 37 | return self.rwmixread != 100 38 | 39 | def __init__(self, name): 40 | with open(name) as f: 41 | data = json.load(f) 42 | 43 | g_opts = data["global options"] 44 | 45 | self.read = SubJobStat() 46 | self.write = SubJobStat() 47 | self.ioengine = g_opts["ioengine"] 48 | 49 | bs = g_opts.get("bs", None) 50 | iodepth = g_opts.get("iodepth", None) 51 | rwmixread = g_opts.get("rwmixread", None) 52 | 53 | self.disk_name = data["disk_util"][0]["name"] 54 | 55 | job_count = len(data["jobs"]) 56 | 57 | for job in data["jobs"]: 58 | job_opts = job["job options"] 59 | if bs == None: 60 | bs = job_opts["bs"] 61 | 62 | if iodepth == None: 63 | iodepth = job_opts["iodepth"] 64 | 65 | if rwmixread == None: 66 | rwmixread = job_opts.get("rwmixread", None) 67 | 68 | 69 | self.read.parse(job["read"]) 70 | self.write.parse(job["write"]) 71 | 72 | 73 | if rwmixread != None: 74 | self.rwmixread = int(rwmixread) 75 | elif not self.write.lat_p99 > 0: 76 | # No write 77 | self.rwmixread = 100 78 | else: 79 | # No read 80 | self.rwmixread = 0 81 | 82 | self.read.adjust(job_count) 83 | self.write.adjust(job_count) 84 | 85 | self.bs = bs 86 | self.iodepth = int(iodepth) 87 | 88 | 89 | def __str__(self): 90 | return "disk: %s, ioengine: %s, iodepth: %d, bs: %s, 
rwmixread: %d\nread: %s\nwrite: %s\n" % (self.disk_name, self.ioengine, self.iodepth, self.bs, self.rwmixread, self.read, self.write) 91 | 92 | 93 | def parse_dir(name, f): 94 | files = os.listdir(name) 95 | files.sort() 96 | 97 | stats = [JobStat(os.path.join(name, file)) for file in files] 98 | 99 | return list(filter(f, stats)) 100 | 101 | 102 | class Output: 103 | fd = None 104 | def __init__(self, output, name): 105 | try: 106 | os.makedirs(output) 107 | except OSError as e: 108 | if e.errno != errno.EEXIST: 109 | raise 110 | 111 | self.fd = open(os.path.join(output, "%s.csv" % name), "w") 112 | 113 | def __enter__(self): 114 | return self 115 | 116 | def __exit__(self, type, value, traceback): 117 | self.fd.close() 118 | 119 | def write_head(self, head): 120 | self.fd.write(head) 121 | self.fd.write("\n") 122 | 123 | 124 | def write_stats(self, label, stats, f): 125 | self.fd.write("%s,%s" % (label, ",".join(map(f, stats)))) 126 | self.fd.write("\n") 127 | 128 | 129 | def main(): 130 | log_dir = "./log/com" 131 | if len(sys.argv) == 2: 132 | log_dir = sys.argv[1] 133 | 134 | 135 | stats = parse_dir(log_dir, lambda stat: True) 136 | for stat in stats: 137 | print(stat) 138 | 139 | 140 | if __name__ == "__main__": 141 | main() --------------------------------------------------------------------------------