├── README.md
├── plot.py
└── requirements.txt

/README.md:
--------------------------------------------------------------------------------
# google_benchmark_plot

A Python script to visualize the output from [google-benchmark][1].

Here is a 2-minute demo:

![2 minute demo](https://i.imgur.com/rEYqrWp.gif)

## Using the script

**If you are feeling lucky, try**
```
./your_benchmark_exe --benchmark_format=csv | python plot.py
```

### Detailed instructions

* Clone the repository
* Run `pip install -r requirements.txt` (works with both Python 2 and 3)
* Obtain the benchmark results as a CSV file by running your benchmark as

```
./your_benchmark_exe --benchmark_format=csv > benchmark.csv
```

* Pass the CSV file to this script

```
python plot.py -f benchmark.csv
```

### What else does it do

The script allows you to customize the plot using command-line flags. The
complete help text can be seen by calling

```
$ python plot.py -h

usage: plot.py [-h] [-f FILE] [-m METRIC] [-t TRANSFORM] [-r RELATIVE_TO]
               [--xlabel XLABEL] [--ylabel YLABEL] [--title TITLE] [--logx]
               [--logy] [--output OUTPUT]

Visualize google-benchmark output

optional arguments:
  -h, --help       show this help message and exit
  -f FILE          path to file containing the csv or json benchmark data
  -m METRIC        metric to plot on the y-axis, valid choices are: real_time,
                   cpu_time, bytes_per_second, items_per_second, iterations
  -t TRANSFORM     transform to apply to the chosen metric, valid choices are:
                   inverse
  -r RELATIVE_TO   plot metrics relative to this label
  --xlabel XLABEL  label of the x-axis
  --ylabel YLABEL  label of the y-axis
  --title TITLE    title of the plot
  --logx           plot x-axis on a logarithmic scale
  --logy           plot y-axis on a logarithmic scale
  --output OUTPUT  file in which to save the graph
```

[1]: https://github.com/google/benchmark

--------------------------------------------------------------------------------
/plot.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Script to visualize google-benchmark output"""
from __future__ import print_function
import argparse
import sys
import logging
import json
import pathlib
import pandas as pd
import matplotlib.pyplot as plt

logging.basicConfig(format="[%(levelname)s] %(message)s")

METRICS = [
    "real_time",
    "cpu_time",
    "bytes_per_second",
    "items_per_second",
    "iterations",
]
TRANSFORMS = {"": lambda x: x, "inverse": lambda x: 1.0 / x}


def get_default_ylabel(args):
    """Compute default ylabel for commandline args"""
    label = ""
    if args.transform == "":
        label = args.metric
    else:
        label = args.transform + "(" + args.metric + ")"
    if args.relative_to is not None:
        label += " relative to %s" % args.relative_to
    return label


def parse_args():
    """Parse commandline arguments"""
    parser = argparse.ArgumentParser(description="Visualize google-benchmark output")
    parser.add_argument(
        "-f",
        metavar="FILE",
        type=argparse.FileType("r"),
        default=sys.stdin,
        dest="file",
        help="path to file containing the csv or json benchmark data",
    )
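    # -m selects which benchmark column is plotted on the y-axis; the valid
    # names come from the METRICS list above and real_time is the default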
    parser.add_argument(
        "-m",
        metavar="METRIC",
        choices=METRICS,
        default=METRICS[0],
        dest="metric",
        help="metric to plot on the y-axis, valid choices are: %s" % ", ".join(METRICS),
    )
    parser.add_argument(
        "-t",
        metavar="TRANSFORM",
        choices=TRANSFORMS.keys(),
        default="",
        help="transform to apply to the chosen metric, valid choices are: %s"
        % ", ".join(list(TRANSFORMS)),
        dest="transform",
    )
    parser.add_argument(
        "-r",
        metavar="RELATIVE_TO",
        type=str,
        default=None,
        dest="relative_to",
        help="plot metrics relative to this label",
    )
    parser.add_argument(
        "--xlabel", type=str, default="input size", help="label of the x-axis"
    )
    parser.add_argument("--ylabel", type=str, help="label of the y-axis")
    parser.add_argument("--title", type=str, default="", help="title of the plot")
    parser.add_argument(
        "--logx", action="store_true", help="plot x-axis on a logarithmic scale"
    )
    parser.add_argument(
        "--logy", action="store_true", help="plot y-axis on a logarithmic scale"
    )
    parser.add_argument(
        "--output", type=str, default="", help="file in which to save the graph"
    )

    args = parser.parse_args()
    if args.ylabel is None:
        args.ylabel = get_default_ylabel(args)
    return args


def parse_input_size(name):
    """Extract the numeric input size from a benchmark name such as "BM_foo/1024"."""
    splits = name.split("/")
    if len(splits) == 1:
        return 1
    return int(splits[1])


def read_data(args):
    """Read and process dataframe using commandline args"""
    extension = pathlib.Path(args.file.name).suffix
    try:
        if extension == ".csv" or args.file is sys.stdin:
            # data piped in on stdin has no file extension and is assumed to be CSV
            data = pd.read_csv(args.file, usecols=["name", args.metric])
        elif extension == ".json":
            json_data = json.load(args.file)
            data = pd.DataFrame(json_data["benchmarks"])
        else:
            logging.error("Unsupported file extension '{}'".format(extension))
            sys.exit(1)
    except ValueError:
        logging.error(
            'Could not parse the benchmark data. Did you forget "--benchmark_format=[csv|json]" when running the benchmark?'
        )
        sys.exit(1)
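    # google-benchmark names look like "label/input_size"; the part before the
    # slash becomes the series label and the number after it the x-axis value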
    data["label"] = data["name"].apply(lambda x: x.split("/")[0])
    data["input"] = data["name"].apply(parse_input_size)
    data[args.metric] = data[args.metric].apply(TRANSFORMS[args.transform])
    return data


def plot_groups(label_groups, args):
    """Display the processed data"""
    for label, group in label_groups.items():
        plt.plot(group["input"], group[args.metric], label=label, marker=".")
    if args.logx:
        plt.xscale("log")
    if args.logy:
        plt.yscale("log")
    plt.xlabel(args.xlabel)
    plt.ylabel(args.ylabel)
    plt.title(args.title)
    plt.legend()
    if args.output:
        logging.info("Saving to %s", args.output)
        plt.savefig(args.output)
    else:
        plt.show()


def main():
    """Entry point of the program"""
    args = parse_args()
    data = read_data(args)
    label_groups = {}
    for label, group in data.groupby("label"):
        label_groups[label] = group.set_index("input", drop=False)
    if args.relative_to is not None:
        try:
            baseline = label_groups[args.relative_to][args.metric].copy()
        except KeyError as key:
            msg = "Key %s is not present in the benchmark output"
            logging.error(msg, str(key))
            sys.exit(1)
        for label in label_groups:
            label_groups[label][args.metric] /= baseline
    plot_groups(label_groups, args)


if __name__ == "__main__":
    main()

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
matplotlib
pandas
--------------------------------------------------------------------------------