diff --git a/README.md b/README.md
index 4a6b8db6..e7882db3 100644
--- a/README.md
+++ b/README.md
@@ -56,6 +56,27 @@ use:
 ```bash
 python -m report.console_tables --storage results/0001_bench.json
 ```
+To view the `games/applications` results of a single run as a bar graph, run:
+```bash
+python visualization/view.py view <results file> <datatype>
+```
+For example:
+```bash
+python visualization/view.py view results/0001_bench.json fidelity
+```
+This plots the chosen datatype for each benchmark in the results file, averaged over repeated runs of the same benchmark.
+
+To compare two results files as a bar graph, run:
+```bash
+python visualization/view.py compare <results file 1> <results file 2> <datatype>
+```
+For example:
+```bash
+python visualization/view.py compare results/0001_bench.json results/0002_bench.json fidelity
+```
+This averages the datatype metric over all benchmarks in each file and plots the two averages side by side.
+
+The currently supported datatypes are `fidelity`, which gives the average fidelity of each benchmark, and `meantime`, which gives the average run time of each benchmark.
 
 ## Warning
 This code is still under development. There are many razer sharp edges.
diff --git a/visualization/view.py b/visualization/view.py
new file mode 100644
index 00000000..222d70be
--- /dev/null
+++ b/visualization/view.py
@@ -0,0 +1,153 @@
+"""Plot benchmark results as bar graphs.
+
+Usage:
+    python visualization/view.py view <results file> <datatype>
+    python visualization/view.py compare <results file 1> <results file 2> <datatype>
+
+Each entry of the "benchmarks" list in a results file is expected to carry
+"name", "stats.quality.fidelity" and "stats.timing.mean". The supported
+datatypes are "fidelity" and "meantime".
+"""
+import sys
+import json
+
+import matplotlib.pyplot as plt
+
+
+def fidelity(contents, contents2=None):
+    """Return (labels, values) of average fidelity.
+
+    With a single results file, fidelity is averaged per benchmark name,
+    over repeated runs of that benchmark. With two files, the average
+    fidelity over all benchmarks in each file is returned.
+    """
+    if contents2 is None:
+        try:
+            bench_total = {}  # summed fidelity per benchmark name
+            bench_count = {}  # number of runs per benchmark name
+            for bench in contents["benchmarks"]:
+                name = bench["name"]
+                bench_total[name] = bench_total.get(name, 0) + bench["stats"]["quality"]["fidelity"]
+                bench_count[name] = bench_count.get(name, 0) + 1
+            fid_avg = [bench_total[name] / bench_count[name] for name in bench_total]
+            return list(bench_total.keys()), fid_avg
+        except Exception as e:
+            sys.exit("Uh, oh! Something went wrong: {}".format(e))
+    else:
+        try:
+            bench_names = ["Benchmark #1", "Benchmark #2"]
+            avgs = []
+            for results in (contents, contents2):
+                total = sum(b["stats"]["quality"]["fidelity"] for b in results["benchmarks"])
+                avgs.append(total / len(results["benchmarks"]))
+            return bench_names, avgs
+        except Exception as e:
+            sys.exit("Uh, oh! Something went wrong: {}".format(e))
+
+
+def meantime(contents, contents2=None):
+    """Return (labels, values) of average run time, analogous to fidelity()."""
+    if contents2 is None:
+        try:
+            bench_total = {}  # summed mean time per benchmark name
+            bench_count = {}  # number of runs per benchmark name
+            for bench in contents["benchmarks"]:
+                name = bench["name"]
+                bench_total[name] = bench_total.get(name, 0) + bench["stats"]["timing"]["mean"]
+                bench_count[name] = bench_count.get(name, 0) + 1
+            time_avg = [bench_total[name] / bench_count[name] for name in bench_total]
+            return list(bench_total.keys()), time_avg
+        except Exception as e:
+            sys.exit("Uh, oh! Something went wrong: {}".format(e))
+    else:
+        try:
+            bench_names = ["Benchmark #1", "Benchmark #2"]
+            avgs = []
+            for results in (contents, contents2):
+                total = sum(b["stats"]["timing"]["mean"] for b in results["benchmarks"])
+                avgs.append(total / len(results["benchmarks"]))
+            return bench_names, avgs
+        except Exception as e:
+            sys.exit("Uh, oh! Something went wrong: {}".format(e))
+
+
+def plot_bar(labels, values, ylabel):
+    """Draw a bar graph with one labelled bar per benchmark."""
+    ax = plt.subplot()
+    plt.bar(labels, values, color="darkolivegreen")
+    plt.xlabel("Benchmark Name")
+    plt.ylabel(ylabel)
+    plt.setp(ax.get_xticklabels(), rotation=30, ha="right")
+    plt.show()
+
+
+def main(args):
+    # Argument parsing:
+    #   view.py view <results file> <datatype>
+    #   view.py compare <results file 1> <results file 2> <datatype>
+    if len(args) == 4:
+        viewtype, file, datatype = args[1], args[2], args[3]
+        file2 = None
+    elif len(args) == 5:
+        viewtype, file, file2, datatype = args[1], args[2], args[3], args[4]
+    else:
+        sys.exit("Usage: view.py view <results file> <datatype>\n"
+                 "       view.py compare <results file 1> <results file 2> <datatype>")
+
+    if datatype not in ("fidelity", "meantime"):
+        sys.exit("Invalid data type! Allowed parameters are: 'fidelity' and 'meantime'")
+
+    if viewtype == "view":
+        with open(file, 'r') as f:
+            contents = json.load(f)
+        if datatype == "fidelity":
+            x_axis, y_axis = fidelity(contents)
+            plot_bar(x_axis, y_axis, "Fidelity")
+        else:
+            x_axis, y_axis = meantime(contents)
+            plot_bar(x_axis, y_axis, "Mean Time")
+    elif viewtype == "compare":
+        if file2 is None:
+            sys.exit("'compare' needs two results files and a datatype")
+        with open(file, 'r') as f:
+            contents1 = json.load(f)
+        with open(file2, 'r') as f:
+            contents2 = json.load(f)
+        if datatype == "fidelity":
+            x_axis, y_axis = fidelity(contents1, contents2)
+            plot_bar(x_axis, y_axis, "Fidelity")
+        else:
+            x_axis, y_axis = meantime(contents1, contents2)
+            plot_bar(x_axis, y_axis, "Mean Time")
+    else:
+        sys.exit("Invalid view type! Allowed parameters are: 'view' and 'compare'")
+
+
+if __name__ == "__main__":
+    main(sys.argv)