
Format perf2tex

jonathanvdc 8 years ago
commit 7453e50742
1 changed file with 53 additions and 22 deletions
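
The diff below is formatting-only: long statements are wrapped so that arguments and comprehension clauses sit on their own lines, and a second blank line is inserted between top-level definitions, in line with PEP 8. The commit does not name the tool used, so the commands below are only a hedged sketch of how a pass in this style might be reproduced, assuming an autoformatter such as yapf with its default PEP 8-based style:

    # Assumption: yapf; the commit itself does not say which formatter (if any) was run.
    pip install yapf
    yapf --in-place performance/perf2tex.py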

+ 53 - 22
performance/perf2tex.py

@@ -28,18 +28,21 @@ LATEX_DOCUMENT_HEADER = r"""\begin{document}
 LATEX_DOCUMENT_FOOTER = r"""\end{tikzpicture}
 \end{document}"""
 
+
 def encode_latex_string(value):
     """Encodes the given string as a LaTeX string."""
     # I guess this is good enough for now. This may need to be
     # revisited if we encounter more complicated names.
     return '{%s}' % value.replace('_', '\\_')
 
+
 def assemble_latex_chart(optimization_levels, color_defs, test_names, data):
     """Assembles a LaTeX chart from the given components."""
     lines = []
     lines.append(LATEX_HEADER)
     for color_name, (red, green, blue) in color_defs:
-        lines.append(r'\definecolor{%s}{HTML}{%02X%02X%02X}' % (color_name, red, green, blue))
+        lines.append(r'\definecolor{%s}{HTML}{%02X%02X%02X}' %
+                     (color_name, red, green, blue))
     lines.append(LATEX_DOCUMENT_HEADER)
     lines.append(r"""
     \begin{axis}[
@@ -66,17 +69,18 @@ def assemble_latex_chart(optimization_levels, color_defs, test_names, data):
     for color_name, points in data:
         lines.append(r"""
         \addplot[style={%s,fill=%s,mark=none}]
-            coordinates {%s};""" % (
-                color_name, color_name,
-                ' '.join([('(%s,%s)' % (encode_latex_string(name), measurement))
-                          for name, measurement in points])))
+            coordinates {%s};""" % (color_name, color_name, ' '.join(
+            [('(%s,%s)' % (encode_latex_string(name), measurement))
+             for name, measurement in points])))
     lines.append(r"""
-        \legend{%s}""" % ','.join(map(encode_latex_string, optimization_levels)))
+        \legend{%s}""" %
+                 ','.join(map(encode_latex_string, optimization_levels)))
     lines.append(r"""
     \end{axis}""")
     lines.append(LATEX_DOCUMENT_FOOTER)
     return '\n'.join(lines)
 
+
 def create_latex_chart(perf_data, sorted_opt_levels=None):
     """Creates a LaTeX chart for the given performance data."""
     if sorted_opt_levels is None:
@@ -100,6 +104,7 @@ def create_latex_chart(perf_data, sorted_opt_levels=None):
 
     return assemble_latex_chart(opt_levels, color_defs, test_names, data)
 
+
 def get_mean_runtimes(perf_data):
     """Computes the mean run-time of every optimization level in the given
        performance data."""
@@ -108,6 +113,7 @@ def get_mean_runtimes(perf_data):
         for opt_level in perf_data.keys()
     }
 
+
 def get_baseline_optimization_level(perf_data):
     """Gets a baseline optimization level from the given performance data.
        This baseline optimization level is guaranteed to be for every test case.
@@ -128,7 +134,9 @@ def get_baseline_optimization_level(perf_data):
         return None
 
     # Pick the optimization level with the lowest total run-time as the baseline.
-    return min(candidate_opt_levels, key=lambda opt_level: sum(perf_data[opt_level].values()))
+    return min(candidate_opt_levels,
+               key=lambda opt_level: sum(perf_data[opt_level].values()))
+
 
 def get_relative_measurements(perf_data, baseline_optimization_level):
     """Computes a map of measurements that are relative to the given optimization level."""
@@ -141,13 +149,17 @@ def get_relative_measurements(perf_data, baseline_optimization_level):
 
     return results
 
+
 def perf_list_to_dict(perf_list):
     """Converts performance data from a list representation to a dictionary representation."""
     return {opt_level: dict(tests) for opt_level, tests in perf_list}
 
+
 def perf_dict_to_list(perf_dict):
     """Converts performance data from a dictionary representation to a list representation."""
-    return [(opt_level, tests.items()) for opt_level, tests in perf_dict.items()]
+    return [(opt_level, tests.items())
+            for opt_level, tests in perf_dict.items()]
+
 
 def interpolate(value_range, index, length):
     """Uses an index and a length to interpolate in the given range."""
@@ -157,13 +169,19 @@ def interpolate(value_range, index, length):
     else:
         return min_val + float(index) * (max_val - min_val) / float(length - 1)
 
+
 def sort_by_runtime(perf_data):
     """Sorts the optimization levels by mean relative runtimes."""
     baseline_opt_level = get_baseline_optimization_level(perf_data)
     relative_perf = get_relative_measurements(perf_data, baseline_opt_level)
     # Sort the optimization levels by their mean runtimes.
     mean_runtimes = get_mean_runtimes(relative_perf)
-    return list(sorted(mean_runtimes.keys(), key=lambda opt_level: mean_runtimes[opt_level], reverse=True))
+    return list(
+        sorted(
+            mean_runtimes.keys(),
+            key=lambda opt_level: mean_runtimes[opt_level],
+            reverse=True))
+
 
 def generate_color_scheme(sorted_opt_levels):
     """Assigns a color to every optimization level in the given performance data."""
@@ -177,27 +195,39 @@ def generate_color_scheme(sorted_opt_levels):
         hue = interpolate((min_hue, max_hue), i, len(sorted_opt_levels))
         sat = interpolate((min_sat, max_sat), i, len(sorted_opt_levels))
         val = interpolate((min_val, max_val), i, len(sorted_opt_levels))
-        color = [component * 255 for component in colorsys.hsv_to_rgb(hue, sat, val)]
+        color = [
+            component * 255 for component in colorsys.hsv_to_rgb(hue, sat, val)
+        ]
         color_scheme[opt_level] = color
 
     return color_scheme
 
+
 def main():
     arg_parser = argparse.ArgumentParser()
     arg_parser.add_argument('input', help='The performance data file.')
     arg_parser.add_argument(
-        '-q', '--quantity', type=str,
-        help="The quantity to build a bar chart for. Defaults to '%s'" % utils.TOTAL_TIME_QUANTITY,
+        '-q',
+        '--quantity',
+        type=str,
+        help="The quantity to build a bar chart for. Defaults to '%s'" %
+        utils.TOTAL_TIME_QUANTITY,
         default=utils.TOTAL_TIME_QUANTITY)
     arg_parser.add_argument(
-        '-O', '--opt', type=str, nargs='*',
+        '-O',
+        '--opt',
+        type=str,
+        nargs='*',
         help="Filters on optimization levels.")
     arg_parser.add_argument(
-        '-t', '--test', type=str, nargs='*',
-        help="Filters on tests.")
+        '-t', '--test', type=str, nargs='*', help="Filters on tests.")
     arg_parser.add_argument(
-        '-r', '--relative', action='store_const', const=True,
-        help="Produce bars that are relative to some baseline.", default=False)
+        '-r',
+        '--relative',
+        action='store_const',
+        const=True,
+        help="Produce bars that are relative to some baseline.",
+        default=False)
 
     args = arg_parser.parse_args()
 
@@ -206,10 +236,9 @@ def main():
 
     if args.opt:
         optimization_set = set(args.opt)
-        perf_data = [
-            (optimization_level, measurements)
-            for optimization_level, measurements in perf_data
-            if optimization_level in optimization_set]
+        perf_data = [(optimization_level, measurements)
+                     for optimization_level, measurements in perf_data
+                     if optimization_level in optimization_set]
         sorted_opt_levels = list(args.opt)
 
     if args.test:
@@ -229,9 +258,11 @@ def main():
 
     if args.relative:
         baseline_opt_level = get_baseline_optimization_level(perf_data_dict)
-        perf_data_dict = get_relative_measurements(perf_data_dict, baseline_opt_level)
+        perf_data_dict = get_relative_measurements(perf_data_dict,
+                                                   baseline_opt_level)
 
     print(create_latex_chart(perf_data_dict, sorted_opt_levels))
 
+
 if __name__ == '__main__':
     main()
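
For context on how the reformatted script is driven: main() builds an argparse interface with a positional performance-data file plus the -q/--quantity, -O/--opt, -t/--test, and -r/--relative options, then prints the generated LaTeX chart to standard output. A minimal invocation might look like the sketch below; the input file name and optimization-level names are hypothetical placeholders, while the flags come from the argparse setup shown in the diff above:

    # Hypothetical input file and optimization levels; flags as defined in main().
    python performance/perf2tex.py perf-results.txt --relative -O O0 O3 > chart.tex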