1"""
2Static Analyzer qualification infrastructure.
3
4This source file contains all the functionality related to benchmarking
5the analyzer on a set projects.  Right now, this includes measuring
6execution time and peak memory usage.  Benchmark runs analysis on every
7project multiple times to get a better picture about the distribution
8of measured values.
9
10Additionally, this file includes a comparison routine for two benchmarking
11results that plots the result together on one chart.
12"""

import SATestUtils as utils
from SATestBuild import ProjectTester, stdout, TestInfo
from ProjectMap import ProjectInfo

import pandas as pd
from typing import List, Optional, Tuple


INDEX_COLUMN = "index"


def _save(data: pd.DataFrame, file_path: str):
    data.to_csv(file_path, index_label=INDEX_COLUMN)


def _load(file_path: str) -> pd.DataFrame:
    return pd.read_csv(file_path, index_col=INDEX_COLUMN)

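# For reference, the stored CSV round-trips the dataframe index under the
# "index" label.  A hypothetical file (values are made up) looks like:
#
#   index,time,memory,iteration,project
#   0,42.1,1254000,0,tmux
#   1,41.8,1249000,1,tmux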

class Benchmark:
    """
    The Benchmark class encapsulates one piece of functionality: it runs
    the analysis multiple times for the given set of projects and stores
    the results in the specified file.
    """
    def __init__(self, projects: List[ProjectInfo], iterations: int,
                 output_path: str):
        self.projects = projects
        self.iterations = iterations
        self.out = output_path

    def run(self):
        results = [self._benchmark_project(project)
                   for project in self.projects]

        # Disabled projects produce None entries, which pd.concat
        # silently drops.
        data = pd.concat(results, ignore_index=True)
        _save(data, self.out)

    def _benchmark_project(self,
                           project: ProjectInfo) -> Optional[pd.DataFrame]:
        if not project.enabled:
            stdout(f" \n\n--- Skipping disabled project {project.name}\n")
            return None

        stdout(f" \n\n--- Benchmarking project {project.name}\n")

        test_info = TestInfo(project)
        tester = ProjectTester(test_info, silent=True)
        project_dir = tester.get_project_dir()
        output_dir = tester.get_output_dir()

        raw_data = []

        for i in range(self.iterations):
            stdout(f"Iteration #{i + 1}")
            time, mem = tester.build(project_dir, output_dir)
            raw_data.append({"time": time, "memory": mem,
                             "iteration": i, "project": project.name})
            stdout(f"time: {utils.time_to_str(time)}, "
                   f"peak memory: {utils.memory_to_str(mem)}")

        return pd.DataFrame(raw_data)


def compare(old_path: str, new_path: str, plot_file: str):
    """
    Compare two benchmarking results stored as .csv files
    and produce a plot in the specified file.
    """
    old = _load(old_path)
    new = _load(new_path)

    old_projects = set(old["project"])
    new_projects = set(new["project"])
    common_projects = old_projects & new_projects

    # Leave only rows for projects common to both dataframes.
    old = old[old["project"].isin(common_projects)]
    new = new[new["project"].isin(common_projects)]

    old, new = _normalize(old, new)

    # Seaborn prefers all the data to be in one dataframe.
    old["kind"] = "old"
    new["kind"] = "new"
    data = pd.concat([old, new], ignore_index=True)

    # TODO: compare data in old and new dataframes using statistical tests
    #       to check if they belong to the same distribution
    _plot(data, plot_file)

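# One possible shape for the TODO above (a sketch, assuming scipy is
# available; the 0.05 significance threshold is an arbitrary choice):
#
#   from scipy.stats import mannwhitneyu
#
#   def _same_distribution(old: pd.Series, new: pd.Series) -> bool:
#       _, p_value = mannwhitneyu(old, new, alternative="two-sided")
#       return p_value >= 0.05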

def _normalize(old: pd.DataFrame,
               new: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
    # Average all numerical data per project.  The old results serve as
    # the baseline: both dataframes are normalized against these means.
    means = old.groupby("project").mean()
    return _normalize_impl(old, means), _normalize_impl(new, means)


def _normalize_impl(data: pd.DataFrame, means: pd.DataFrame):
    # Right now 'means' has one row corresponding to one project,
    # while 'data' has N rows for each project (one for each iteration).
    #
    # To make this data easier to work with, we duplicate
    # 'means' rows to match the size of the 'data' dataframe.
    #
    # All the columns from 'data' will maintain their names, while
    # new columns coming from 'means' will have the "_mean" suffix.
    joined_data = data.merge(means, on="project", suffixes=("", "_mean"))
    _normalize_key(joined_data, "time")
    _normalize_key(joined_data, "memory")
    return joined_data

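# A hypothetical illustration (values are made up): a 'data' row with
# project="tmux" and time=40.0, joined with a 'means' row where the average
# time for "tmux" is 42.0, gets time_mean=42.0, so _normalize_key below
# computes "normalized time" = 40.0 / 42.0, roughly 0.95.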

def _normalize_key(data: pd.DataFrame, key: str):
    norm_key = _normalized_name(key)
    mean_key = f"{key}_mean"
    data[norm_key] = data[key] / data[mean_key]


def _normalized_name(name: str) -> str:
    return f"normalized {name}"


def _plot(data: pd.DataFrame, plot_file: str):
    import matplotlib
    import seaborn as sns
    from matplotlib import pyplot as plt

    sns.set_style("whitegrid")
    # We want to have time and memory charts one above the other.
    figure, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6))

    def _subplot(key: str, ax: matplotlib.axes.Axes):
        sns.boxplot(x="project", y=_normalized_name(key), hue="kind",
                    data=data, palette=sns.color_palette("BrBG", 2), ax=ax)

    _subplot("time", ax1)
    # No need to have xlabels on both top and bottom charts.
    ax1.set_xlabel("")

    _subplot("memory", ax2)
    # The legend on the top chart is enough.
    ax2.get_legend().remove()

    figure.savefig(plot_file)