#!/usr/bin/env python
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Looks for performance regressions on all pushes since the last run.

Run this nightly to have a periodic check for performance regressions.

Stores the results for each run and last checkpoint in a results directory.
"""

import argparse
import datetime
import json
import os
import sys

# pylint: disable=relative-import
from common import PrintWithTime
from common import RunCommandPropagateErr
from githelper import GitHelper
from safetynet_conclusions import PrintConclusionsDictHumanReadable


class JobContext(object):
  """Context for a single run, including name and directory paths."""

  def __init__(self, args):
    self.datetime = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    self.results_dir = args.results_dir
    self.last_revision_covered_file = os.path.join(self.results_dir,
                                                   'last_revision_covered')
    self.run_output_dir = os.path.join(self.results_dir,
                                       'profiles_%s' % self.datetime)
    self.run_output_log_file = os.path.join(self.results_dir,
                                            '%s.log' % self.datetime)
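
    # Illustrative layout of results_dir (assuming a run timestamped
    # 2017-01-01-00-00-00):
    #   <results_dir>/last_revision_covered          checkpoint with last hash
    #   <results_dir>/profiles_2017-01-01-00-00-00/  compare output + raw.json
    #   <results_dir>/2017-01-01-00-00-00.log        log when --output-to-log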


class JobRun(object):
  """A single run looking for regressions since the last one."""

  def __init__(self, args, context):
    """Constructor.

    Args:
      args: Namespace with arguments passed to the script.
      context: JobContext for this run.
    """
    self.git = GitHelper()
    self.args = args
    self.context = context

  def Run(self):
    """Searches for regressions.

    The first run only writes a checkpoint; all subsequent runs compare
    against the last checkpoint.

    Returns:
      Exit code for the script: 0 if no significant changes are found; 1 if
      there was an error in the comparison; 3 if there was a regression; 4 if
      there was an improvement and no regression.
    """
    pdfium_src_dir = os.path.join(
        os.path.dirname(__file__), os.path.pardir, os.path.pardir)
    os.chdir(pdfium_src_dir)

    branch_to_restore = self.git.GetCurrentBranchName()

    if not self.args.no_checkout:
      self.git.FetchOriginMaster()
      self.git.Checkout('origin/master')

    # Make sure the results directory exists.
    if not os.path.exists(self.context.results_dir):
      os.makedirs(self.context.results_dir)

    if not os.path.exists(self.context.last_revision_covered_file):
      result = self._InitialRun()
    else:
      with open(self.context.last_revision_covered_file) as f:
        last_revision_covered = f.read().strip()
      result = self._IncrementalRun(last_revision_covered)

    self.git.Checkout(branch_to_restore)
    return result

  def _InitialRun(self):
    """Initial run, just write a checkpoint.

    Returns:
      Exit code for the script.
    """
    current = self.git.GetCurrentBranchHash()

    PrintWithTime('Initial run, current is %s' % current)

    self._WriteCheckpoint(current)

    PrintWithTime('All set up, next runs will be incremental and perform '
                  'comparisons')
    return 0

  def _IncrementalRun(self, last_revision_covered):
    """Incremental run, compare against last checkpoint and update it.

    Args:
      last_revision_covered: String with hash for last checkpoint.

    Returns:
      Exit code for the script.
    """
    current = self.git.GetCurrentBranchHash()

    PrintWithTime('Incremental run, current is %s, last is %s' %
                  (current, last_revision_covered))

    if not os.path.exists(self.context.run_output_dir):
      os.makedirs(self.context.run_output_dir)

    if current == last_revision_covered:
      PrintWithTime('No changes seen, finishing job')
      output_info = {
          'metadata':
              self._BuildRunMetadata(last_revision_covered, current, False)
      }
      self._WriteRawJson(output_info)
      return 0

    # Run the comparison against the last checkpoint.
    cmd = [
        'testing/tools/safetynet_compare.py', '--this-repo',
        '--machine-readable',
        '--branch-before=%s' % last_revision_covered,
        '--output-dir=%s' % self.context.run_output_dir
    ]
    cmd.extend(self.args.input_paths)

    json_output = RunCommandPropagateErr(cmd)

    if json_output is None:
      return 1

    output_info = json.loads(json_output)

    run_metadata = self._BuildRunMetadata(last_revision_covered, current, True)
    output_info.setdefault('metadata', {}).update(run_metadata)
    self._WriteRawJson(output_info)

    PrintConclusionsDictHumanReadable(
        output_info,
        colored=(not self.args.output_to_log and not self.args.no_color),
        key='after')

    status = 0

    if output_info['summary']['improvement']:
      PrintWithTime('Improvement detected.')
      status = 4

    if output_info['summary']['regression']:
      PrintWithTime('Regression detected.')
      status = 3

    if status == 0:
      PrintWithTime('Nothing detected.')

    self._WriteCheckpoint(current)

    return status

  def _WriteRawJson(self, output_info):
    json_output_file = os.path.join(self.context.run_output_dir, 'raw.json')
    with open(json_output_file, 'w') as f:
      json.dump(output_info, f)

  def _BuildRunMetadata(self, revision_before, revision_after,
                        comparison_performed):
    return {
        'datetime': self.context.datetime,
        'revision_before': revision_before,
        'revision_after': revision_after,
        'comparison_performed': comparison_performed,
    }

  def _WriteCheckpoint(self, checkpoint):
    if not self.args.no_checkpoint:
      with open(self.context.last_revision_covered_file, 'w') as f:
        f.write(checkpoint + '\n')


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('results_dir', help='where to write the job results')
  parser.add_argument(
      'input_paths',
      nargs='+',
      help='pdf files or directories to search for pdf files '
      'to run as test cases')
  parser.add_argument(
      '--no-checkout',
      action='store_true',
      help='whether to skip checking out origin/master. Use '
      'for script debugging.')
  parser.add_argument(
      '--no-checkpoint',
      action='store_true',
      help='whether to skip writing the new checkpoint. Use '
      'for script debugging.')
  parser.add_argument(
      '--no-color',
      action='store_true',
      help='whether to write output without color escape '
      'codes.')
  parser.add_argument(
      '--output-to-log',
      action='store_true',
      help='whether to write output to a log file')
  args = parser.parse_args()

  job_context = JobContext(args)

  if args.output_to_log:
    log_file = open(job_context.run_output_log_file, 'w')
    sys.stdout = log_file
    sys.stderr = log_file

  run = JobRun(args, job_context)
  result = run.Run()

  if args.output_to_log:
    log_file.close()

  return result


if __name__ == '__main__':
  sys.exit(main())