#!/usr/bin/env python3
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
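"""Diff test runner for trace_processor.

Runs the queries and metrics listed in the test index files against a
trace_processor binary and diffs the output against the committed
"expected" files. Can also record perf numbers (--perf-file) and rebase
the expected files (--rebase).
"""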

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import datetime
import difflib
import glob
import importlib
import json
import os
import re
import subprocess
import sys
import tempfile

from itertools import chain
from google.protobuf import reflection, text_format

from proto_utils import create_message_factory, serialize_textproto_trace, serialize_python_trace

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
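# Environment for the trace_processor subprocess. The platform branches
# below put the prebuilt llvm-symbolizer directory on PATH.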
ENV = {
    'PERFETTO_BINARY_PATH': os.path.join(ROOT_DIR, 'test', 'data'),
}
if sys.platform.startswith('linux'):
  ENV['PATH'] = os.path.join(ROOT_DIR, 'buildtools', 'linux64', 'clang', 'bin')
elif sys.platform.startswith('darwin'):
  # Sadly, on macOS we need to check out the Android deps to get
  # llvm symbolizer.
  ENV['PATH'] = os.path.join(ROOT_DIR, 'buildtools', 'ndk', 'toolchains',
                             'llvm', 'prebuilt', 'darwin-x86_64', 'bin')
elif sys.platform.startswith('win32'):
  ENV['PATH'] = os.path.join(ROOT_DIR, 'buildtools', 'win', 'clang', 'bin')


class Test(object):
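  """A single diff test: a trace, a query or metric, and an expected output."""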

  def __init__(self, type, trace_path, query_path_or_metric, expected_path):
    self.type = type
    self.trace_path = trace_path
    self.query_path_or_metric = query_path_or_metric
    self.expected_path = expected_path


class PerfResult(object):
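  """Ingest and query (real) times, in ns, recorded for one passing test."""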

  def __init__(self, test_type, trace_path, query_path_or_metric,
               ingest_time_ns_str, real_time_ns_str):
    self.test_type = test_type
    self.trace_path = trace_path
    self.query_path_or_metric = query_path_or_metric
    self.ingest_time_ns = int(ingest_time_ns_str)
    self.real_time_ns = int(real_time_ns_str)


class TestResult(object):
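  """The result of running one test: the command, its output and exit code."""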

  def __init__(self, test_type, input_name, trace, cmd, expected, actual,
               stderr, exit_code):
    self.test_type = test_type
    self.input_name = input_name
    self.trace = trace
    self.cmd = cmd
    self.expected = expected
    self.actual = actual
    self.stderr = stderr
    self.exit_code = exit_code


def create_metrics_message_factory(metrics_descriptor_paths):
  return create_message_factory(metrics_descriptor_paths,
                                'perfetto.protos.TraceMetrics')


def write_diff(expected, actual):
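  """Writes a unified diff of |expected| vs |actual| to stderr."""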
  expected_lines = expected.splitlines(True)
  actual_lines = actual.splitlines(True)
  diff = difflib.unified_diff(
      expected_lines, actual_lines, fromfile='expected', tofile='actual')
  for line in diff:
    sys.stderr.write(line)


def run_metrics_test(trace_processor_path, gen_trace_path, metric,
                     expected_path, perf_path, metrics_message_factory):
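  """Runs a single metrics diff test against trace_processor.

  JSON metrics are compared as plain text; binary metrics are parsed with
  |metrics_message_factory| and both sides are normalized to text proto
  form before being compared.
  """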
  with open(expected_path, 'r') as expected_file:
    expected = expected_file.read()

  json_output = os.path.basename(expected_path).endswith('.json.out')
  cmd = [
      trace_processor_path,
      '--run-metrics',
      metric,
      '--metrics-output=%s' % ('json' if json_output else 'binary'),
      '--perf-file',
      perf_path,
      gen_trace_path,
  ]
  tp = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=ENV)
  (stdout, stderr) = tp.communicate()

  if json_output:
    expected_text = expected
    actual_text = stdout.decode('utf8')
  else:
    # Expected will be in text proto format and we'll need to parse it to a
    # real proto.
    expected_message = metrics_message_factory()
    text_format.Merge(expected, expected_message)

    # Actual will be the raw bytes of the proto and we'll need to parse it
    # into a message.
    actual_message = metrics_message_factory()
    actual_message.ParseFromString(stdout)

    # Convert both back to text format.
    expected_text = text_format.MessageToString(expected_message)
    actual_text = text_format.MessageToString(actual_message)

  return TestResult('metric', metric, gen_trace_path, cmd, expected_text,
                    actual_text, stderr.decode('utf8'), tp.returncode)


def run_query_test(trace_processor_path, gen_trace_path, query_path,
                   expected_path, perf_path):
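  """Runs a single SQL query diff test against trace_processor."""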
  with open(expected_path, 'r') as expected_file:
    expected = expected_file.read()

  cmd = [
      trace_processor_path,
      '-q',
      query_path,
      '--perf-file',
      perf_path,
      gen_trace_path,
  ]

  tp = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=ENV)
  (stdout, stderr) = tp.communicate()
  return TestResult('query', query_path, gen_trace_path, cmd, expected,
                    stdout.decode('utf8'), stderr.decode('utf8'), tp.returncode)


def run_all_tests(trace_processor, trace_descriptor_path,
                  extension_descriptor_paths, metrics_message_factory, tests,
                  keep_input, rebase):
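  """Runs every test in |tests|, writing gtest-style progress to stderr.

  Generated (.py / .textproto) traces are serialized to a temporary file
  first. Returns a (failure count, perf results, rebased count) tuple.
  """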
  perf_data = []
  test_failure = 0
  rebased = 0
  for test in tests:
    trace_path = test.trace_path
    expected_path = test.expected_path
    if not os.path.exists(trace_path):
      sys.stderr.write('Trace file not found {}\n'.format(trace_path))
      test_failure += 1
      continue
    elif not os.path.exists(expected_path):
      sys.stderr.write('Expected file not found {}\n'.format(expected_path))
      test_failure += 1
      continue

    is_generated_trace = trace_path.endswith('.py') or trace_path.endswith(
        '.textproto')
    if trace_path.endswith('.py'):
      gen_trace_file = tempfile.NamedTemporaryFile(delete=False)
      serialize_python_trace(trace_descriptor_path, trace_path, gen_trace_file)
      gen_trace_path = os.path.realpath(gen_trace_file.name)
    elif trace_path.endswith('.textproto'):
      gen_trace_file = tempfile.NamedTemporaryFile(delete=False)
      serialize_textproto_trace(trace_descriptor_path,
                                extension_descriptor_paths, trace_path,
                                gen_trace_file)
      gen_trace_path = os.path.realpath(gen_trace_file.name)
    else:
      gen_trace_file = None
      gen_trace_path = trace_path
    # We can't use delete=True here: on Windows the resulting file would be
    # opened in exclusive mode (a subtle side-effect of the underlying
    # CreateFile(FILE_ATTRIBUTE_TEMPORARY)) and TP would fail to open the
    # passed path.
    tmp_perf_file = tempfile.NamedTemporaryFile(delete=False)
    sys.stderr.write('[ RUN      ] {} {}\n'.format(
        os.path.basename(test.query_path_or_metric),
        os.path.basename(trace_path)))

    tmp_perf_path = tmp_perf_file.name
    if test.type == 'queries':
      query_path = test.query_path_or_metric

      if not os.path.exists(test.query_path_or_metric):
        print('Query file not found {}'.format(query_path))
        test_failure += 1
        continue

      result = run_query_test(trace_processor, gen_trace_path, query_path,
                              expected_path, tmp_perf_path)
    elif test.type == 'metrics':
      result = run_metrics_test(trace_processor, gen_trace_path,
                                test.query_path_or_metric, expected_path,
                                tmp_perf_path, metrics_message_factory)
    else:
      assert False

    perf_lines = [line.decode('utf8') for line in tmp_perf_file.readlines()]
    tmp_perf_file.close()
    os.remove(tmp_perf_file.name)

    if gen_trace_file:
      if keep_input:
        sys.stderr.write(
            "Saving generated input trace: {}\n".format(gen_trace_path))
      else:
        gen_trace_file.close()
        os.remove(gen_trace_path)

    def write_cmdlines():
      if is_generated_trace:
        sys.stderr.write(
            'Command to generate trace:\n'
            'tools/serialize_test_trace.py --descriptor {} {} > {}\n'.format(
                os.path.relpath(trace_descriptor_path, ROOT_DIR),
                os.path.relpath(trace_path, ROOT_DIR),
                os.path.relpath(gen_trace_path, ROOT_DIR)))
      sys.stderr.write('Command line:\n{}\n'.format(' '.join(result.cmd)))

    contents_equal = (
        result.expected.replace('\r\n',
                                '\n') == result.actual.replace('\r\n', '\n'))
    if result.exit_code != 0 or not contents_equal:
      sys.stderr.write(result.stderr)

      if result.exit_code == 0:
        sys.stderr.write(
            'Expected did not match actual for trace {} and {} {}\n'.format(
                trace_path, result.test_type, result.input_name))
        sys.stderr.write('Expected file: {}\n'.format(expected_path))
        write_cmdlines()
        write_diff(result.expected, result.actual)
      else:
        write_cmdlines()

      sys.stderr.write('[     FAIL ] {} {}\n'.format(
          os.path.basename(test.query_path_or_metric),
          os.path.basename(trace_path)))

      if rebase:
        if result.exit_code == 0:
          sys.stderr.write('Rebasing {}\n'.format(expected_path))
          with open(expected_path, 'w') as f:
            f.write(result.actual)
          # Increment the |rebased| counter that is returned to the caller
          # (the original code incremented the boolean |rebase| flag).
          rebased += 1
        else:
          sys.stderr.write(
              'Rebase failed for {} as query failed\n'.format(expected_path))

      test_failure += 1
    else:
      assert len(perf_lines) == 1
      perf_numbers = perf_lines[0].split(',')

      assert len(perf_numbers) == 2
      perf_result = PerfResult(test.type, trace_path, test.query_path_or_metric,
                               perf_numbers[0], perf_numbers[1])
      perf_data.append(perf_result)

      sys.stderr.write(
          '[       OK ] {} {} (ingest: {} ms, query: {} ms)\n'.format(
              os.path.basename(test.query_path_or_metric),
              os.path.basename(trace_path),
              perf_result.ingest_time_ns / 1000000,
              perf_result.real_time_ns / 1000000))

  return test_failure, perf_data, rebased


def read_all_tests_from_index(index_path, query_metric_pattern, trace_pattern):
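  """Parses a single index file into a list of Test objects.

  Each non-empty, non-comment line of an index file has the form:
    <trace file> <query file or metric name> <expected output file>
  Entries whose trace or query/metric name does not match the given
  patterns are skipped.
  """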
  index_dir = os.path.dirname(index_path)

  with open(index_path, 'r') as index_file:
    index_lines = index_file.readlines()

  tests = []
  for line in index_lines:
    stripped = line.strip()
    if stripped.startswith('#'):
      continue
    elif not stripped:
      continue

    [trace_fname, query_fname_or_metric, expected_fname] = stripped.split(' ')
    if not query_metric_pattern.match(os.path.basename(query_fname_or_metric)):
      continue

    if not trace_pattern.match(os.path.basename(trace_fname)):
      continue

    trace_path = os.path.abspath(os.path.join(index_dir, trace_fname))
    expected_path = os.path.abspath(os.path.join(index_dir, expected_fname))

    if query_fname_or_metric.endswith('.sql'):
      test_type = 'queries'
      query_path_or_metric = os.path.abspath(
          os.path.join(index_dir, query_fname_or_metric))
    else:
      test_type = 'metrics'
      query_path_or_metric = query_fname_or_metric

    tests.append(
        Test(test_type, trace_path, query_path_or_metric, expected_path))
  return tests


def read_all_tests(query_metric_pattern, trace_pattern):
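  """Reads the tests from every index file listed in include_index."""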
  include_index_dir = os.path.join(ROOT_DIR, 'test', 'trace_processor')
  include_index = os.path.join(include_index_dir, 'include_index')
  tests = []
  with open(include_index, 'r') as include_file:
    for index_relpath in include_file.readlines():
      index_path = os.path.join(include_index_dir, index_relpath.strip())
      tests.extend(
          read_all_tests_from_index(index_path, query_metric_pattern,
                                    trace_pattern))
  return tests


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--test-type', type=str, default='all')
  parser.add_argument('--trace-descriptor', type=str)
  parser.add_argument('--metrics-descriptor', type=str)
  parser.add_argument('--perf-file', type=str)
  parser.add_argument(
      '--query-metric-filter',
      default='.*',
      type=str,
      help=
      'Filter the name of query files or metrics to diff test (regex syntax)')
  parser.add_argument(
      '--trace-filter',
      default='.*',
      type=str,
      help='Filter the name of trace files to diff test (regex syntax)')
  parser.add_argument(
      '--keep-input',
      action='store_true',
      help='Save the (generated) input pb file for debugging')
  parser.add_argument(
      '--rebase',
      action='store_true',
      help='Update the expected output file with the actual result')
  parser.add_argument(
      'trace_processor', type=str, help='location of trace processor binary')
  args = parser.parse_args()

  query_metric_pattern = re.compile(args.query_metric_filter)
  trace_pattern = re.compile(args.trace_filter)

  tests = read_all_tests(query_metric_pattern, trace_pattern)
  sys.stderr.write('[==========] Running {} tests.\n'.format(len(tests)))

  out_path = os.path.dirname(args.trace_processor)
  if args.trace_descriptor:
    trace_descriptor_path = args.trace_descriptor
  else:

    def find_trace_descriptor(parent):
      trace_protos_path = os.path.join(parent, 'gen', 'protos', 'perfetto',
                                       'trace')
      return os.path.join(trace_protos_path, 'trace.descriptor')

    trace_descriptor_path = find_trace_descriptor(out_path)
    if not os.path.exists(trace_descriptor_path):
      trace_descriptor_path = find_trace_descriptor(
          os.path.join(out_path, 'gcc_like_host'))

  if args.metrics_descriptor:
    metrics_descriptor_paths = [args.metrics_descriptor]
  else:
    metrics_protos_path = os.path.join(out_path, 'gen', 'protos', 'perfetto',
                                       'metrics')
    metrics_descriptor_paths = [
        os.path.join(metrics_protos_path, 'metrics.descriptor'),
        os.path.join(metrics_protos_path, 'chrome',
                     'all_chrome_metrics.descriptor')
    ]

  chrome_extensions = os.path.join(out_path, 'gen', 'protos', 'third_party',
                                   'chromium', 'chrome_track_event.descriptor')
  test_extensions = os.path.join(out_path, 'gen', 'protos', 'perfetto', 'trace',
                                 'test_extensions.descriptor')

  metrics_message_factory = create_metrics_message_factory(
      metrics_descriptor_paths)

  test_run_start = datetime.datetime.now()
  test_failure, perf_data, rebased = run_all_tests(
      args.trace_processor, trace_descriptor_path,
      [chrome_extensions, test_extensions], metrics_message_factory, tests,
      args.keep_input, args.rebase)
  test_run_end = datetime.datetime.now()

  sys.stderr.write('[==========] {} tests ran. ({} ms total)\n'.format(
      len(tests), int((test_run_end - test_run_start).total_seconds() * 1000)))
  sys.stderr.write('[  PASSED  ] {} tests.\n'.format(len(tests) - test_failure))
  if args.rebase:
    sys.stderr.write('{} tests rebased.\n'.format(rebased))

  if test_failure == 0:
    if args.perf_file:
      test_dir = os.path.join(ROOT_DIR, 'test')
      trace_processor_dir = os.path.join(test_dir, 'trace_processor')

      metrics = []
      sorted_data = sorted(
          perf_data,
          key=lambda x: (x.test_type, x.trace_path, x.query_path_or_metric))
      for perf_args in sorted_data:
        trace_short_path = os.path.relpath(perf_args.trace_path, test_dir)

        query_short_path_or_metric = perf_args.query_path_or_metric
        if perf_args.test_type == 'queries':
          query_short_path_or_metric = os.path.relpath(
              perf_args.query_path_or_metric, trace_processor_dir)

        metrics.append({
            'metric': 'tp_perf_test_ingest_time',
            'value': float(perf_args.ingest_time_ns) / 1.0e9,
            'unit': 's',
            'tags': {
                'test_name':
                    '{}-{}'.format(trace_short_path,
                                   query_short_path_or_metric),
                'test_type':
                    perf_args.test_type,
            },
            'labels': {},
        })
        metrics.append({
            'metric': 'perf_test_real_time',
            'value': float(perf_args.real_time_ns) / 1.0e9,
            'unit': 's',
            'tags': {
                'test_name':
                    '{}-{}'.format(
                        os.path.relpath(perf_args.trace_path, test_dir),
                        query_short_path_or_metric),
                'test_type':
                    perf_args.test_type,
            },
            'labels': {},
        })

      output_data = {'metrics': metrics}
      with open(args.perf_file, 'w+') as perf_file:
        perf_file.write(json.dumps(output_data, indent=2))
    return 0
  else:
    sys.stderr.write('[  FAILED  ] {} tests.\n'.format(test_failure))
    return 1


if __name__ == '__main__':
  sys.exit(main())