#!/usr/bin/env vpython
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# process_angle_perf_results.py:
#   Perf result merging and upload. Adapted from the Chromium script:
#   https://chromium.googlesource.com/chromium/src/+/main/tools/perf/process_perf_results.py

from __future__ import print_function

import argparse
import collections
import datetime
import json
import logging
import multiprocessing
import os
import pathlib
import shutil
import subprocess
import sys
import tempfile
import time
import uuid

logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    '  %(module)s.%(funcName)s:%(lineno)d  %(message)s')

PY_UTILS = str(pathlib.Path(__file__).resolve().parents[1] / 'src' / 'tests' / 'py_utils')
if PY_UTILS not in sys.path:
    os.stat(PY_UTILS) and sys.path.insert(0, PY_UTILS)
import angle_metrics
import angle_path_util

angle_path_util.AddDepsDirToPath('tools/perf')
from core import path_util

path_util.AddTelemetryToPath()
from core import upload_results_to_perf_dashboard
from core import results_merger

path_util.AddAndroidPylibToPath()
try:
    from pylib.utils import logdog_helper
except ImportError:
    pass

path_util.AddTracingToPath()
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos

RESULTS_URL = 'https://chromeperf.appspot.com'
JSON_CONTENT_TYPE = 'application/json'
MACHINE_GROUP = 'ANGLE'
BUILD_URL = 'https://ci.chromium.org/ui/p/angle/builders/ci/%s/%d'

GSUTIL_PY_PATH = str(
    pathlib.Path(__file__).resolve().parents[1] / 'third_party' / 'depot_tools' / 'gsutil.py')

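# Note: BUILD_URL is filled in with (buildername, buildnumber) when the
# BUILD_URLS diagnostic is added below, e.g. (illustrative values):
#   https://ci.chromium.org/ui/p/angle/builders/ci/win10-nvidia-gtx1660-perf/1234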

def _upload_perf_results(json_to_upload, name, configuration_name, build_properties,
                         output_json_file):
    """Upload the contents of result JSON(s) to the perf dashboard."""
    args = [
        '--buildername',
        build_properties['buildername'],
        '--buildnumber',
        build_properties['buildnumber'],
        '--name',
        name,
        '--configuration-name',
        configuration_name,
        '--results-file',
        json_to_upload,
        '--results-url',
        RESULTS_URL,
        '--output-json-file',
        output_json_file,
        '--perf-dashboard-machine-group',
        MACHINE_GROUP,
        '--got-angle-revision',
        build_properties['got_angle_revision'],
        '--send-as-histograms',
        '--project',
        'angle',
    ]

    if build_properties.get('git_revision'):
        args.append('--git-revision')
        args.append(build_properties['git_revision'])

    #TODO(crbug.com/1072729): log this in top level
    logging.info('upload_results_to_perf_dashboard: %s.' % args)

    return upload_results_to_perf_dashboard.main(args)

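# The extra_links passed to _merge_json_output map human-readable labels to
# logdog viewer URLs and end up under the 'links' key of the merged output,
# e.g. (illustrative):
#   {'Benchmarks shard map': <logdog url>, 'Benchmarks logs': <logdog url>}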
def _merge_json_output(output_json, jsons_to_merge, extra_links, test_cross_device=False):
    """Merges the contents of one or more results JSONs.

  Args:
    output_json: A path to a JSON file to which the merged results should be
      written.
    jsons_to_merge: A list of JSON files that should be merged.
    extra_links: a (key, value) map in which each key is a human-readable
      string describing the data and each value is the logdog URL that
      contains the data.
  """
    begin_time = time.time()
    merged_results = results_merger.merge_test_results(jsons_to_merge, test_cross_device)

    # Only append the perf results links if present
    if extra_links:
        merged_results['links'] = extra_links

    with open(output_json, 'w') as f:
        json.dump(merged_results, f)

    end_time = time.time()
    print_duration('Merging json test results', begin_time, end_time)
    return 0


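# A version-3 test_results.json is expected to look roughly like the following
# (illustrative); only the presence of a non-empty 'tests' dict is checked here:
#   {'version': 3, 'tests': {<benchmark>: {<story>: {..., 'shard': 0}}}}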
def _handle_perf_json_test_results(benchmark_directory_map, test_results_list):
    """Checks the test_results.json under each folder:

  1. mark the benchmark 'enabled' if test results are found
  2. add the json content to a list for non-ref.
  """
    begin_time = time.time()
    benchmark_enabled_map = {}
    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            # Obtain the test name we are running
            is_ref = '.reference' in benchmark_name
            enabled = True
            try:
                with open(os.path.join(directory, 'test_results.json')) as json_data:
                    json_results = json.load(json_data)
                    if not json_results:
                        # Output is null meaning the test didn't produce any results.
                        # Want to output an error and continue loading the rest of the
                        # test results.
                        logging.warning('No results produced for %s, skipping upload' % directory)
                        continue
                    if json_results.get('version') == 3:
                        # Non-telemetry tests don't have written json results but
                        # if they are executing then they are enabled and will generate
                        # chartjson results.
                        if not bool(json_results.get('tests')):
                            enabled = False
                    if not is_ref:
                        # We don't need to upload reference build data to the
                        # flakiness dashboard since we don't monitor the ref build
                        test_results_list.append(json_results)
            except IOError as e:
                # TODO(crbug.com/936602): Figure out how to surface these errors. Should
                # we have a non-zero exit code if we error out?
                logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
                continue
            if not enabled:
                # We don't upload disabled benchmarks or tests that are run
                # as a smoke test
                logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
                continue
            benchmark_enabled_map[benchmark_name] = True

    end_time = time.time()
    print_duration('Analyzing perf json test results', begin_time, end_time)
    return benchmark_enabled_map


def _generate_unique_logdog_filename(name_prefix):
    return name_prefix + '_' + str(uuid.uuid4())


def _handle_perf_logs(benchmark_directory_map, extra_links):
    """ Upload benchmark logs to logdog and add a page entry for them. """
    begin_time = time.time()
    benchmark_logs_links = collections.defaultdict(list)

    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
            if os.path.exists(benchmark_log_file):
                with open(benchmark_log_file) as f:
                    uploaded_link = logdog_helper.text(
                        name=_generate_unique_logdog_filename(benchmark_name), data=f.read())
                    benchmark_logs_links[benchmark_name].append(uploaded_link)

    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(benchmark_logs_links, sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    extra_links['Benchmarks logs'] = logdog_stream
    end_time = time.time()
    print_duration('Generating perf log streams', begin_time, end_time)


def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
    begin_time = time.time()
    with open(benchmarks_shard_map_file) as f:
        benchmarks_shard_data = f.read()
        logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
        logdog_stream = logdog_helper.text(
            logdog_file_name, benchmarks_shard_data, content_type=JSON_CONTENT_TYPE)
        extra_links['Benchmarks shard map'] = logdog_stream
    end_time = time.time()
    print_duration('Generating benchmark shard map stream', begin_time, end_time)


def _get_benchmark_name(directory):
    return os.path.basename(directory).replace(" benchmark", "")


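# _scan_output_dir expects a layout roughly like the following (illustrative;
# names are examples): shard directories at the top level, each containing a
# benchmarks_shard_map.json plus one subdirectory per benchmark (whose basename
# typically ends in ' benchmark', stripped by _get_benchmark_name):
#   task_output_dir/
#     0/
#       benchmarks_shard_map.json
#       some_benchmark benchmark/
#         test_results.json
#         perf_results.json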
def _scan_output_dir(task_output_dir):
    benchmark_directory_map = {}
    benchmarks_shard_map_file = None

    directory_list = [
        f for f in os.listdir(task_output_dir)
        if not os.path.isfile(os.path.join(task_output_dir, f))
    ]
    benchmark_directory_list = []
    for directory in directory_list:
        for f in os.listdir(os.path.join(task_output_dir, directory)):
            path = os.path.join(task_output_dir, directory, f)
            if os.path.isdir(path):
                benchmark_directory_list.append(path)
            elif path.endswith('benchmarks_shard_map.json'):
                benchmarks_shard_map_file = path
    # Now create a map of benchmark name to the list of directories
    # the lists were written to.
    for directory in benchmark_directory_list:
        benchmark_name = _get_benchmark_name(directory)
        logging.debug('Found benchmark %s directory %s' % (benchmark_name, directory))
        if benchmark_name in benchmark_directory_map.keys():
            benchmark_directory_map[benchmark_name].append(directory)
        else:
            benchmark_directory_map[benchmark_name] = [directory]

    return benchmark_directory_map, benchmarks_shard_map_file


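# The Skia Perf upload writes a JSON file shaped like the following
# (illustrative values) and copies it to GCS with gsutil:
#   {'version': 1,
#    'git_hash': <got_angle_revision>,
#    'key': {'buildername': 'win10-nvidia-gtx1660-perf'},
#    'results': <angle_metrics.ConvertToSkiaPerf(...)>}
# Destination: gs://angle-perf-skia/angle_perftests/<YYYY/MM/DD/HH>/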
def _upload_to_skia_perf(benchmark_directory_map, benchmark_enabled_map, build_properties_map):
    metric_filenames = []

    for benchmark_name, directories in benchmark_directory_map.items():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue

        for directory in directories:
            metric_filenames.append(os.path.join(directory, 'angle_metrics.json'))

    assert metric_filenames

    buildername = build_properties_map['buildername']  # e.g. win10-nvidia-gtx1660-perf
    skia_data = {
        'version': 1,
        'git_hash': build_properties_map['got_angle_revision'],
        'key': {
            'buildername': buildername,
        },
        'results': angle_metrics.ConvertToSkiaPerf(metric_filenames),
    }

    skia_perf_dir = tempfile.mkdtemp('skia_perf')
    try:
        local_file = os.path.join(skia_perf_dir, '%s.%s.json' % (buildername, time.time()))
        with open(local_file, 'w') as f:
            json.dump(skia_data, f, indent=2)
        gs_dir = 'gs://angle-perf-skia/angle_perftests/%s/' % (
            datetime.datetime.now().strftime('%Y/%m/%d/%H'))
        upload_cmd = ['vpython3', GSUTIL_PY_PATH, 'cp', local_file, gs_dir]
        logging.info('Skia upload: %s', ' '.join(upload_cmd))
        subprocess.check_call(upload_cmd)
    finally:
        shutil.rmtree(skia_perf_dir)


def process_perf_results(output_json,
                         configuration_name,
                         build_properties,
                         task_output_dir,
                         smoke_test_mode,
                         output_results_dir,
                         lightweight=False,
                         skip_perf=False):
    """Process perf results.

  Consists of merging the json-test-format output, uploading the perf test
  output (histogram), and storing the benchmark logs in logdog.

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perftest-output.json file containing the performance results in histogram
  format and an output.json file containing the json test results for the
  benchmark.

  Returns:
    (return_code, benchmark_upload_result_map):
      return_code is 0 if the whole operation is successful, non-zero otherwise.
      benchmark_upload_result_map: the dictionary that describes which
        benchmarks were successfully uploaded.
  """
    handle_perf = not lightweight or not skip_perf
    handle_non_perf = not lightweight or skip_perf
    logging.info('lightweight mode: %r; handle_perf: %r; handle_non_perf: %r' %
                 (lightweight, handle_perf, handle_non_perf))

    begin_time = time.time()
    return_code = 0
    benchmark_upload_result_map = {}

    benchmark_directory_map, benchmarks_shard_map_file = _scan_output_dir(task_output_dir)

    test_results_list = []
    extra_links = {}

    if handle_non_perf:
        # First, upload benchmarks shard map to logdog and add a page
        # entry for it in extra_links.
        if benchmarks_shard_map_file:
            _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)

        # Second, upload all the benchmark logs to logdog and add a page entry for
        # those links in extra_links.
        _handle_perf_logs(benchmark_directory_map, extra_links)

    # Then try to obtain the list of json test results to merge
    # and determine the status of each benchmark.
    benchmark_enabled_map = _handle_perf_json_test_results(benchmark_directory_map,
                                                           test_results_list)

    if not smoke_test_mode and handle_perf:
        build_properties_map = json.loads(build_properties)
        if not configuration_name:
            # we are deprecating perf-id crbug.com/817823
            configuration_name = build_properties_map['buildername']

        try:
            return_code, benchmark_upload_result_map = _handle_perf_results(
                benchmark_enabled_map, benchmark_directory_map, configuration_name,
                build_properties_map, extra_links, output_results_dir)
        except Exception:
            logging.exception('Error handling perf results jsons')
            return_code = 1

        try:
            _upload_to_skia_perf(benchmark_directory_map, benchmark_enabled_map,
                                 build_properties_map)
        except Exception:
            logging.exception('Error uploading to skia perf')
            return_code = 1

    if handle_non_perf:
        # Finally, merge all test results json, add the extra links and write out to
        # the output location.
        try:
            _merge_json_output(output_json, test_results_list, extra_links)
        except Exception:
            logging.exception('Error handling test results jsons.')

    end_time = time.time()
    print_duration('Total process_perf_results', begin_time, end_time)
    return return_code, benchmark_upload_result_map


def _merge_histogram_results(histogram_lists):
    merged_results = []
    for histogram_list in histogram_lists:
        merged_results += histogram_list

    return merged_results


def _load_histogram_set_from_dict(data):
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(data)
    return histograms


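# Each diagnostic below is attached to every histogram as a shared GenericSet
# keyed by its reserved_infos name, associating the results with the builder,
# build URL, commit position and ANGLE revision.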
def _add_build_info(results, benchmark_name, build_properties):
    histograms = _load_histogram_set_from_dict(results)

    common_diagnostics = {
        reserved_infos.MASTERS:
            build_properties['builder_group'],
        reserved_infos.BOTS:
            build_properties['buildername'],
        reserved_infos.POINT_ID:
            build_properties['angle_commit_pos'],
        reserved_infos.BENCHMARKS:
            benchmark_name,
        reserved_infos.ANGLE_REVISIONS:
            build_properties['got_angle_revision'],
        reserved_infos.BUILD_URLS:
            BUILD_URL % (build_properties['buildername'], build_properties['buildnumber']),
    }

    for k, v in common_diagnostics.items():
        histograms.AddSharedDiagnosticToAllHistograms(k.name, generic_set.GenericSet([v]))

    return histograms.AsDicts()


def _merge_perf_results(benchmark_name, results_filename, directories, build_properties):
    begin_time = time.time()
    collected_results = []
    for directory in directories:
        filename = os.path.join(directory, 'perf_results.json')
        try:
            with open(filename) as pf:
                collected_results.append(json.load(pf))
        except IOError as e:
            # TODO(crbug.com/936602): Figure out how to surface these errors. Should
            # we have a non-zero exit code if we error out?
            logging.error('Failed to obtain perf results from %s: %s', directory, e)
    if not collected_results:
        logging.error('Failed to obtain any perf results from %s.', benchmark_name)
        return

    # Results from multiple shards are assumed to be histogram sets.
    # Non-telemetry benchmarks only ever run on one shard.
    merged_results = []
    assert isinstance(collected_results[0], list)
    merged_results = _merge_histogram_results(collected_results)

    # Write additional histogram build info.
    merged_results = _add_build_info(merged_results, benchmark_name, build_properties)

    with open(results_filename, 'w') as rf:
        json.dump(merged_results, rf)

    end_time = time.time()
    print_duration(('%s results merging' % (benchmark_name)), begin_time, end_time)


def _upload_individual(benchmark_name, directories, configuration_name, build_properties,
                       output_json_file):
    tmpfile_dir = tempfile.mkdtemp()
    try:
        upload_begin_time = time.time()
        # There are potentially multiple directories with results; re-write and
        # merge them if necessary.
        results_filename = None
        if len(directories) > 1:
            merge_perf_dir = os.path.join(os.path.abspath(tmpfile_dir), benchmark_name)
            if not os.path.exists(merge_perf_dir):
                os.makedirs(merge_perf_dir)
            results_filename = os.path.join(merge_perf_dir, 'merged_perf_results.json')
            _merge_perf_results(benchmark_name, results_filename, directories, build_properties)
        else:
            # It was only written to one shard, use that shard's data.
            results_filename = os.path.join(directories[0], 'perf_results.json')

        results_size_in_mib = os.path.getsize(results_filename) / (2**20)
        logging.info('Uploading perf results from %s benchmark (size %s MiB)' %
                     (benchmark_name, results_size_in_mib))
        with open(output_json_file, 'w') as oj:
            upload_return_code = _upload_perf_results(results_filename, benchmark_name,
                                                      configuration_name, build_properties, oj)
            upload_end_time = time.time()
            print_duration(('%s upload time' % (benchmark_name)), upload_begin_time,
                           upload_end_time)
            return (benchmark_name, upload_return_code == 0)
    finally:
        shutil.rmtree(tmpfile_dir)


def _upload_individual_benchmark(params):
    try:
        return _upload_individual(*params)
    except Exception:
        benchmark_name = params[0]
        upload_succeed = False
        logging.exception('Error uploading perf result of %s' % benchmark_name)
        return benchmark_name, upload_succeed


def _GetCpuCount(log=True):
    try:
        cpu_count = multiprocessing.cpu_count()
        if sys.platform == 'win32':
            # TODO(crbug.com/1190269) - we can't use more than 56
            # cores on Windows or Python3 may hang.
            cpu_count = min(cpu_count, 56)
        return cpu_count
    except NotImplementedError:
        if log:
            logging.warning('Failed to get a CPU count for this bot. See crbug.com/947035.')
        # TODO(crbug.com/948281): This is currently set to 4 since the mac masters
        # only have 4 cores. Once we move to all-linux, this can be increased or
        # we can even delete this whole function and use multiprocessing.cpu_count()
        # directly.
        return 4


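# The shard id is read from test_results.json, which is expected to nest
# roughly as tests -> benchmark -> measurement -> {'shard': <id>, ...}
# (illustrative); the first measurement found wins.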
def _load_shard_id_from_test_results(directory):
    shard_id = None
    test_json_path = os.path.join(directory, 'test_results.json')
    try:
        with open(test_json_path) as f:
            test_json = json.load(f)
            all_results = test_json['tests']
            for _, benchmark_results in all_results.items():
                for _, measurement_result in benchmark_results.items():
                    shard_id = measurement_result['shard']
                    break
    except IOError as e:
        logging.error('Failed to open test_results.json from %s: %s', test_json_path, e)
    except KeyError as e:
        logging.error('Failed to locate results in test_results.json: %s', e)
    return shard_id


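# The shard map's 'extra_infos' section is expected to map 'bot #<shard_id>'
# to a device identifier, e.g. (illustrative shape):
#   {'extra_infos': {'bot #0': '<device id>', ...}, ...}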
def _find_device_id_by_shard_id(benchmarks_shard_map_file, shard_id):
    device_id = None
    try:
        with open(benchmarks_shard_map_file) as f:
            shard_map_json = json.load(f)
            device_id = shard_map_json['extra_infos']['bot #%s' % shard_id]
    except KeyError as e:
        logging.error('Failed to locate device name in shard map: %s', e)
    return device_id


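# This helper prepends a GenericSet of the form
#   {'values': ['device_id'], 'guid': <uuid>, 'type': 'GenericSet'}
# to perf_results.json, points each histogram's 'summaryKeys' diagnostic at it,
# and appends the device id to every referenced 'stories' GenericSet.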
def _update_perf_json_with_summary_on_device_id(directory, device_id):
    perf_json_path = os.path.join(directory, 'perf_results.json')
    try:
        with open(perf_json_path, 'r') as f:
            perf_json = json.load(f)
    except IOError as e:
        logging.error('Failed to open perf_results.json from %s: %s', perf_json_path, e)
        return
    summary_key_guid = str(uuid.uuid4())
    summary_key_generic_set = {
        'values': ['device_id'],
        'guid': summary_key_guid,
        'type': 'GenericSet'
    }
    perf_json.insert(0, summary_key_generic_set)
    logging.info('Inserted summary key generic set for perf result in %s: %s', directory,
                 summary_key_generic_set)
    stories_guids = set()
    for entry in perf_json:
        if 'diagnostics' in entry:
            entry['diagnostics']['summaryKeys'] = summary_key_guid
            stories_guids.add(entry['diagnostics']['stories'])
    for entry in perf_json:
        if 'guid' in entry and entry['guid'] in stories_guids:
            entry['values'].append(device_id)
    try:
        with open(perf_json_path, 'w') as f:
            json.dump(perf_json, f)
    except IOError as e:
        logging.error('Failed to write perf_results.json to %s: %s', perf_json_path, e)
    logging.info('Finished adding device id %s in perf result.', device_id)


def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map, configuration_name,
                         build_properties, extra_links, output_results_dir):
    """
    Upload perf results to the perf dashboard.

    This method also uploads the perf results to logdog and adds the links to
    |extra_links|.

    Returns:
      (return_code, benchmark_upload_result_map)
      return_code is 0 if the upload to the perf dashboard succeeded, 1
        otherwise.
      benchmark_upload_result_map is a dictionary describing which benchmarks
        were successfully uploaded.
    """
    begin_time = time.time()
    # Upload all eligible benchmarks to the perf dashboard
    results_dict = {}

    invocations = []
    for benchmark_name, directories in benchmark_directory_map.items():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue
        # Create a place to write the perf results that will be written out to
        # logdog.
        output_json_file = os.path.join(output_results_dir, (str(uuid.uuid4()) + benchmark_name))
        results_dict[benchmark_name] = output_json_file
        #TODO(crbug.com/1072729): pass final arguments instead of build properties
        # and configuration_name
        invocations.append(
            (benchmark_name, directories, configuration_name, build_properties, output_json_file))

    # Kick off the uploads in multiple processes
    # crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves
    # to 2 processes to avoid this error. Uncomment the following code once
    # the problem is fixed on the dashboard side.
    # pool = multiprocessing.Pool(_GetCpuCount())
    pool = multiprocessing.Pool(2)
    upload_result_timeout = False
    try:
        async_result = pool.map_async(_upload_individual_benchmark, invocations)
        # TODO(crbug.com/947035): What timeout is reasonable?
        results = async_result.get(timeout=4000)
    except multiprocessing.TimeoutError:
        upload_result_timeout = True
        logging.error('Timeout uploading benchmarks to perf dashboard in parallel')
        results = []
        for benchmark_name in benchmark_directory_map:
            results.append((benchmark_name, False))
    finally:
        pool.terminate()

    # Keep a mapping of benchmarks to their upload results
    benchmark_upload_result_map = {}
    for r in results:
        benchmark_upload_result_map[r[0]] = r[1]

    logdog_dict = {}
    upload_failures_counter = 0
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    for benchmark_name, output_file in results_dict.items():
        upload_succeed = benchmark_upload_result_map[benchmark_name]
        if not upload_succeed:
            upload_failures_counter += 1
        is_reference = '.reference' in benchmark_name
        _write_perf_data_to_logfile(
            benchmark_name,
            output_file,
            configuration_name,
            build_properties,
            logdog_dict,
            is_reference,
            upload_failure=not upload_succeed)

    logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(dict(logdog_dict), sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    if upload_failures_counter > 0:
        logdog_label += (' %s merge script perf data upload failures' % upload_failures_counter)
    extra_links[logdog_label] = logdog_stream
    end_time = time.time()
    print_duration('Uploading results to perf dashboard', begin_time, end_time)
    if upload_result_timeout or upload_failures_counter > 0:
        return 1, benchmark_upload_result_map
    return 0, benchmark_upload_result_map


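# logdog_dict accumulates, per base benchmark name, viewer links and upload
# status flags, e.g. (illustrative):
#   {'some_benchmark': {'perf_results': <viewer url>, 'upload_failed': 'True'}}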
def _write_perf_data_to_logfile(benchmark_name, output_file, configuration_name, build_properties,
                                logdog_dict, is_ref, upload_failure):
    viewer_url = None
    # logdog file to write perf results to
    if os.path.exists(output_file):
        results = None
        with open(output_file) as f:
            try:
                results = json.load(f)
            except ValueError:
                logging.error('Error parsing perf results JSON for benchmark %s' % benchmark_name)
        if results:
            try:
                json_fname = _generate_unique_logdog_filename(benchmark_name)
                output_json_file = logdog_helper.open_text(json_fname)
                json.dump(results, output_json_file, indent=4, separators=(',', ': '))
            except ValueError as e:
                logging.error('ValueError: "%s" while dumping output to logdog' % e)
            finally:
                output_json_file.close()
            viewer_url = output_json_file.get_viewer_url()
    else:
        logging.warning("Perf results JSON file doesn't exist for benchmark %s" % benchmark_name)

    base_benchmark_name = benchmark_name.replace('.reference', '')

    if base_benchmark_name not in logdog_dict:
        logdog_dict[base_benchmark_name] = {}

    # Add links for the perf results and the dashboard url to
    # the logs section of buildbot.
    if is_ref:
        if viewer_url:
            logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
        if upload_failure:
            logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
    else:
        # TODO(jmadill): Figure out if we can get a dashboard URL here. http://anglebug.com/6090
        # logdog_dict[base_benchmark_name]['dashboard_url'] = (
        #     upload_results_to_perf_dashboard.GetDashboardUrl(benchmark_name, configuration_name,
        #                                                      RESULTS_URL,
        #                                                      build_properties['got_revision_cp'],
        #                                                      _GetMachineGroup(build_properties)))
        if viewer_url:
            logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
        if upload_failure:
            logdog_dict[base_benchmark_name]['upload_failed'] = 'True'


def print_duration(step, start, end):
    logging.info('Duration of %s: %d seconds' % (step, end - start))


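# Example invocation (flag values are illustrative; this script is normally
# invoked as a results merge script, see the main() docstring):
#   vpython process_angle_perf_results.py \
#     --build-properties '<json>' \
#     --summary-json summary.json \
#     --task-output-dir /path/to/task_output_dir \
#     -o merged_output.json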
def main():
    """ See collect_task.collect_task for more on the merge script API. """
    logging.info(sys.argv)
    parser = argparse.ArgumentParser()
    # configuration-name (previously perf-id) is the name of the bot the tests run on.
    # For example, buildbot-test is the name of the android-go-perf bot.
    # configuration-name and results-url are set in the json file, which is going
    # away: tools/perf/core/chromium.perf.fyi.extras.json
    parser.add_argument('--configuration-name', help=argparse.SUPPRESS)
    parser.add_argument('--build-properties', help=argparse.SUPPRESS)
    parser.add_argument('--summary-json', required=True, help=argparse.SUPPRESS)
    parser.add_argument('--task-output-dir', required=True, help=argparse.SUPPRESS)
    parser.add_argument('-o', '--output-json', required=True, help=argparse.SUPPRESS)
    parser.add_argument(
        '--skip-perf',
        action='store_true',
        help='In lightweight mode, using --skip-perf will skip the performance'
        ' data handling.')
    parser.add_argument(
        '--lightweight',
        action='store_true',
        help='Choose the lightweight mode in which the perf result handling'
        ' is performed on a separate VM.')
    parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
    parser.add_argument(
        '--smoke-test-mode',
        action='store_true',
        help='This test should be run in smoke test mode'
        ' meaning it does not upload to the perf dashboard.')

    args = parser.parse_args()

    with open(args.summary_json) as f:
        shard_summary = json.load(f)
    shard_failed = any(int(shard.get('exit_code', 1)) != 0 for shard in shard_summary['shards'])

    output_results_dir = tempfile.mkdtemp('outputresults')
    try:
        return_code, _ = process_perf_results(args.output_json, args.configuration_name,
                                              args.build_properties, args.task_output_dir,
                                              args.smoke_test_mode, output_results_dir,
                                              args.lightweight, args.skip_perf)
    except Exception:
        logging.exception('process_perf_results raised an exception')
        return_code = 1
    finally:
        shutil.rmtree(output_results_dir)

    if return_code != 0 and shard_failed:
        logging.warning('Perf processing failed but one or more shards failed earlier')
        return_code = 0  # Enables the failed build info to be rendered normally

    return return_code


if __name__ == '__main__':
    sys.exit(main())