• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
# -*- coding: utf-8 -*-
# Copyright 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""The class to show the banner."""

from __future__ import division
from __future__ import print_function

import collections
import datetime
import time

class ExperimentStatus(object):
  """The status class.

  Tracks completion progress of an experiment's benchmark runs and
  renders human-readable banner strings: a progress bar with ETA, and a
  per-status breakdown of all benchmark runs.
  """

  def __init__(self, experiment):
    # The experiment object is expected to expose: benchmark_runs,
    # num_complete, num_run_complete, start_time, log_level, schedv2(),
    # and machine_manager (verified only as far as this file uses them).
    self.experiment = experiment
    self.num_total = len(self.experiment.benchmark_runs)
    # Count of complete runs observed at the last GetProgressString call;
    # used to detect when a new job has started.
    self.completed = 0
    # Wall-clock time at which the most recent job started; used to
    # estimate how long the currently-running job has been going.
    self.new_job_start_time = time.time()
    self.log_level = experiment.log_level

  def _GetProgressBar(self, num_complete, num_total):
    """Return a one-line textual progress bar.

    Args:
      num_complete: Number of finished benchmark runs.
      num_total: Total number of benchmark runs; may be zero.

    Returns:
      A string such as "Done: 40% [>>>>>>>>>>          ]" with a
      50-character bar.
    """
    bar_length = 50
    done_char = '>'
    undone_char = ' '
    if num_total <= 0:
      # Guard against ZeroDivisionError for an empty experiment; render
      # the same format as the 0-complete case.
      return 'Done: 0%% [%s]' % (bar_length * undone_char)
    ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
    num_complete_chars = bar_length * num_complete // num_total
    num_undone_chars = bar_length - num_complete_chars
    ret += ' [%s%s]' % (num_complete_chars * done_char,
                        num_undone_chars * undone_char)
    return ret

  def GetProgressString(self):
    """Get the elapsed_time, ETA.

    Returns:
      A two-line string: "Current time: ... Elapsed: ... ETA: ..."
      followed by the progress bar from _GetProgressBar.
    """
    current_time = time.time()
    if self.experiment.start_time:
      elapsed_time = current_time - self.experiment.start_time
    else:
      elapsed_time = 0
    try:
      if self.completed != self.experiment.num_complete:
        # A job just finished (and presumably a new one started), so
        # reset the per-job timer.
        self.completed = self.experiment.num_complete
        self.new_job_start_time = current_time
      # Total time spent on jobs that have already completed.
      time_completed_jobs = (
          elapsed_time - (current_time - self.new_job_start_time))
      # eta is calculated as:
      #   ETA = (num_jobs_not_yet_started * estimated_time_per_job)
      #          + time_left_for_current_job
      #   where
      #        num_jobs_not_yet_started = (num_total - num_complete - 1)
      #        estimated_time_per_job = time_completed_jobs / num_run_complete
      #        time_left_for_current_job = estimated_time_per_job -
      #                                    time_spent_so_far_on_current_job
      #
      #  The biggest problem with this calculation is its assumption that
      #  all jobs have roughly the same running time (blatantly false!).
      #
      #  ETA can come out negative if the time spent on the current job is
      #  greater than the estimated time per job (e.g. you're running the
      #  first long job, after a series of short jobs).  For now, if that
      #  happens, we set the ETA to "Unknown."
      eta_seconds = (
          float(self.num_total - self.experiment.num_complete - 1) *
          time_completed_jobs / self.experiment.num_run_complete +
          (time_completed_jobs / self.experiment.num_run_complete -
           (current_time - self.new_job_start_time)))

      eta_seconds = int(eta_seconds)
      if eta_seconds > 0:
        eta = datetime.timedelta(seconds=eta_seconds)
      else:
        eta = 'Unknown'
    except ZeroDivisionError:
      # num_run_complete is zero: no job has finished yet, so there is no
      # per-job estimate to extrapolate from.
      eta = 'Unknown'
    strings = []
    strings.append('Current time: %s Elapsed: %s ETA: %s' %
                   (datetime.datetime.now(),
                    datetime.timedelta(seconds=int(elapsed_time)), eta))
    strings.append(
        self._GetProgressBar(self.experiment.num_complete, self.num_total))
    return '\n'.join(strings)

  def GetStatusString(self):
    """Get the status string of all the benchmark_runs.

    Returns:
      Optional thread-status section, followed by one line per distinct
      run status ("STATUS: <descriptions>").
    """
    # Bin the runs by their most recent timeline event (their status).
    status_bins = collections.defaultdict(list)
    for benchmark_run in self.experiment.benchmark_runs:
      status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run)

    status_strings = []
    for key, val in status_bins.items():
      # RUNNING jobs get the verbose per-run form (with elapsed times);
      # everything else is compacted per label/benchmark.
      if key == 'RUNNING':
        get_description = self._GetNamesAndIterations
      else:
        get_description = self._GetCompactNamesAndIterations
      status_strings.append('%s: %s' % (key, get_description(val)))

    thread_status = ''
    thread_status_format = 'Thread Status: \n{}\n'
    # Call schedv2() once rather than re-evaluating it per branch.
    schedv2 = self.experiment.schedv2()
    if schedv2 is None and self.experiment.log_level == 'verbose':
      # Add the machine manager status.
      thread_status = thread_status_format.format(
          self.experiment.machine_manager.AsString())
    elif schedv2:
      # In schedv2 mode, we always print out thread status.
      thread_status = thread_status_format.format(
          schedv2.threads_status_as_string())

    result = '{}{}'.format(thread_status, '\n'.join(status_strings))

    return result

  def _GetNamesAndIterations(self, benchmark_runs):
    """Describe each run by name with its elapsed time since last event.

    Returns:
      " <count> ('<name>' <elapsed>, ...)" for the given runs.
    """
    strings = []
    # Hoist the clock read so all elapsed times share one reference point.
    t = time.time()
    for benchmark_run in benchmark_runs:
      t_last = benchmark_run.timeline.GetLastEventTime()
      elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
      strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
    return ' %s (%s)' % (len(strings), ', '.join(strings))

  def _GetCompactNamesAndIterations(self, benchmark_runs):
    """Summarize runs grouped by label, then benchmark, with iterations.

    Returns:
      " <count> \n" followed by one "  <label>: <bench> [i1,i2,...]" line
      per label.
    """
    grouped_benchmarks = collections.defaultdict(list)
    for benchmark_run in benchmark_runs:
      grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)

    output_segs = []
    for label_name, label_runs in grouped_benchmarks.items():
      strings = []
      benchmark_iterations = collections.defaultdict(list)
      for benchmark_run in label_runs:
        assert benchmark_run.label.name == label_name
        benchmark_name = benchmark_run.benchmark.name
        benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
      for key, val in benchmark_iterations.items():
        val.sort()
        iterations = ','.join(str(v) for v in val)
        strings.append('{} [{}]'.format(key, iterations))
      output_segs.append('  ' + label_name + ': ' + ', '.join(strings) + '\n')

    return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
150