• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4"""A reproducing entity.
5
6Part of the Chrome build flags optimization.
7
8The Task class is used by different modules. Each module fills in the
9corresponding information into a Task instance. Class Task contains the bit set
10representing the flags selection. The builder module is responsible for filling
11the image and the checksum field of a Task. The executor module will put the
12execution output to the execution field.
13"""
14
15__author__ = 'yuhenglong@google.com (Yuheng Long)'
16
17import os
18import subprocess
19import sys
20from uuid import uuid4
21
# Stage identifiers. Task methods (GetIdentifier, GetResult, SetResult, Done,
# Work) use these as dispatch-dictionary keys to select per-stage behavior.
BUILD_STAGE = 1
TEST_STAGE = 2

# Message indicating that the build or test failed.
ERROR_STRING = 'error'

# The maximum number of tries a build can have. Some compilations may fail due
# to unexpected environment circumstance. This variable defines how many tries
# the build should attempt before giving up.
BUILD_TRIES = 3

# The maximum number of tries a test can have. Some tests may fail due to
# unexpected environment circumstance. This variable defines how many tries the
# test should attempt before giving up.
TEST_TRIES = 3
37
38
39# Create the file/directory if it does not already exist.
40def _CreateDirectory(file_name):
41  directory = os.path.dirname(file_name)
42  if not os.path.exists(directory):
43    os.makedirs(directory)
44
45
class Task(object):
  """A single reproducing entity.

  A single test of performance with a particular set of flags. It records the
  flag set, the image, the check sum of the image and the cost.
  """

  # The command that will be used in the build stage to compile the tasks.
  BUILD_COMMAND = None
  # The command that will be used in the test stage to test the tasks.
  TEST_COMMAND = None
  # The directory to log the compilation and test results.
  LOG_DIRECTORY = None

  @staticmethod
  def InitLogCommand(build_command, test_command, log_directory):
    """Set up the build and test command for the task and the log directory.

    This framework is generic. It lets the client specify application specific
    compile and test methods by passing different build_command and
    test_command.

    Args:
      build_command: The command that will be used in the build stage to compile
        this task.
      test_command: The command that will be used in the test stage to test this
        task.
      log_directory: The directory to log the compilation and test results.
    """

    Task.BUILD_COMMAND = build_command
    Task.TEST_COMMAND = test_command
    Task.LOG_DIRECTORY = log_directory

  def __init__(self, flag_set):
    """Set up the optimization flag selection for this task.

    Args:
      flag_set: The optimization flag set that is encapsulated by this task.
    """

    self._flag_set = flag_set

    # A unique identifier that distinguishes this task from other tasks.
    self._task_identifier = uuid4()

    # (log directory, identifier) pair; formatted into per-task log file paths
    # by the logging methods below.
    self._log_path = (Task.LOG_DIRECTORY, self._task_identifier)

    # Initiate the hash value. The hash value is used so as not to recompute it
    # every time the hash method is called.
    self._hash_value = None

    # None indicates that the task has not been compiled/tested yet.
    self._build_cost = None
    self._exe_cost = None
    self._checksum = None
    self._image = None
    self._file_length = None
    self._text_length = None

  def __eq__(self, other):
    """Test whether two tasks are equal.

    Two tasks are equal if their flag_set are equal.

    Args:
      other: The other task with which this task is tested equality.
    Returns:
      True if the encapsulated flag sets are equal; False for any non-Task.
    """
    if isinstance(other, Task):
      return self.GetFlags() == other.GetFlags()
    return False

  def __hash__(self):
    if self._hash_value is None:
      # Cache the hash value of the flags, so as not to recompute them.
      self._hash_value = hash(self._flag_set)
    return self._hash_value

  def GetIdentifier(self, stage):
    """Get the identifier of the task in the stage.

    The flag set uniquely identifies a task in the build stage. The checksum of
    the image of the task uniquely identifies the task in the test stage.

    Args:
      stage: The stage (build/test) in which this method is called.
    Returns:
      Return the flag set in build stage and return the checksum in test stage.
    """

    # Define the dictionary for different stage function lookup.
    get_identifier_functions = {BUILD_STAGE: self.FormattedFlags,
                                TEST_STAGE: self.__GetCheckSum}

    assert stage in get_identifier_functions
    return get_identifier_functions[stage]()

  def GetResult(self, stage):
    """Get the performance results of the task in the stage.

    Args:
      stage: The stage (build/test) in which this method is called.
    Returns:
      Performance results.
    """

    # Define the dictionary for different stage function lookup.
    get_result_functions = {BUILD_STAGE: self.__GetBuildResult,
                            TEST_STAGE: self.GetTestResult}

    assert stage in get_result_functions

    return get_result_functions[stage]()

  def SetResult(self, stage, result):
    """Set the performance results of the task in the stage.

    This method is called by the pipelining worker to set the results for
    duplicated tasks.

    Args:
      stage: The stage (build/test) in which this method is called.
      result: The performance results of the stage.
    """

    # Define the dictionary for different stage function lookup.
    set_result_functions = {BUILD_STAGE: self.__SetBuildResult,
                            TEST_STAGE: self.__SetTestResult}

    assert stage in set_result_functions

    set_result_functions[stage](result)

  def Done(self, stage):
    """Check whether the stage is done.

    Args:
      stage: The stage to be checked, build or test.
    Returns:
      True if the stage is done.
    """

    # Define the dictionary for different result string lookup.
    done_string = {BUILD_STAGE: self._build_cost, TEST_STAGE: self._exe_cost}

    assert stage in done_string

    return done_string[stage] is not None

  def Work(self, stage):
    """Perform the task.

    Args:
      stage: The stage in which the task is performed, compile or test.
    """

    # Define the dictionary for different stage function lookup.
    work_functions = {BUILD_STAGE: self.__Compile, TEST_STAGE: self.__Test}

    assert stage in work_functions

    work_functions[stage]()

  def FormattedFlags(self):
    """Format the optimization flag set of this task.

    Returns:
      The formatted optimization flag set that is encapsulated by this task.
    """
    return str(self._flag_set.FormattedForUse())

  def GetFlags(self):
    """Get the optimization flag set of this task.

    Returns:
      The optimization flag set that is encapsulated by this task.
    """

    return self._flag_set

  def __GetCheckSum(self):
    """Get the compilation image checksum of this task.

    Returns:
      The compilation image checksum of this task.
    """

    # The checksum should be computed before this method is called.
    assert self._checksum is not None
    return self._checksum

  def __Compile(self):
    """Run a compile.

    This method compiles an image using the present flags, gets the image,
    tests the existence of the image, gathers monitoring information, and sets
    the internal cost (fitness) for this set of flags.
    """

    # Format the flags as a string as input to compile command. The unique
    # identifier is passed to the compile command. If concurrent processes are
    # used to compile different tasks, these processes can use the identifier to
    # write to different file.
    flags = self._flag_set.FormattedForUse()
    command = '%s %s %s' % (Task.BUILD_COMMAND, ' '.join(flags),
                            self._task_identifier)

    # Start from the failure values so that a build that never succeeds leaves
    # every field in a consistent error state. Previously these names were only
    # bound on success, raising NameError on the failure path.
    cost = checksum = image = file_length = text_length = ERROR_STRING
    err = ''

    # Try BUILD_TRIES number of times before confirming that the build fails.
    for _ in range(BUILD_TRIES):
      try:
        # Execute the command and get the execution status/results.
        p = subprocess.Popen(command.split(),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (out, err) = p.communicate()

        if out:
          out = out.strip()
          if out != ERROR_STRING:
            # Each build results contains the checksum of the result image, the
            # performance cost of the build, the compilation image, the length
            # of the build, and the length of the text section of the build.
            (checksum, cost, image, file_length, text_length) = out.split()
            # Build successfully.
            break

        # Build failed.
        cost = ERROR_STRING
      except Exception:
        # NOTE: this previously read 'except _:', which re-used the loop
        # counter (an int) as the exception class and itself raised. If there
        # is an exception getting the cost information of the build, the build
        # failed.
        cost = ERROR_STRING

    # Convert the build cost from String to float. The build cost is used to
    # compare a task with another task. Set the build cost of the failing task
    # to the max integer (sys.maxsize works on both Python 2.6+ and 3). The for
    # loop keeps trying until either there is a success or BUILD_TRIES number
    # of tries have been conducted.
    self._build_cost = sys.maxsize if cost == ERROR_STRING else float(cost)

    self._checksum = checksum
    self._file_length = file_length
    self._text_length = text_length
    self._image = image

    self.__LogBuildCost(err)

  def __Test(self):
    """Test the task against benchmark(s) using the input test command."""

    # Ensure that the task is compiled before being tested.
    assert self._image is not None

    # If the task does not compile, no need to test.
    if self._image == ERROR_STRING:
      self._exe_cost = ERROR_STRING
      return

    # The unique identifier is passed to the test command. If concurrent
    # processes are used to compile different tasks, these processes can use the
    # identifier to write to different file.
    command = '%s %s %s' % (Task.TEST_COMMAND, self._image,
                            self._task_identifier)

    # Start from the failure values so that a test that never succeeds (or
    # whose process never starts) leaves consistent state; 'err' was previously
    # unbound if Popen raised on every try.
    cost = ERROR_STRING
    err = ''

    # Try TEST_TRIES number of times before confirming that the test fails.
    for _ in range(TEST_TRIES):
      try:
        p = subprocess.Popen(command.split(),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (out, err) = p.communicate()

        if out:
          out = out.strip()
          if out != ERROR_STRING:
            # The test results contains the performance cost of the test.
            cost = out
            # Test successfully.
            break

        # Test failed.
        cost = ERROR_STRING
      except Exception:
        # If there is exception getting the cost information of the test, the
        # test failed. The for loop will keep trying until either there is a
        # success or TEST_TRIES number of tries have been conducted.
        cost = ERROR_STRING

    self._exe_cost = sys.maxsize if (cost == ERROR_STRING) else float(cost)

    self.__LogTestCost(err)

  def __SetBuildResult(self, result):
    """Record the build stage results of this task.

    Args:
      result: A (checksum, build_cost, image, file_length, text_length) tuple,
        as produced by __GetBuildResult. (The old Python-2-only tuple parameter
        unpacking is replaced by unpacking inside the body.)
    """
    (self._checksum, self._build_cost, self._image, self._file_length,
     self._text_length) = result

  def __GetBuildResult(self):
    # Inverse of __SetBuildResult; same tuple layout.
    return (self._checksum, self._build_cost, self._image, self._file_length,
            self._text_length)

  def GetTestResult(self):
    return self._exe_cost

  def __SetTestResult(self, exe_cost):
    self._exe_cost = exe_cost

  def LogSteeringCost(self):
    """Log the performance results for the task.

    This method is called by the steering stage and this method writes the
    results out to a file. The results include the build and the test results.
    """

    steering_log = '%s/%s/steering.txt' % self._log_path

    _CreateDirectory(steering_log)

    with open(steering_log, 'w') as out_file:
      # Include the build and the test results.
      steering_result = (self._flag_set, self._checksum, self._build_cost,
                         self._image, self._file_length, self._text_length,
                         self._exe_cost)

      # Write out the result in the comma-separated format (CSV).
      out_file.write('%s,%s,%s,%s,%s,%s,%s\n' % steering_result)

  def __LogBuildCost(self, log):
    """Log the build results for the task.

    The build results include the compilation time of the build, the result
    image, the checksum, the file length and the text length of the image.
    The file length of the image includes the length of the file of the image.
    The text length only includes the length of the text section of the image.

    Args:
      log: The build log of this task.
    """

    build_result_log = '%s/%s/build.txt' % self._log_path

    _CreateDirectory(build_result_log)

    with open(build_result_log, 'w') as out_file:
      build_result = (self._flag_set, self._build_cost, self._image,
                      self._checksum, self._file_length, self._text_length)

      # Write out the result in the comma-separated format (CSV).
      out_file.write('%s,%s,%s,%s,%s,%s\n' % build_result)

    # The build information about running the build.
    build_run_log = '%s/%s/build_log.txt' % self._log_path
    _CreateDirectory(build_run_log)

    with open(build_run_log, 'w') as out_log_file:
      # Write out the execution information.
      out_log_file.write('%s' % log)

  def __LogTestCost(self, log):
    """Log the test results for the task.

    The test results include the runtime execution time of the test.

    Args:
      log: The test log of this task.
    """

    test_log = '%s/%s/test.txt' % self._log_path

    _CreateDirectory(test_log)

    with open(test_log, 'w') as out_file:
      test_result = (self._flag_set, self._checksum, self._exe_cost)

      # Write out the result in the comma-separated format (CSV).
      out_file.write('%s,%s,%s\n' % test_result)

    # The execution information about running the test.
    test_run_log = '%s/%s/test_log.txt' % self._log_path

    _CreateDirectory(test_run_log)

    with open(test_run_log, 'w') as out_log_file:
      # Append the test log information.
      out_log_file.write('%s' % log)

  def IsImproved(self, other):
    """Compare the current task with another task.

    Args:
      other: The other task against which the current task is compared.

    Returns:
      True if this task has improvement upon the other task.
    """

    # The execution costs must have been initiated.
    assert self._exe_cost is not None
    assert other.GetTestResult() is not None

    return self._exe_cost < other.GetTestResult()
451