• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2017, The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""Utility functions for atest."""
16
17
18# pylint: disable=import-outside-toplevel
19# pylint: disable=too-many-lines
20
21from __future__ import print_function
22
23from dataclasses import dataclass
24import datetime
25import enum
26import fnmatch
27import hashlib
28import html
29import importlib
30import itertools
31import json
32import logging
33from multiprocessing import Process
34import os
35from pathlib import Path
36import pickle
37import platform
38import re
39import shutil
40import subprocess
41import sys
42from threading import Thread
43import traceback
44from typing import Any, Dict, List, Set, Tuple
45import urllib
46import xml.etree.ElementTree as ET
47import zipfile
48
49from atest import atest_decorator
50from atest import constants
51from atest.atest_enum import DetectType, ExitCode, FilterType
52from atest.metrics import metrics
53from atest.metrics import metrics_utils
54from atest.tf_proto import test_record_pb2
55
# ANSI sequence that resets all terminal formatting; trailing newline so the
# reset lands on its own line (written by run_limited_output on completion).
_BASH_RESET_CODE = '\033[0m\n'
# Default dist output directory under the source tree root.
DIST_OUT_DIR = Path(
    os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd()) + '/out/dist/'
)
# Matches the file extension of a mainline module artifact.
MAINLINE_MODULES_EXT_RE = re.compile(r'\.(apex|apks|apk)$')
# Matches a '<test>[<module>.(apk|apks|apex)]' style test reference; used by
# get_test_and_mainline_modules.
TEST_WITH_MAINLINE_MODULES_RE = re.compile(
    r'(?P<test>.*)\[(?P<mainline_modules>.*' r'[.](apk|apks|apex))\]$'
)

# Arbitrary number to limit stdout for failed runs in run_limited_output.
# Reason for its use is that the make command itself has its own carriage
# return output mechanism that when collected line by line causes the streaming
# full_output list to be extremely large.
_FAILED_OUTPUT_LINE_LIMIT = 100
# Regular expression to match the start of a ninja compile:
# ex: [ 99% 39710/39711]
_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
# Prefix emitted on build failure lines; _capture_fail_section keys on it.
_BUILD_FAILURE = 'FAILED: '
# MD5 hex digest of the ANDROID_BUILD_TOP path ('' when unset).
BUILD_TOP_HASH = hashlib.md5(
    os.environ.get(constants.ANDROID_BUILD_TOP, '').encode()
).hexdigest()
# Fallback terminal dimensions used when the real size cannot be queried.
_DEFAULT_TERMINAL_WIDTH = 80
_DEFAULT_TERMINAL_HEIGHT = 25
# Soong build entry point, relative to the build top (see get_build_cmd).
_BUILD_CMD = 'build/soong/soong_ui.bash'
# Shell pipeline listing files modified between HEAD and the remote tracking
# branch (committed but not yet merged); '{}' is the git directory.
_FIND_MODIFIED_FILES_CMDS = (
    'cd {};'
    'local_branch=$(git rev-parse --abbrev-ref HEAD);'
    "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
    # Get the number of commits from local branch to remote branch.
    'ahead=$(git rev-list --left-right --count $local_branch...$remote_branch '
    "| awk '{{print $1}}');"
    # Get the list of modified files from HEAD to previous $ahead generation.
    'git diff HEAD~$ahead --name-only'
)
# File extensions of Android build files.
_ANDROID_BUILD_EXT = ('.bp', '.mk')

# Set of special chars for various purposes.
_REGEX_CHARS = {'[', '(', '{', '|', '\\', '*', '?', '+', '^'}
_WILDCARD_CHARS = {'?', '*'}

# Filters ending in a wildcard char vs. ending in a regular word char.
_WILDCARD_FILTER_RE = re.compile(r'.*[?|*]$')
_REGULAR_FILTER_RE = re.compile(r'.*\w$')
# Printed before the html log line. May be used in tests to parse the html path.
_HTML_LOG_PRINT_PREFIX = 'To access logs, press "ctrl" and click on'

# Maps known error messages to a remediation suggestion shown to the user.
SUGGESTIONS = {
    # (b/177626045) If Atest does not install target application properly.
    'Runner reported an invalid method': 'Please reflash the device(s).',
}

# Environment variables passed to the build command
# (populated by update_build_env / build).
_BUILD_ENV = {}

# Version component of the cache root path (see get_cache_root); bumping it
# effectively invalidates previously written caches.
CACHE_VERSION = 1

# Reference to stdout captured at import time, so color-capability detection
# (see colorize/_has_colors) is unaffected by later sys.stdout redirection.
_original_sys_stdout = sys.stdout
111
112
@dataclass
class BuildEnvProfiler:
  """Represents the condition before and after triggering a build."""

  # Path to the build's combined ninja file.
  ninja_file: Path
  # Modification time of ninja_file when the profile was taken.
  ninja_file_mtime: float
  # Path to the build variable file.
  variable_file: Path
  # MD5 checksum of variable_file when the profile was taken.
  variable_file_md5: str
  # Whether the build output directory was clean at profiling time.
  clean_out: bool
  # Whether the Android build files (.bp/.mk) passed the integrity check.
  build_files_integrity: bool
123
124
@enum.unique
class BuildOutputMode(enum.Enum):
  """Enumerates the supported ways of displaying build output."""

  STREAMED = 'streamed'
  LOGGED = 'logged'

  def __init__(self, arg_name: str):
    # Enum.__init__ receives the member value; keep it around as the
    # human-readable description of the mode.
    self._description = arg_name

  def description(self):
    """Return the description string for this output mode."""
    return self._description
138
139
@dataclass
class AndroidVariables:
  """Snapshot of the Android build-related environment variables."""

  build_top: str
  product_out: str
  target_out_cases: str
  host_out: str
  host_out_cases: str
  target_product: str
  build_variant: str

  def __init__(self):
    # Read every variable once at construction time; values are None for
    # anything unset in the environment.
    getenv = os.getenv
    self.build_top = getenv('ANDROID_BUILD_TOP')
    self.product_out = getenv('ANDROID_PRODUCT_OUT')
    self.target_out_cases = getenv('ANDROID_TARGET_OUT_TESTCASES')
    self.host_out = getenv('ANDROID_HOST_OUT')
    self.host_out_cases = getenv('ANDROID_HOST_OUT_TESTCASES')
    self.target_product = getenv('TARGET_PRODUCT')
    self.build_variant = getenv('TARGET_BUILD_VARIANT')
160
161
def get_build_top(*joinpaths: Any) -> Path:
  """Return the absolute Path of the repo root joined with the given parts."""
  build_top = AndroidVariables().build_top
  return Path(build_top, *joinpaths)
165
166
def get_host_out(*joinpaths: Any) -> Path:
  """Return the absolute host-out Path joined with the given parts."""
  host_out = AndroidVariables().host_out
  return Path(host_out, *joinpaths)
170
171
def get_product_out(*joinpaths: Any) -> Path:
  """Return the absolute product-out Path joined with the given parts."""
  product_out = AndroidVariables().product_out
  return Path(product_out, *joinpaths)
175
176
def get_index_path(*filename: Any) -> Path:
  """Return the absolute path of the desired index file under 'indices'."""
  parts = ('indices', *filename)
  return get_host_out(*parts)
180
181
def getenv_abs_path(env: str, suffix: str = None) -> Path:
  """Translate an environment variable into an absolute Path.

  Args:
      env: Name of the environment variable to read.
      suffix: Optional path component appended to the resolved value.

  Returns:
      The absolute Path of the given environment variable, or None when the
      variable is unset or empty.
  """
  value = os.getenv(env)
  if not value:
    return None

  env_path = Path(value)
  if not env_path.is_absolute():
    # Relative values are interpreted against the source tree root.
    return get_build_top(env_path, suffix) if suffix else get_build_top(env_path)

  return env_path.joinpath(suffix) if suffix else env_path
201
202
def get_build_cmd(dump=False):
  """Compose build command with no-absolute path and flag "--make-mode".

  Args:
      dump: boolean that determines the option of build/soong/soong_ui.bash.
            True: used to dump build variables, equivalent to printconfig. e.g.
              build/soong/soong_ui.bash --dumpvar-mode <VAR_NAME>
            False: (default) used to build targets in make mode. e.g.
              build/soong/soong_ui.bash --make-mode <MOD_NAME>

  Returns:
      A list of soong build command.
  """
  # Use a path relative to the current working directory so the printed
  # command stays short and portable across checkouts.
  make_cmd = '%s/%s' % (
      os.path.relpath(
          os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd()), os.getcwd()
      ),
      _BUILD_CMD,
  )
  if dump:
    return [make_cmd, '--dumpvar-mode', 'report_config']
  return [make_cmd, '--make-mode', 'WRAPPER_TOOL=atest']
225
226
def _capture_fail_section(full_log):
  """Extract the error section from build output.

  Capturing starts at the first line beginning with the build failure marker
  and stops as soon as a new ninja compile-status line appears.

  Args:
      full_log: List of strings representing full output of build.

  Returns:
      List of strings that are build errors.
  """
  captured = []
  capturing = False
  for line in full_log:
    if capturing:
      if _BUILD_COMPILE_STATUS.match(line):
        break
      captured.append(line)
    elif line.startswith(_BUILD_FAILURE):
      capturing = True
      captured.append(line)
  return captured
246
247
def _capture_limited_output(full_log):
  """Return a trimmed error message extracted from the build output.

  Args:
      full_log: List of strings representing full output of build.

  Returns:
      A single string of build errors, limited to the last
      _FAILED_OUTPUT_LINE_LIMIT lines when longer.
  """
  # Fall back to the whole log when no explicit failure section is found.
  lines = _capture_fail_section(full_log) or full_log
  if len(lines) >= _FAILED_OUTPUT_LINE_LIMIT:
    lines = lines[-_FAILED_OUTPUT_LINE_LIMIT:]
  return 'Output (may be trimmed):\n%s' % ''.join(lines)
265
266
# TODO: b/187122993 refine subprocess with 'with-statement' in fixit week.
def run_limited_output(cmd, env_vars=None):
  """Runs a given command and streams the output on a single line in stdout.

  Each output line overwrites the previous one via carriage returns, so the
  terminal shows only the latest line while the command runs. The complete
  output is retained and attached to the exception on failure.

  Args:
      cmd: A list of strings representing the command to run.
      env_vars: Optional arg. Dict of env vars to set during build.

  Raises:
      subprocess.CalledProcessError: When the command exits with a non-0
          exitcode.
  """
  # Send stderr to stdout so we only have to deal with a single pipe.
  with subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env_vars
  ) as proc:
    sys.stdout.write('\n')
    term_width, _ = get_terminal_size()
    # Blank line used to erase the previously printed line in-place.
    white_space = ' ' * int(term_width)
    # Every line is kept so it can be passed to CalledProcessError below.
    full_output = []
    while proc.poll() is None:
      line = proc.stdout.readline().decode('utf-8')
      # Readline will often return empty strings.
      if not line:
        continue
      full_output.append(line)
      # Trim the line to the width of the terminal.
      # Note: Does not handle terminal resizing, which is probably not
      #       worth checking the width every loop.
      if len(line) >= term_width:
        line = line[: term_width - 1]
      # Clear the last line we outputted.
      sys.stdout.write('\r%s\r' % white_space)
      sys.stdout.write('%s' % line.strip())
      sys.stdout.flush()
    # Reset stdout (on bash) to remove any custom formatting and newline.
    sys.stdout.write(_BASH_RESET_CODE)
    sys.stdout.flush()
    # Wait for the Popen to finish completely before checking the
    # returncode.
    proc.wait()
    if proc.returncode != 0:
      raise subprocess.CalledProcessError(proc.returncode, cmd, full_output)
310
311
def get_build_out_dir(*joinpaths) -> Path:
  """Get android build out directory.

  The order of the rules are:
  1. OUT_DIR
  2. OUT_DIR_COMMON_BASE
  3. ANDROID_BUILD_TOP/out

  e.g. OUT_DIR='/disk1/out' -> '/disk1/out'
       OUT_DIR='out_dir'    -> '<build_top>/out_dir'

       Assume the branch name is 'aosp-main':
       OUT_DIR_COMMON_BASE='/disk2/out' -> '/disk2/out/aosp-main'
       OUT_DIR_COMMON_BASE='out_dir'    -> '<build_top>/out_dir/aosp-main'

  Args:
      *joinpaths: Optional path components appended to the out directory.

  Returns:
      Absolute Path of the out directory.
  """
  out_dir = getenv_abs_path('OUT_DIR')
  if out_dir:
    return out_dir.joinpath(*joinpaths)

  # https://source.android.com/setup/build/initializing#using-a-separate-output-directory
  # OUT_DIR_COMMON_BASE nests each checkout's output under the checkout's
  # directory basename.
  basename = get_build_top().name
  out_dir_common_base = getenv_abs_path('OUT_DIR_COMMON_BASE', basename)
  if out_dir_common_base:
    return out_dir_common_base.joinpath(*joinpaths)

  return get_build_top('out').joinpath(*joinpaths)
341
342
def update_build_env(env: Dict[str, str]):
  """Merge the given mapping into the module-level build environment.

  Args:
      env: Dict of environment variable names to values; existing keys in
        _BUILD_ENV are overwritten.
  """
  # pylint: disable=global-statement, global-variable-not-assigned
  global _BUILD_ENV
  _BUILD_ENV.update(env)
348
349
def build(build_targets: Set[str]):
  """Shell out and invoke run_build_cmd to make build_targets.

  Args:
      build_targets: A set of strings of build targets to make.

  Returns:
      Boolean of whether build command was successful, True if nothing to
      build.
  """
  if not build_targets:
    logging.debug('No build targets, skipping build.')
    return True

  # pylint: disable=global-statement, global-variable-not-assigned
  global _BUILD_ENV
  # Inherit the caller's full environment so the soong invocation sees it.
  update_build_env(os.environ.copy())
  print(
      '\n%s\n%s'
      % (mark_cyan('Building Dependencies...'), ', '.join(build_targets))
  )
  logging.debug('Building Dependencies: %s', ' '.join(build_targets))
  build_cmd = get_build_cmd() + list(build_targets)
  return _run_build_cmd(build_cmd, _BUILD_ENV)
375
376
def _run_build_cmd_with_limited_output(
    cmd: List[str], env_vars: Dict[str, str] = None
) -> None:
  """Runs the build command and streams the output on a single line in stdout.

  Args:
      cmd: A list of strings representing the command to run.
      env_vars: Optional arg. Dict of env vars to set during build.

  Raises:
      subprocess.CalledProcessError: When the command exits with a non-0
          exitcode. Its output carries the build error log when available,
          otherwise a trimmed capture of the command output.
  """
  try:
    run_limited_output(cmd, env_vars=env_vars)
  except subprocess.CalledProcessError as e:
    # Prefer the dedicated error log written by the build system
    # ("OUT_DIR/error.log") over parsing the raw command output.
    error_log_file = get_build_out_dir('error.log')
    output = ''
    if error_log_file.is_file() and error_log_file.stat().st_size > 0:
      output = error_log_file.read_text(encoding='utf-8')
    if not output:
      output = _capture_limited_output(e.output)
    # Chain the original exception so the root cause stays visible.
    raise subprocess.CalledProcessError(e.returncode, e.cmd, output) from e
403
404
def _run_build_cmd(cmd: List[str], env_vars: Dict[str, str]):
  """The main process of building targets.

  Args:
      cmd: A list of soong command.
      env_vars: Dict of environment variables used for build.

  Returns:
      Boolean of whether build command was successful, True if nothing to
      build.
  """
  logging.debug('Executing command: %s', cmd)
  build_profiler = _build_env_profiling()
  try:
    if env_vars.get('BUILD_OUTPUT_MODE') == BuildOutputMode.STREAMED.value:
      print()
      subprocess.check_call(cmd, stderr=subprocess.STDOUT, env=env_vars)
    else:
      # Note that piping stdout forces Soong to switch to 'dumb terminal
      # mode' which only prints completed actions. This gives users the
      # impression that actions are taking longer than they really are.
      # See b/233044822 for more details.
      log_path = get_build_out_dir('verbose.log.gz')
      # Fixed: the two adjacent string literals previously concatenated to
      # "simple outputmode" (missing space).
      print(
          '\n(Build log may not reflect actual status in simple output '
          'mode; check {} for detail after build finishes.)'.format(
              mark_cyan(f'{log_path}')
          ),
          end='',
      )
      _run_build_cmd_with_limited_output(cmd, env_vars=env_vars)
    _send_build_condition_metrics(build_profiler, cmd)
    print_and_log_info('Build successful')
    return True
  except subprocess.CalledProcessError as err:
    print_and_log_error('Build failure when running: %s', ' '.join(cmd))
    if err.output:
      print_and_log_error(err.output)
    return False
444
445
# pylint: disable=unused-argument
def get_result_server_args(for_test_mapping=False):
  """Return the list of args for communication with the result server.

  Args:
      for_test_mapping: True if the test run is for Test Mapping to include
        additional reporting args. Default is False.
  """
  # The flag is currently unused; customize the test mapping arguments here
  # when needed.
  return constants.RESULT_SERVER_ARGS
456
457
def sort_and_group(iterable, key):
  """Sort *iterable* by *key*, then group adjacent items sharing that key."""
  ordered = sorted(iterable, key=key)
  return itertools.groupby(ordered, key=key)
461
462
def is_supported_mainline_module(installed_path: str) -> re.Match:
  """Return a match object when the path ends in a mainline module extension."""
  return MAINLINE_MODULES_EXT_RE.search(installed_path)
466
467
468def get_test_and_mainline_modules(test_name: str) -> re.Match:
469  """Return test name and mainline modules from the given test."""
470  return TEST_WITH_MAINLINE_MODULES_RE.match(test_name)
471
472
def is_test_mapping(args):
  """Check if the atest command intends to run tests in test mapping.

  When atest runs tests in test mapping, it must have at most one test
  specified. If a test is specified, it must be started with `:`, which
  means the test value is a test group name in a TEST_MAPPING file, e.g.
  `:postsubmit`.

  Running with --host-unit-test-only never counts as a test-mapping run,
  while any explicit test-mapping option forces one.

  Args:
      args: arg parsed object.

  Returns:
      True if the args indicates atest shall run tests in test mapping. False
      otherwise.
  """
  if args.host_unit_test_only:
    return False
  if args.test_mapping or args.include_subdirs or not args.tests:
    return True
  # A single leading-colon reference (e.g. ':postsubmit') implicitly
  # indicates running in test-mapping mode.
  return len(args.tests) == 1 and args.tests[0][0] == ':'
499
500
@atest_decorator.static_var('cached_has_colors', {})
def _has_colors(stream):
  """Check whether the given output stream can interpret ANSI color codes.

  The answer is computed once per stream and memoized on the function's
  static 'cached_has_colors' dict.

  Args:
      stream: The standard file stream.

  Returns:
      True if the file stream can interpret the ANSI color code.
  """
  cache = _has_colors.cached_has_colors
  if stream in cache:
    return cache[stream]
  # Following from Python cookbook, #475186: only real TTYs get colors.
  # curses.tigetnum() cannot be used for telling supported color numbers
  # because it does not come with the prebuilt py3-cmd.
  if not hasattr(stream, 'isatty') or not stream.isatty():
    cache[stream] = False
    return False
  cache[stream] = True
  return True
526
527
def colorize(text, color, bp_color=None):
  """Convert to colorful string with ANSI escape code.

  Args:
      text: A string to print.
      color: Foreground(Text) color which is an ANSI code shift for colorful
        print. They are defined in constants_default.py.
      bp_color: Background color which is an ANSI code shift for colorful
        print.

  Returns:
      Colorful string with ANSI escape code, or the plain text when the
      original stdout cannot interpret colors.
  """
  if not _has_colors(_original_sys_stdout):
    return text
  # Foreground (text) colors occupy ANSI range 30-37.
  text_color = 30 + color
  # Background colors occupy ANSI range 40-47; empty when unset.
  background = ';%d' % (40 + bp_color) if bp_color else ''
  return '%s%d%sm%s%s' % ('\033[1;', text_color, background, text, '\033[0m')
562
563
def mark_red(text):
  """Return *text* wrapped in red ANSI color codes (does not print)."""
  return colorize(text, constants.RED)


def mark_yellow(text):
  """Return *text* wrapped in yellow ANSI color codes (does not print)."""
  return colorize(text, constants.YELLOW)


def mark_green(text):
  """Return *text* wrapped in green ANSI color codes (does not print)."""
  return colorize(text, constants.GREEN)


def mark_magenta(text):
  """Return *text* wrapped in magenta ANSI color codes (does not print)."""
  return colorize(text, constants.MAGENTA)


def mark_cyan(text):
  """Return *text* wrapped in cyan ANSI color codes (does not print)."""
  return colorize(text, constants.CYAN)


def mark_blue(text):
  """Return *text* wrapped in blue ANSI color codes (does not print)."""
  return colorize(text, constants.BLUE)
592
593
def colorful_print(text, color, bp_color=None, auto_wrap=True):
  """Print out the text with color.

  Args:
      text: A string to print.
      color: Foreground(Text) color which is an ANSI code shift for colorful
        print. They are defined in constants_default.py.
      bp_color: Background color which is an ANSI code shift for colorful
        print.
      auto_wrap: If True, a newline is appended after the text.
  """
  colored = colorize(text, color, bp_color)
  line_end = '\n' if auto_wrap else ''
  print(colored, end=line_end)
609
610
def _print_to_console(
    prefix: str, color: int, msg: Any, *fmt_args: list[Any]
) -> None:
  """Format a message and print it to the console in the given color.

  Args:
    prefix: Text prepended to the formatted message (e.g. 'Error: ').
    color: ANSI color shift used for the console output.
    msg: The message to format.
    *fmt_args: Format arguments for the message.
  """
  if fmt_args:
    try:
      rendered = msg % fmt_args
    except (TypeError, ValueError):
      # A mismatched format string should not crash atest; show the
      # traceback and skip printing the message instead.
      traceback.print_exc()
      return
  else:
    rendered = str(msg)
  colorful_print(f'{prefix}{rendered}', color)
629
630
def print_and_log_error(msg, *fmt_args):
  """Print error message to the console and log it.

  Args:
    msg: The message to print.
    *fmt_args: Format arguments for the message.
  """
  # Logging uses lazy %-style formatting; the console copy is rendered
  # eagerly (and colored red) by _print_to_console.
  logging.error(msg, *fmt_args)
  _print_to_console('Error: ', constants.RED, msg, *fmt_args)


def print_and_log_warning(msg, *fmt_args):
  """Print warning message to the console and log it.

  Args:
    msg: The message to print.
    *fmt_args: Format arguments for the message.
  """
  # Same pattern as print_and_log_error, with a yellow console copy.
  logging.warning(msg, *fmt_args)
  _print_to_console('Warning: ', constants.YELLOW, msg, *fmt_args)


def print_and_log_info(msg, *fmt_args):
  """Print info message to the console and log it.

  Args:
    msg: The message to print.
    *fmt_args: Format arguments for the message.
  """
  # Same pattern as print_and_log_error, with a white console copy.
  logging.info(msg, *fmt_args)
  _print_to_console('Info: ', constants.WHITE, msg, *fmt_args)
662
663
def get_terminal_size():
  """Get terminal size and return a tuple.

  Returns:
      2 integers: the size of X(columns) and Y(lines/rows).
  """
  # We need the width to know how many characters to clear when carriage
  # returning; fall back to 80x25 when the size cannot be queried.
  fallback = (_DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT)
  size = shutil.get_terminal_size(fallback=fallback)
  return size.columns, size.lines
676
677
678def _get_hashed_file_name(main_file_name):
679  """Convert the input string to a md5-hashed string.
680
681  If file_extension is
682
683     given, returns $(hashed_string).$(file_extension), otherwise
684     $(hashed_string).cache.
685
686  Args:
687      main_file_name: The input string need to be hashed.
688
689  Returns:
690      A string as hashed file name with .cache file extension.
691  """
692  hashed_fn = hashlib.md5(str(main_file_name).encode())
693  hashed_name = hashed_fn.hexdigest()
694  return hashed_name + '.cache'
695
696
def md5sum(filename):
  """Generate MD5 checksum of a file.

  Args:
      filename: A string or Path of a filename.

  Returns:
      A string of hashed MD5 checksum, or '' when the file does not exist.
  """
  file_path = Path(filename)
  if not file_path.is_file():
    return ''
  # The file is read in binary mode, so the content is always bytes; the
  # previous str-to-bytes re-encoding branch was unreachable and is removed.
  return hashlib.md5(file_path.read_bytes()).hexdigest()
714
715
def check_md5(check_file, missing_ok=False):
  """Method equivalent to 'md5sum --check /file/to/check'.

  Args:
      check_file: A string of filename that stores filename and its md5
        checksum.
      missing_ok: A boolean that considers OK even when the check_file does
        not exist. Using missing_ok=True allows ignoring the md5 check,
        especially for an initial run before the check_file has been
        generated. Using missing_ok=False ensures file consistency and that
        the process completed successfully.

  Returns:
      When missing_ok is True (soft check):
        - True if the checksum is consistent with the actual MD5, even when
          the check_file is missing or not valid JSON.
        - False when the checksum is inconsistent with the actual MD5.
      When missing_ok is False (ensure the process completed properly):
        - True if the checksum is consistent with the actual MD5.
        - False otherwise.
  """
  if not Path(check_file).is_file():
    if not missing_ok:
      logging.debug('Unable to verify: %s not found.', check_file)
    return missing_ok
  checksums = load_json_safely(check_file)
  if not checksums:
    return False
  for recorded_file, recorded_md5 in checksums.items():
    if md5sum(recorded_file) != recorded_md5:
      logging.debug('%s has altered.', recorded_file)
      return False
  return True
749
750
def save_md5(filenames, save_file):
  """Method equivalent to 'md5sum file1 file2 > /file/to/check'.

  Args:
      filenames: A list of filenames.
      save_file: Filename for storing files and their md5 checksums.
  """
  data = {}
  for f in filenames:
    name = Path(f)
    if not name.is_file():
      # Actually skip non-files: previously the entry was still recorded
      # with an empty checksum despite this 'ignore' warning.
      print_and_log_warning(' ignore %s: not a file.', name)
      continue
    data[str(name)] = md5sum(name)
  with open(save_file, 'w+', encoding='utf-8') as checksum_file:
    json.dump(data, checksum_file)
766
767
def get_cache_root():
  """Get the root path dir for cache.

  The path will look like:
     <build_out_dir>/atest_cache/ver_<CACHE_VERSION>/<product out basename>

  Returns:
      A string of the path of the root dir of cache.
  """
  # The cache lives in the build output directory: that directory is
  # periodically cleaned, so the cache cannot grow without bound, and the
  # files are small compared with typical build output. It also sits next to
  # atest_bazel_workspace, which makes manual cleanup easy. The product-out
  # basename is part of the path because the same branch can have different
  # module-info per lunch target.
  product_out = os.environ.get(
      constants.ANDROID_PRODUCT_OUT, constants.ANDROID_PRODUCT_OUT
  )
  return os.path.join(
      get_build_out_dir(),
      'atest_cache',
      f'ver_{CACHE_VERSION}',
      os.path.basename(product_out),
  )
796
797
def get_test_info_cache_path(test_reference, cache_root=None):
  """Get the cache path of the desired test_infos.

  Args:
      test_reference: A string of the test.
      cache_root: Folder path where caches are stored; defaults to
        get_cache_root().

  Returns:
      A string of the path of test_info cache.
  """
  root = cache_root or get_cache_root()
  return os.path.join(root, _get_hashed_file_name(test_reference))
811
812
def update_test_info_cache(test_reference, test_infos, cache_root=None):
  """Pickle a set of test_info objects into a per-reference cache file.

  Args:
      test_reference: A string referencing a test.
      test_infos: A set of TestInfos.
      cache_root: Folder path for saving caches.
  """
  if not cache_root:
    cache_root = get_cache_root()
  os.makedirs(cache_root, exist_ok=True)
  cache_path = get_test_info_cache_path(test_reference, cache_root)
  try:
    with open(cache_path, 'wb') as cache_file:
      logging.debug('Saving cache %s.', cache_path)
      pickle.dump(test_infos, cache_file, protocol=2)
  except (pickle.PicklingError, TypeError, IOError) as err:
    # A failed cache write is non-fatal: log it and report the exception
    # through metrics.
    logging.debug('Exception raised: %s', err)
    metrics_utils.handle_exc_and_send_exit_event(constants.ACCESS_CACHE_FAILURE)
838
839
def load_test_info_cache(test_reference, cache_root=None):
  """Load cached test_infos for the given test reference.

  Args:
      test_reference: A string referencing a test.
      cache_root: Folder path for finding caches.

  Returns:
      The unpickled set of TestInfo if a cache is found, else None.
  """
  if not cache_root:
    cache_root = get_cache_root()

  cache_file = get_test_info_cache_path(test_reference, cache_root)
  if not os.path.isfile(cache_file):
    return None
  logging.debug('Loading cache %s.', cache_file)
  try:
    with open(cache_file, 'rb') as cached:
      return pickle.load(cached, encoding='utf-8')
  except (
      pickle.UnpicklingError,
      ValueError,
      TypeError,
      EOFError,
      IOError,
      ImportError,
  ) as err:
    # A corrupt cache is non-fatal: remove it, log the error, and report
    # the exception through metrics.
    logging.debug('Exception raised: %s', err)
    os.remove(cache_file)
    metrics_utils.handle_exc_and_send_exit_event(constants.ACCESS_CACHE_FAILURE)
  return None
875
876
def clean_test_info_caches(tests, cache_root=None):
  """Remove the cache file of every given test reference.

  Args:
      tests: A list of test references.
      cache_root: Folder path for finding caches.
  """
  root = cache_root or get_cache_root()
  for test in tests:
    cache_file = get_test_info_cache_path(test, root)
    if not os.path.isfile(cache_file):
      continue
    logging.debug('Removing cache: %s', cache_file)
    try:
      os.remove(cache_file)
    except IOError as err:
      # A failed removal is non-fatal: log it and report through metrics.
      logging.debug('Exception raised: %s', err)
      metrics_utils.handle_exc_and_send_exit_event(
          constants.ACCESS_CACHE_FAILURE
      )
897
898
def get_modified_files(root_dir):
  """Get the git modified files.

  The git path here is git top level of the root_dir. It's inevitable to
  utilise different commands to fulfill 2 scenario:

      1. locate unstaged/staged files
      2. locate committed files but not yet merged.
  the 'git_status_cmd' fulfils the former while the 'find_modified_files'
  fulfils the latter.

  Args:
      root_dir: the root where it starts finding.

  Returns:
      A set of modified files altered since last commit.
  """
  modified_files = set()
  try:
    # TODO: (@jimtang) abandon using git command within Atest.
    find_git_cmd = f'cd {root_dir}; git rev-parse --show-toplevel 2>/dev/null'
    git_paths = (
        subprocess.check_output(find_git_cmd, shell=True).decode().splitlines()
    )
    for git_path in git_paths:
      # Find modified files from git working tree status.
      # ($NF keeps only the file path column of 'git status --short'.)
      git_status_cmd = (
          "repo forall {} -c git status --short | awk '{{print $NF}}'"
      ).format(git_path)
      modified_wo_commit = (
          subprocess.check_output(git_status_cmd, shell=True)
          .decode()
          .rstrip()
          .splitlines()
      )
      for change in modified_wo_commit:
        modified_files.add(os.path.normpath('{}/{}'.format(git_path, change)))
      # Find modified files that are committed but not yet merged.
      find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
      commit_modified_files = (
          subprocess.check_output(find_modified_files, shell=True)
          .decode()
          .splitlines()
      )
      for line in commit_modified_files:
        modified_files.add(os.path.normpath('{}/{}'.format(git_path, line)))
  except (OSError, subprocess.CalledProcessError) as err:
    # Best-effort: a failing git/repo invocation yields whatever was
    # collected so far instead of raising.
    logging.debug('Exception raised: %s', err)
  return modified_files
948
949
def delimiter(char, length=_DEFAULT_TERMINAL_WIDTH, prenl=0, postnl=0):
  """A handy delimiter printer.

  Args:
      char: A string used for delimiter.
      length: An integer for the replication.
      prenl: An integer that insert '\n' before delimiter.
      postnl: An integer that insert '\n' after delimiter.

  Returns:
      A string of delimiter.
  """
  return ''.join(('\n' * prenl, char * length, '\n' * postnl))
963
964
def find_files(path, file_name=constants.TEST_MAPPING, followlinks=False):
  """Find all files with given name under the given path.

  Args:
      path: A string of path in source.
      file_name: The file name pattern for finding matched files.
      followlinks: A boolean to indicate whether to follow symbolic links.

  Returns:
      A list of paths of the files with the matching name under the given
      path.
  """
  match_files = []
  for root, _, filenames in os.walk(path, followlinks=followlinks):
    try:
      # fnmatch translates the pattern into a regex; a malformed pattern
      # surfaces here as re.error.
      match_files.extend(
          os.path.join(root, name)
          for name in fnmatch.filter(filenames, file_name)
      )
    except re.error as e:
      msg = 'Unable to locate %s among %s' % (file_name, filenames)
      logging.debug(msg)
      logging.debug('Exception: %s', e)
      metrics.AtestExitEvent(
          duration=metrics_utils.convert_duration(0),
          exit_code=ExitCode.COLLECT_ONLY_FILE_NOT_FOUND,
          stacktrace=msg,
          logs=str(e),
      )
  return match_files
993
994
def extract_zip_text(zip_path):
  """Extract the text files content for input zip file.

  Args:
      zip_path: The file path of zip.

  Returns:
      The string in input zip file.
  """
  content = ''
  try:
    with zipfile.ZipFile(zip_path) as zip_file:
      for entry in zip_file.infolist():
        # Skip directory entries inside the archive. The previous check,
        # os.path.isdir(filename), tested the local filesystem rather than
        # the archive member, so it effectively never matched.
        if entry.is_dir():
          continue
        # Force change line if multiple text files in zip
        content = content + '\n'
        # Keep only lines that look like tradefed error/warning logs.
        with zip_file.open(entry) as extract_file:
          for line in extract_file:
            if matched_tf_error_log(line.decode()):
              content = content + line.decode()
  except zipfile.BadZipfile as err:
    logging.debug('Exception raised: %s', err)
  return content
1020
1021
def matched_tf_error_log(content):
  """Check if the input content matched tradefed log pattern.

  The format will look like this. 05-25 17:37:04 W/XXXXXX 05-25 17:37:04
  E/XXXXXX

  Args:
      content: Log string.

  Returns:
      True if the content matches the regular expression for tradefed error or
      warning log.
  """
  # Date (MM-DD), time (HH:MM:SS), then an 'E/' or 'W/' log tag. The previous
  # alternation '(E|W/)' accepted a bare 'E' without the trailing slash, so
  # any line with a lone 'E' after the timestamp matched.
  reg = (
      r'^((0[1-9])|(1[0-2]))-((0[1-9])|([12][0-9])|(3[0-1])) '
      r'(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9]) (E|W)/'
  )
  return bool(re.search(reg, content))
1042
1043
def read_test_record(path):
  """A Helper to read test record proto.

  Args:
      path: The proto file path.

  Returns:
      The test_record proto instance.
  """
  record = test_record_pb2.TestRecord()
  with open(path, 'rb') as proto_file:
    record.ParseFromString(proto_file.read())
  return record
1057
1058
def has_python_module(module_name):
  """Detect if the module can be loaded without importing it in real.

  Args:
      module_name: A string of the tested module name.

  Returns:
      True if found, False otherwise.
  """
  return importlib.util.find_spec(module_name) is not None
1069
1070
def load_json_safely(jsonfile):
  """Load the given json file as an object.

  Args:
      jsonfile: The json file path (str, bytes or Path).

  Returns:
      The content of the give json file. Null dict when:
      1. the given path doesn't exist.
      2. the given path is not a json or invalid format.
  """
  if isinstance(jsonfile, bytes):
    jsonfile = jsonfile.decode('utf-8')
  # Guard clause: a missing file yields an empty dict, not an exception.
  if not Path(jsonfile).is_file():
    logging.debug('%s: File not found.', jsonfile)
    return {}
  try:
    with open(jsonfile, 'r', encoding='utf-8') as cache:
      return json.load(cache)
  except json.JSONDecodeError:
    logging.debug('Exception happened while loading %s.', jsonfile)
  return {}
1093
1094
def get_atest_version():
  """Get atest version.

  Returns:
      Version string from the VERSION file, e.g. prebuilt
          2022-11-24_9314547  (<release_date>_<build_id>)

      If VERSION does not exist (src or local built):
          2022-11-24_5d448c50 (<commit_date>_<commit_id>)

      If the git command fails for unexpected reason:
          2022-11-24_unknown  (<today_date>_unknown)
  """
  try:
    with importlib.resources.as_file(
        importlib.resources.files('atest').joinpath('VERSION')
    ) as version_file_path:
      return version_file_path.read_text(encoding='utf-8')
  except (ModuleNotFoundError, FileNotFoundError):
    logging.debug(
        'Failed to load package resource atest/VERSION, possibly due to running'
        ' from atest-dev, atest-src, a prebuilt without embedded launcher, or a'
        ' prebuilt not created by the asuite release tool. Falling back to'
        ' legacy source search.'
    )
    version_file = Path(__file__).resolve().parent.joinpath('VERSION')
    if version_file.is_file():
      # Fix: read_text() closes the file; the previous bare open().read()
      # leaked the file handle.
      return version_file.read_text(encoding='utf-8')

  # Try fetching commit date (%ci) and commit hash (%h).
  git_cmd = 'git log -1 --pretty=format:"%ci;%h"'
  try:
    # commit date/hash are only available when running from the source
    # and the local built.
    result = subprocess.run(
        git_cmd,
        shell=True,
        check=False,
        capture_output=True,
        cwd=Path(os.getenv(constants.ANDROID_BUILD_TOP), '').joinpath(
            'tools/asuite/atest'
        ),
    )
    if result.stderr:
      raise subprocess.CalledProcessError(returncode=0, cmd=git_cmd)
    raw_date, commit = result.stdout.decode().split(';')
    date = datetime.datetime.strptime(raw_date, '%Y-%m-%d %H:%M:%S %z').date()
  # atest_dir doesn't exist will throw FileNotFoundError.
  except (subprocess.CalledProcessError, FileNotFoundError):
    # Use today as the commit date for unexpected conditions.
    date = datetime.datetime.today().date()
    commit = 'unknown'
  return f'{date}_{commit}'
1148
1149
def get_manifest_branch(show_aosp=False):
  """Get the manifest branch.

  Args:
      show_aosp: A boolean that shows 'aosp' prefix by checking the 'remote'
        attribute.

  Returns:
      The value of 'revision' of the included xml or default.xml.

      None when no ANDROID_BUILD_TOP or unable to access default.xml.
  """
  #      (portal xml)                            (default xml)
  # +--------------------+ _get_include() +-----------------------------+
  # | .repo/manifest.xml |--------------->| .repo/manifests/default.xml |
  # +--------------------+                +---------------+-------------+
  #                          <default revision="master" |
  #                                   remote="aosp"     | _get_revision()
  #                                   sync-j="4"/>      V
  #                                                 +--------+
  #                                                 | master |
  #                                                 +--------+
  build_top = os.getenv(constants.ANDROID_BUILD_TOP)
  if not build_top:
    return None
  portal_xml = Path(build_top).joinpath('.repo', 'manifest.xml')
  default_xml = Path(build_top).joinpath('.repo/manifests', 'default.xml')

  def _get_revision(xml):
    # Returns the 'revision' attribute of the first <default> tag of `xml`,
    # prefixed with 'aosp-' when show_aosp is set and that same tag's
    # 'remote' attribute is 'aosp'. Returns '' when the file cannot be read
    # or has no <default> tag.
    try:
      xml_root = ET.parse(xml).getroot()
    except (IOError, OSError, ET.ParseError):
      # TODO(b/274989179) Change back to warning once warning if not going
      # to be treat as test failure. Or test_get_manifest_branch unit test
      # could be fix if return None if portal_xml or default_xml not
      # exist.
      logging.info('%s could not be read.', xml)
      return ''
    default_tags = xml_root.findall('./default')
    if default_tags:
      prefix = ''
      for tag in default_tags:
        branch = tag.attrib.get('revision')
        if show_aosp and tag.attrib.get('remote') == 'aosp':
          prefix = 'aosp-'
        # Note: returns on the first iteration, so only the first <default>
        # tag is ever consulted.
        return f'{prefix}{branch}'
    return ''

  def _get_include(xml):
    # Returns the manifest path named by the first <include> tag of `xml`
    # (resolved under .repo/manifests). Falls back to default_xml when no
    # <include> tag exists, or an empty Path() when `xml` is unreadable
    # (an empty Path never satisfies is_file() at the call site).
    try:
      xml_root = ET.parse(xml).getroot()
    except (IOError, OSError, ET.ParseError):
      # TODO(b/274989179) Change back to warning once warning if not going
      # to be treat as test failure. Or test_get_manifest_branch unit test
      # could be fix if return None if portal_xml or default_xml not
      # exist.
      logging.info('%s could not be read.', xml)
      return Path()
    include_tags = xml_root.findall('./include')
    if include_tags:
      for tag in include_tags:
        name = tag.attrib.get('name')
        if name:
          return Path(build_top).joinpath('.repo/manifests', name)
    return default_xml

  # 1. Try getting revision from .repo/manifests/default.xml
  if default_xml.is_file():
    return _get_revision(default_xml)
  # 2. Try getting revision from the included xml of .repo/manifest.xml
  include_xml = _get_include(portal_xml)
  if include_xml.is_file():
    return _get_revision(include_xml)
  # 3. Try getting revision directly from manifest.xml (unlikely to happen)
  return _get_revision(portal_xml)
1225
1226
def get_build_target():
  """Get the build target form system environment TARGET_PRODUCT."""
  product = os.getenv(constants.ANDROID_TARGET_PRODUCT, None)
  release = os.getenv('TARGET_RELEASE', None)
  variant = os.getenv(constants.TARGET_BUILD_VARIANT, None)
  return '%s-%s-%s' % (product, release, variant)
1235
1236
def has_wildcard(test_name):
  """Tell whether the test_name (either a list or string) contains wildcard symbols.

  Args:
      test_name: A list or a str.

  Return:
      True if test_name contains wildcard, False otherwise.
  """
  if isinstance(test_name, str):
    return any(char in test_name for char in _WILDCARD_CHARS)
  if isinstance(test_name, list):
    # Recurse element-wise; a single wildcard anywhere is enough.
    return any(has_wildcard(name) for name in test_name)
  return False
1255
1256
def is_build_file(path):
  """If input file is one of an android build file.

  Args:
      path: A string of file path.

  Return:
      True if path is android build file, False otherwise.
  """
  _, extension = os.path.splitext(path)
  return extension in _ANDROID_BUILD_EXT
1267
1268
def quote(input_str):
  """Wrap the input string in single quotes when it holds shell-aware chars.

  e.g. unit(test|testing|testing) -> 'unit(test|testing|testing)'

  Args:
      input_str: A string from user input.

  Returns: A string with single quotes if regex chars were detected.
  """
  if not has_chars(input_str, _REGEX_CHARS):
    return input_str
  return "'" + input_str + "'"
1284
1285
def has_chars(input_str, chars):
  """Check if the input string contains one of the designated characters.

  Args:
      input_str: A string from user input.
      chars: An iterable object.

  Returns:
      True if the input string contains one of the special chars.
  """
  return any(char in input_str for char in chars)
1300
1301
def prompt_with_yn_result(msg, default=True):
  """Prompt message and get yes or no result.

  Args:
      msg: The question you want asking.
      default: boolean to True/Yes or False/No

  Returns:
      default value if get KeyboardInterrupt or ValueError exception.
  """
  prompt = msg + ('[Y/n]: ' if default else '[y/N]: ')
  try:
    answer = input(prompt)
    return strtobool(answer)
  except (ValueError, KeyboardInterrupt):
    # Ctrl-C or an unrecognized answer falls back to the default.
    return default
1317
1318
def strtobool(val):
  """Convert a string representation of truth to True or False.

  Args:
      val: a string of input value.

  Returns:
      True when values are 'y', 'yes', 't', 'true', 'on', and '1';
      False when 'n', 'no', 'f', 'false', 'off', and '0'.
      Raises ValueError if 'val' is anything else.
  """
  truth_map = {
      'y': True, 'yes': True, 't': True, 'true': True, 'on': True, '1': True,
      'n': False, 'no': False, 'f': False, 'false': False, 'off': False,
      '0': False,
  }
  normalized = val.lower()
  if normalized not in truth_map:
    raise ValueError('invalid truth value %r' % (val,))
  return truth_map[normalized]
1335
1336
def get_android_junit_config_filters(test_config):
  """Get the dictionary of a input config for junit config's filters

  Args:
      test_config: The path of the test config.

  Returns:
      A dictionary include all the filters in the input config.
  """
  filter_dict = {}
  xml_root = ET.parse(test_config).getroot()
  for tag in xml_root.findall('.//option'):
    name = tag.attrib['name'].strip()
    if name in constants.SUPPORTED_FILTERS:
      # Accumulate every value seen for the same filter name.
      filter_dict.setdefault(name, []).append(tag.attrib['value'].strip())
  return filter_dict
1357
1358
def get_config_parameter(test_config):
  """Get all the parameter values for the input config

  Args:
      test_config: The path of the test config.

  Returns:
      A set include all the parameters of the input config.
  """
  parameters = set()
  xml_root = ET.parse(test_config).getroot()
  for tag in xml_root.findall('.//option'):
    if tag.attrib['name'].strip() != constants.CONFIG_DESCRIPTOR:
      continue
    # Only config-descriptor options carry a 'key' attribute.
    if tag.attrib['key'].strip() == constants.PARAMETER_KEY:
      parameters.add(tag.attrib['value'].strip())
  return parameters
1379
1380
def get_config_device(test_config):
  """Get all the device names from the input config

  Args:
      test_config: The path of the test config.

  Returns:
      A set include all the device name of the input config.
  """
  devices = set()
  try:
    xml_root = ET.parse(test_config).getroot()
    for tag in xml_root.findall('.//device'):
      devices.add(tag.attrib['name'].strip())
  except ET.ParseError as e:
    # An unparsable config is fatal for device lookup.
    colorful_print('Config has invalid format.', constants.RED)
    colorful_print('File %s : %s' % (test_config, str(e)), constants.YELLOW)
    sys.exit(ExitCode.CONFIG_INVALID_FORMAT)
  return devices
1402
1403
def get_mainline_param(test_config):
  """Get all the mainline-param values for the input config

  Args:
      test_config: The path of the test config.

  Returns:
      A set include all the parameters of the input config.
  """
  mainline_param = set()
  xml_root = ET.parse(test_config).getroot()
  for tag in xml_root.findall('.//option'):
    if tag.attrib['name'].strip() != constants.CONFIG_DESCRIPTOR:
      continue
    # Only config-descriptor options carry a 'key' attribute.
    if tag.attrib['key'].strip() == constants.MAINLINE_PARAM_KEY:
      mainline_param.add(tag.attrib['value'].strip())
  return mainline_param
1424
1425
def get_adb_devices():
  """Run `adb devices` and return a list of devices.

  Returns:
      A list of devices. e.g.
      ['127.0.0.1:40623', '127.0.0.1:40625']
  """
  probe_cmd = 'adb devices | egrep -v "^List|^$"||true'
  output = subprocess.check_output(probe_cmd, shell=True).decode()
  # Each line looks like '<serial>\tdevice'; keep only the serial.
  return [line.split('\t')[0] for line in output.splitlines()]
1436
1437
def get_android_config():
  """Get Android config as "printconfig" shows.

  Returns:
      A dict of Android configurations.
  """
  dump_cmd = get_build_cmd(dump=True)
  raw_config = subprocess.check_output(dump_cmd).decode('utf-8')
  android_config = {}
  for line in raw_config.splitlines():
    # Lines starting with '=' are separators, not key/value pairs.
    if line.startswith('='):
      continue
    key, value = line.split('=', 1)
    android_config.setdefault(key, value)
  return android_config
1452
1453
1454def get_config_gtest_args(test_config):
1455  """Get gtest's module-name and device-path option from the input config
1456
1457  Args:
1458      test_config: The path of the test config.
1459
1460  Returns:
1461      A string of gtest's module name.
1462      A string of gtest's device path.
1463  """
1464  module_name = ''
1465  device_path = ''
1466  xml_root = ET.parse(test_config).getroot()
1467  option_tags = xml_root.findall('.//option')
1468  for tag in option_tags:
1469    name = tag.attrib['name'].strip()
1470    value = tag.attrib['value'].strip()
1471    if name == 'native-test-device-path':
1472      device_path = value
1473    elif name == 'module-name':
1474      module_name = value
1475  return module_name, device_path
1476
1477
def get_arch_name(module_name, is_64=False):
  """Get the arch folder name for the input module.

  Scan the test case folders to get the matched arch folder name.

  Args:
      module_name: The module_name of test
      is_64: If need 64 bit arch name, False otherwise.

  Returns:
      A string of the arch name.
  """
  arch_list = ['arm64', 'x86_64'] if is_64 else ['arm', 'x86']
  test_case_root = os.path.join(
      os.environ.get(constants.ANDROID_TARGET_OUT_TESTCASES, ''), module_name
  )
  if not os.path.isdir(test_case_root):
    logging.debug('%s does not exist.', test_case_root)
    return ''
  # The first directory entry whose name is a known arch wins.
  for entry in os.listdir(test_case_root):
    if entry in arch_list:
      return entry
  return ''
1505
1506
def copy_single_arch_native_symbols(
    symbol_root, module_name, device_path, is_64=False
):
  """Copy symbol files for native tests which belong to input arch.

  Args:
      symbol_root: The root of the symbols output directory.
      module_name: The module_name of test
      device_path: The device path define in test config.
      is_64: True if need to copy 64bit symbols, False otherwise.
  """
  bitness_dir = 'nativetest64' if is_64 else 'nativetest'
  src_symbol = os.path.join(symbol_root, 'data', bitness_dir, module_name)
  dst_symbol = os.path.join(
      symbol_root,
      # device_path starts with '/'; drop it so the join stays under
      # symbol_root.
      device_path[1:],
      module_name,
      get_arch_name(module_name, is_64),
  )
  if os.path.isdir(src_symbol):
    # TODO: Use shutil.copytree(src, dst, dirs_exist_ok=True) after
    #  python3.8
    if os.path.isdir(dst_symbol):
      shutil.rmtree(dst_symbol)
    shutil.copytree(src_symbol, dst_symbol)
1532
1533
def copy_native_symbols(module_name, device_path):
  """Copy symbol files for native tests to match with tradefed file structure.

  The original symbols will locate at
  $(PRODUCT_OUT)/symbols/data/nativetest(64)/$(module)/$(stem).
  From TF, the test binary will locate at
  /data/local/tmp/$(module)/$(arch)/$(stem).
  In order to make trace work need to copy the original symbol to
  $(PRODUCT_OUT)/symbols/data/local/tmp/$(module)/$(arch)/$(stem)

  Args:
      module_name: The module_name of test
      device_path: The device path define in test config.
  """
  symbol_root = os.path.join(
      os.environ.get(constants.ANDROID_PRODUCT_OUT, ''), 'symbols'
  )
  if not os.path.isdir(symbol_root):
    logging.debug('Symbol dir:%s not exist, skip copy symbols.', symbol_root)
    return
  # Copy 32 bit and 64 bit symbols when present.
  for is_64 in (False, True):
    if get_arch_name(module_name, is_64=is_64):
      copy_single_arch_native_symbols(
          symbol_root, module_name, device_path, is_64=is_64
      )
1564
1565
def get_config_preparer_options(test_config, class_name):
  """Get all the options of the given target_preparer in the input config.

  Args:
      test_config: The path of the test config.
      class_name: A string of target_preparer

  Returns:
      A dict of option name to option value for the matching preparer.
  """
  xml_root = ET.parse(test_config).getroot()
  option_tags = xml_root.findall(
      './/target_preparer[@class="%s"]/option' % class_name
  )
  return {
      tag.attrib['name'].strip(): tag.attrib['value'].strip()
      for tag in option_tags
  }
1586
1587
def get_verify_key(tests, extra_args):
  """Compose test command key.

  Args:
      tests: A list of input tests.
      extra_args: Dict of extra args to add to test run.

  Returns:
      A composed test commands.
  """
  # test_commands is a concatenated string of sorted test_ref+extra_args.
  # For example, "ITERATIONS=5 hello_world_test"
  # Copy first: the previous implementation appended to and sorted the
  # caller's list in place, mutating the argument as a side effect.
  test_commands = list(tests)
  for key, value in extra_args.items():
    test_commands.append('%s=%s' % (key, str(value)))
  test_commands.sort()
  return ' '.join(test_commands)
1605
1606
def save_build_files_timestamp():
  """Method that generate timestamp of Android.{bp,mk} files.

  The checksum of build files are stores in
      $ANDROID_HOST_OUT/indices/buildfiles.stp
  """
  plocate_db = get_index_path(constants.LOCATE_CACHE)
  plocate_db_exist = plocate_db.is_file()
  logging.debug(
      'Build files timestamp db file %s exists: %s',
      plocate_db,
      plocate_db_exist,
  )
  # Without the plocate database there is nothing to enumerate.
  if not plocate_db_exist:
    return
  cmd = f'locate -d{plocate_db} --existing ' r'--regex "/Android\.(bp|mk)$"'
  results = subprocess.getoutput(cmd)
  if not results:
    return
  timestamp = {
      build_file: Path(build_file).stat().st_mtime
      for build_file in results.splitlines()
  }
  timestamp_file = get_index_path(constants.BUILDFILES_STP)
  logging.debug('Writing to build files timestamp db %s', timestamp_file)
  with open(timestamp_file, 'w', encoding='utf-8') as _file:
    json.dump(timestamp, _file)
1633
1634
def run_multi_proc(func, *args, **kwargs):
  """Start a process with multiprocessing and return Process object.

  Args:
      func: The callable to run as the process target.
      args/kwargs: forwarded to multiprocessing.Process; see:
      https://docs.python.org/3.8/library/multiprocessing.html#process-and-exceptions

  Returns:
      multiprocessing.Process object (already started).
  """
  process = Process(target=func, *args, **kwargs)
  process.start()
  return process
1649
1650
def start_threading(target, *args, **kwargs):
  """Start a Thread-based parallelism.

  Args:
      target: The callable to run in the new thread.
      args/kwargs: forwarded to threading.Thread; see:
      https://docs.python.org/3/library/threading.html#threading.Thread

  Returns:
      threading.Thread object (already started).
  """
  worker = Thread(target=target, *args, **kwargs)
  worker.start()
  return worker
1665
1666
def get_prebuilt_sdk_tools_dir():
  """Get the path for the prebuilt sdk tools root dir.

  Returns: The absolute path of prebuilt sdk tools directory.
  """
  build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
  # e.g. 'linux' or 'darwin', matching the prebuilts directory layout.
  host_os = platform.system().lower()
  return build_top.joinpath('prebuilts/sdk/tools/', host_os, 'bin')
1676
1677
def is_writable(path):
  """Check if the given path is writable.

  Walks up to the nearest existing ancestor when the path itself does not
  exist yet.

  Returns: True if input path is writable, False otherwise.
  """
  if os.path.exists(path):
    return os.access(path, os.W_OK)
  return is_writable(os.path.dirname(path))
1686
1687
def get_misc_dir():
  """Get the path for the ATest data root dir.

  Returns: The absolute path of the ATest data root dir.
  """
  home_dir = os.path.expanduser('~')
  # Fall back to the build output dir when $HOME is not writable.
  return home_dir if is_writable(home_dir) else get_build_out_dir()
1697
1698
def get_config_folder() -> Path:
  """Returns the config folder path where upload config is stored."""
  return Path(get_misc_dir(), '.atest')
1702
1703
def get_full_annotation_class_name(module_info, class_name):
  """Get fully qualified class name from a class name.

  If the given keyword(class_name) is "smalltest", this method can search
  among source codes and grep the accurate annotation class name:

      androidx.test.filters.SmallTest

  Args:
      module_info: A dict of module_info.
      class_name: A string of class name.

  Returns:
      A string of fully qualified class name, empty string otherwise.
  """
  # Exact match of the given name in an import statement.
  fullname_re = re.compile(
      r'import\s+(?P<fqcn>{})(|;)$'.format(class_name), re.I
  )
  # Match the given name as the last component of a dotted import.
  keyword_re = re.compile(
      r'import\s+(?P<fqcn>.*\.{})(|;)$'.format(class_name), re.I
  )
  build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
  for src in module_info.get(constants.MODULE_SRCS, []):
    with open(build_top.joinpath(src), 'r', encoding='utf-8') as cache:
      for line in cache.readlines():
        # Prefer the full-name match, then the keyword match.
        match = fullname_re.match(line) or keyword_re.match(line)
        if match:
          return match.group('fqcn')
  return ''
1739
1740
def has_mixed_type_filters(test_infos):
  """There are different types in a test module.

  Dict test_to_types is mapping module name and the set of types.
  For example,
  {
      'module_1': {'wildcard class_method'},
      'module_2': {'wildcard class_method', 'regular class_method'},
      'module_3': set()
      }

  Args:
      test_infos: A set of TestInfos.

  Returns:
      True if more than one filter type in a test module, False otherwise.
  """
  test_to_types = {}
  for test_info in test_infos:
    # Accumulate filter types per module name across all TestInfos.
    accumulated = test_to_types.setdefault(test_info.test_name, set())
    for flt in test_info.data.get(constants.TI_FILTER, []):
      accumulated |= get_filter_types(flt.to_list_of_tf_strings())
  return any(len(types) > 1 for types in test_to_types.values())
1770
1771
def get_filter_types(tf_filter_set):
  """Get filter types.

  Args:
      tf_filter_set: A list of tf filter strings.

  Returns:
      A set of FilterType.
  """
  type_set = set()
  checks = (
      (_WILDCARD_FILTER_RE, FilterType.WILDCARD_FILTER),
      (_REGULAR_FILTER_RE, FilterType.REGULAR_FILTER),
  )
  for tf_filter in tf_filter_set:
    # A single filter string can match both patterns; record each match.
    for pattern, filter_type in checks:
      if pattern.match(tf_filter):
        logging.debug(
            'Filter and type: (%s, %s)', tf_filter, filter_type.value
        )
        type_set.add(filter_type.value)
  return type_set
1798
1799
def has_command(cmd: str) -> bool:
  """Detect if the command is available in PATH.

  Args:
      cmd: A string of the tested command.

  Returns:
      True if found, False otherwise.
  """
  return shutil.which(cmd) is not None
1810
1811
1812# pylint: disable=anomalous-backslash-in-string,too-many-branches
def get_bp_content(filename: Path, module_type: str) -> Dict:
  """Get essential content info from an Android.bp.

  By specifying module_type (e.g. 'android_test', 'android_app'), this method
  can parse the given starting point and grab 'name', 'instrumentation_for' and
  'manifest'.

  Returns:
      A dict of mapping test module and target module; e.g.
      {
       'FooUnitTests':
           {'manifest': 'AndroidManifest.xml', 'target_module': 'Foo'},
       'Foo':
           {'manifest': 'AndroidManifest-common.xml', 'target_module': ''}
      }
      Null dict if there is no content of the given module_type.
  """
  build_file = Path(filename)
  if not any((build_file.suffix == '.bp', build_file.is_file())):
    return {}
  # Opening line of a module block, e.g. 'android_test {'.
  start_from = re.compile(f'^{module_type}\\s*\\{{')
  # Closing brace of a module block (stripped line is exactly '}').
  end_with = re.compile(r'^\}$')
  # Attributes of interest inside the block: key: "value",
  context_re = re.compile(
      r'\s*(?P<key>(name|manifest|instrumentation_for))\s*:'
      r'\s*\"(?P<value>.*)\"\s*,',
      re.M,
  )
  content_dict = {}
  module_attrs = None  # None means "not currently inside a module block".
  for raw_line in build_file.read_text(encoding='utf-8').splitlines():
    line = raw_line.strip()
    if re.match(start_from, line):
      module_attrs = {}
      continue
    if module_attrs is None:
      continue
    if re.match(end_with, line):
      # Block finished; commit it when a name was found.
      module_name = module_attrs.get('name')
      if module_name:
        content_dict[module_name] = {
            'manifest': module_attrs.get('manifest', 'AndroidManifest.xml'),
            'target_module': module_attrs.get('instrumentation_for', ''),
        }
      module_attrs = None
      continue
    match = re.match(context_re, line)
    if match:
      module_attrs[match.group('key')] = match.group('value')
  return content_dict
1866
1867
def get_manifest_info(manifest: Path) -> Dict[str, Any]:
  """Get the essential info from the given manifest file.

  This method cares only three attributes:

      * package
      * targetPackage
      * persistent
  For an instrumentation test, the result will be like:
  {
      'package': 'com.android.foo.tests.unit',
      'targetPackage': 'com.android.foo',
      'persistent': False
  }
  For a target module of the instrumentation test:
  {
      'package': 'com.android.foo',
      'targetPackage': '',
      'persistent': True
  }
  """
  mdict = {'package': '', 'target_package': '', 'persistent': False}
  try:
    xml_root = ET.parse(manifest).getroot()
  except (ET.ParseError, FileNotFoundError):
    return mdict
  manifest_package_re = re.compile(r'[a-z][\w]+(\.[\w]+)*')
  # 1. 'package' must come from the root element.
  for node in xml_root.findall('.'):
    pkg = node.attrib.get('package')
    if pkg and manifest_package_re.match(pkg):
      mdict['package'] = pkg
      break
  for node in xml_root.findall('*'):
    # 2. 'targetPackage' comes from the 'instrumentation' tag.
    if node.tag == 'instrumentation':
      for key, value in node.attrib.items():
        if 'targetPackage' in key:
          mdict['target_package'] = value
          break
    # 3. 'persistent' may appear on any child tag.
    for key, value in node.attrib.items():
      if 'persistent' in key:
        mdict['persistent'] = value.lower() == 'true'
        break
  return mdict
1916
1917
# pylint: disable=broad-except
def generate_print_result_html(result_file: Path):
  """Generate a html that collects all log files."""
  result_file = Path(result_file)
  log_dir = result_file.parent.joinpath('log')
  html_path = Path(log_dir, 'test_logs.html')
  try:
    log_files = sorted(find_files(str(log_dir), file_name='*', followlinks=True))
    with open(html_path, 'w', encoding='utf-8') as html_cache:
      html_cache.write('<!DOCTYPE html><html><body>')
      result = load_json_safely(result_file)
      if result:
        # Show the original atest invocation and when it ran.
        html_cache.write(f'<h1>{"atest " + result.get("args")}</h1>')
        timestamp = datetime.datetime.fromtimestamp(result_file.stat().st_ctime)
        html_cache.write(f'<h2>{timestamp}</h2>')
      for log_file in log_files:
        # Quote the href and escape the label so unusual file names render
        # (and link) safely.
        html_cache.write(
            f'<p><a href="{urllib.parse.quote(log_file)}">'
            f'{html.escape(Path(log_file).name)}</a></p>'
        )
      html_cache.write('</body></html>')
    print(
        f'\n{_HTML_LOG_PRINT_PREFIX}\n{mark_magenta(f"file://{html_path}")}\n'
    )
    send_tradeded_elapsed_time_metric(log_dir)
  except Exception as e:
    logging.debug('Did not generate log html for reason: %s', e)
1945
1946
def send_tradeded_elapsed_time_metric(search_dir: Path):
  """Method which sends Tradefed elapsed time to the metrics."""
  test_ms, prep_ms, teardown_ms = get_tradefed_invocation_time(search_dir)
  # Report total first, then each phase, mirroring the TF summary layout.
  for detect_type, elapsed in (
      (DetectType.TF_TOTAL_RUN_MS, test_ms + prep_ms + teardown_ms),
      (DetectType.TF_PREPARATION_MS, prep_ms),
      (DetectType.TF_TEST_MS, test_ms),
      (DetectType.TF_TEARDOWN_MS, teardown_ms),
  ):
    metrics.LocalDetectEvent(detect_type=detect_type, result=elapsed)
1960
1961
def get_tradefed_invocation_time(search_dir: Path) -> Tuple[int, int, int]:
  """Return a tuple of testing, preparation and teardown time."""
  # Section headers and value lines of the Tradefed end_host_log. Compiled
  # once here instead of per-line inside the loop.
  consumed_header = re.compile(r'[=]+.*consumed.*time.*[=]+', re.I)
  prep_header = re.compile(r'[=]+.*preparation.*time.*[=]+', re.I)
  total_line = re.compile(r'^(Total.*)', re.I)
  test_value = re.compile(r'^[\s]+\w.*:\s+(?P<timestr>.*)$', re.I)
  # SuiteResultReporter.java defines elapsed prep time only in ms.
  prep_value = re.compile(
      r'prep = (?P<prep>\d+ ms) \|\| clean = (?P<clean>\d+ ms)$', re.I
  )

  test_ms, prep_ms, teardown_ms = 0, 0, 0
  # A typical end_host_log looks like:
  # ============================================
  # ================= Results ==================
  # =============== Consumed Time ==============
  #     x86_64 HelloWorldTests: 1s
  #     x86_64 hallo-welt: 866 ms
  # Total aggregated tests run time: 1s
  # ============== Modules Preparation Times ==============
  #     x86_64 HelloWorldTests => prep = 2483 ms || clean = 294 ms
  #     x86_64 hallo-welt => prep = 1845 ms || clean = 292 ms
  # Total preparation time: 4s  ||  Total tear down time: 586 ms
  # =======================================================
  # =============== Summary ===============
  # Total Run time: 6s
  # 2/2 modules completed
  # Total Tests       : 3
  # PASSED            : 3
  # FAILED            : 0
  # ============== End of Results ==============
  # ============================================
  for log in find_files(
      path=search_dir, file_name='end_host_log_*.txt', followlinks=True
  ):
    with open(log, 'r', encoding='utf-8') as log_cache:
      lines = log_cache.read().splitlines()

    in_test_section, in_prep_section = False, False
    for line in lines:
      if consumed_header.match(line):
        in_test_section, in_prep_section = True, False
        continue
      if prep_header.match(line):
        in_test_section, in_prep_section = False, True
        continue
      # A leading `Total` keyword closes the current section.
      if total_line.match(line):
        in_test_section, in_prep_section = False, False
        continue
      if in_test_section:
        found = test_value.search(line)
        if found:
          test_ms += convert_timestr_to_ms(found.group('timestr'))
        continue
      if in_prep_section:
        found = prep_value.search(line)
        if found:
          prep_ms += convert_timestr_to_ms(found.group('prep'))
          teardown_ms += convert_timestr_to_ms(found.group('clean'))
        continue

  return test_ms, prep_ms, teardown_ms
2021
2022
def convert_timestr_to_ms(time_string: str = None) -> int:
  """Convert time string to an integer in millisecond.

  Possible time strings are:
      1h 21m 15s
      1m 5s
      25s
  If elapsed time is less than 1 sec, the time will be in millisecond.
      233 ms

  Args:
      time_string: An elapsed-time string such as '1h 21m 15s' or '233 ms'.
          None, empty, or unrecognized input yields 0.

  Returns:
      The elapsed time in milliseconds.
  """
  if not time_string:
    return 0

  # Initialize every unit so an unmatched string safely returns 0.
  # (Previously `milliseconds` was bound only inside the match branch and
  # an unrecognized non-empty string raised NameError at the return.)
  hours, minutes, seconds, milliseconds = 0, 0, 0, 0
  # Extract hour(<h>), minute(<m>), second(<s>), or millisecond(<ms>).
  match = re.match(
      r'(((?P<h>\d+)h\s+)?(?P<m>\d+)m\s+)?(?P<s>\d+)s|(?P<ms>\d+)\s*ms',
      time_string,
  )
  if match:
    hours = int(match.group('h')) if match.group('h') else 0
    minutes = int(match.group('m')) if match.group('m') else 0
    seconds = int(match.group('s')) if match.group('s') else 0
    milliseconds = int(match.group('ms')) if match.group('ms') else 0

  return (
      hours * 3600 * 1000 + minutes * 60 * 1000 + seconds * 1000 + milliseconds
  )
2051
2052
# pylint: disable=broad-except
def prompt_suggestions(result_file: Path):
  """Generate suggestions when detecting keywords in logs."""
  log_dir = Path(result_file).parent.joinpath('log')
  for log_path in sorted(find_files(str(log_dir), file_name='*')):
    for keyword, suggestion in SUGGESTIONS.items():
      try:
        with open(log_path, 'r', encoding='utf-8') as log_cache:
          if keyword in log_cache.read():
            colorful_print('[Suggestion] ' + suggestion, color=constants.RED)
            break
      # If the given is not a plain text, just ignore it.
      except Exception:
        pass
2070
2071
# pylint: disable=invalid-name
def get_rbe_and_customized_out_state() -> int:
  """Return decimal state of RBE and customized out.

  Customizing out dir (OUT_DIR/OUT_DIR_COMMON_BASE) dramatically slows down
  the RBE performance; by collecting the combined state of the two states,
  we can profile the performance relationship between RBE and the build time.

  Returns:
      An integer that describes the combined state.
  """
  #    RBE  | out_dir |  decimal
  # --------+---------+---------
  #     0   |    0    |    0
  #     0   |    1    |    1
  #     1   |    0    |    2
  #     1   |    1    |    3    --> Caution for poor performance.
  # 1. Ensure RBE is enabled during the build: grep exits 0 when
  # 'USE_RBE=true' appears in soong.log. Pass argv as a list (not a
  # whitespace-split f-string) so out dirs containing spaces work.
  actual_out_dir = get_build_out_dir()
  log_path = actual_out_dir.joinpath('soong.log')
  rbe_enabled = (
      subprocess.call(['grep', '-q', 'USE_RBE=true', str(log_path)]) == 0
  )

  # 2. The customized out path will be different from the regular one.
  regular_out_dir = Path(os.getenv(constants.ANDROID_BUILD_TOP), 'out')
  customized_out = actual_out_dir != regular_out_dir

  # Combine both flags into a 2-bit state; RBE is the high bit.
  return (int(rbe_enabled) << 1) | int(customized_out)
2104
2105
def build_files_integrity_is_ok() -> bool:
  """Return Whether the integrity of build files is OK."""
  # 0. Missing timestamp file or plocate.db means a fresh repo sync.
  timestamp_file = get_index_path(constants.BUILDFILES_STP)
  locate_cache = get_index_path(constants.LOCATE_CACHE)
  if not timestamp_file.is_file():
    logging.debug('timestamp_file %s is missing', timestamp_file)
    return False
  if not locate_cache.is_file():
    logging.debug('locate_cache file %s is missing', locate_cache)
    return False

  # 1. Ensure no build files were added/deleted: the number of Android.bp/mk
  # files found by locate must equal the recorded count.
  recorded_amount = len(load_json_safely(timestamp_file).keys())
  locate_output = subprocess.getoutput(
      f'locate -e -d{locate_cache} --regex ' r'"/Android\.(bp|mk)$" | wc -l'
  )
  if int(locate_output) != recorded_amount:
    logging.debug(
        'Some build files are added/deleted. Recorded number of files: %s,'
        ' actual: %s',
        recorded_amount,
        locate_output,
    )
    return False

  # 2. Ensure the consistency of all build files via recorded mtimes.
  for build_file, recorded_mtime in load_json_safely(timestamp_file).items():
    file_path = Path(build_file)
    if file_path.exists() and file_path.stat().st_mtime != recorded_mtime:
      logging.debug(
          'A build file is changed: %s. Recorded timestamp: %s, actual'
          ' timestamp: %s',
          build_file,
          recorded_mtime,
          file_path.stat().st_mtime,
      )
      return False
  return True
2144
2145
def _build_env_profiling() -> BuildEnvProfiler:
  """Determine the status profile before build.

  The BuildEnvProfiler object can help use determine whether a build is:
      1. clean build. (empty out/ dir)
      2. Build files Integrity (Android.bp/Android.mk changes).
      3. Environment variables consistency.
      4. New Ninja file generated. (mtime of soong/build.ninja)

  Returns:
      the BuildProfile object.
  """
  out_dir = get_build_out_dir()
  ninja_path = out_dir.joinpath('soong/build.ninja')
  env_file = out_dir.joinpath('soong/soong.environment.used.build')
  # A missing ninja file (fresh out/) is recorded as mtime 0.
  ninja_mtime = 0
  if ninja_path.is_file():
    ninja_mtime = ninja_path.stat().st_mtime

  return BuildEnvProfiler(
      ninja_file=ninja_path,
      ninja_file_mtime=ninja_mtime,
      variable_file=env_file,
      variable_file_md5=md5sum(env_file),
      clean_out=not ninja_path.exists(),
      build_files_integrity=build_files_integrity_is_ok(),
  )
2171
2172
def _send_build_condition_metrics(
    build_profile: BuildEnvProfiler, cmd: List[str]
):
  """Send build conditions by comparing build env profilers.

  Args:
      build_profile: The BuildEnvProfiler snapshot taken before the build.
      cmd: The build command; when building module-info.json only,
          'module-info.json' is the last element.
  """
  # Peek at (do not pop) the last element: mutating `cmd` here would
  # surprise callers that reuse the command list afterwards.
  m_mod_info_only = bool(cmd) and 'module-info.json' in cmd[-1]

  def ninja_file_is_changed(env_profiler: BuildEnvProfiler) -> bool:
    """Determine whether the ninja file had been renewal."""
    if not env_profiler.ninja_file.is_file():
      return True
    return (
        env_profiler.ninja_file.stat().st_mtime != env_profiler.ninja_file_mtime
    )

  def env_var_is_changed(env_profiler: BuildEnvProfiler) -> bool:
    """Determine whether soong-related variables had changed."""
    return md5sum(env_profiler.variable_file) != env_profiler.variable_file_md5

  def send_data(detect_type, value=1):
    """A simple wrapper of metrics.LocalDetectEvent."""
    metrics.LocalDetectEvent(detect_type=detect_type, result=value)

  def _pick(mod_info_type, build_type):
    """Select the detect type matching the build flavor (module-info only
    vs. full dependency build)."""
    return mod_info_type if m_mod_info_only else build_type

  send_data(DetectType.RBE_STATE, get_rbe_and_customized_out_state())

  # Determine the correct detect type before profiling.
  # (build module-info.json or build dependencies.)
  clean_out = _pick(
      DetectType.MODULE_INFO_CLEAN_OUT, DetectType.BUILD_CLEAN_OUT
  )
  ninja_generation = _pick(
      DetectType.MODULE_INFO_GEN_NINJA, DetectType.BUILD_GEN_NINJA
  )
  bpmk_change = _pick(
      DetectType.MODULE_INFO_BPMK_CHANGE, DetectType.BUILD_BPMK_CHANGE
  )
  env_change = _pick(
      DetectType.MODULE_INFO_ENV_CHANGE, DetectType.BUILD_ENV_CHANGE
  )
  src_change = _pick(
      DetectType.MODULE_INFO_SRC_CHANGE, DetectType.BUILD_SRC_CHANGE
  )
  other = _pick(DetectType.MODULE_INFO_OTHER, DetectType.BUILD_OTHER)
  incremental = _pick(
      DetectType.MODULE_INFO_INCREMENTAL, DetectType.BUILD_INCREMENTAL
  )

  if build_profile.clean_out:
    send_data(clean_out)
  else:
    send_data(incremental)

  if ninja_file_is_changed(build_profile):
    send_data(ninja_generation)

  # `other` is reported only when no specific cause below was detected.
  other_condition = True
  if not build_profile.build_files_integrity:
    send_data(bpmk_change)
    other_condition = False
  if env_var_is_changed(build_profile):
    send_data(env_change)
    other_condition = False
  if bool(get_modified_files(os.getcwd())):
    send_data(src_change)
    other_condition = False
  if other_condition:
    send_data(other)
2258