1# Copyright 2016 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5# repohooks/pre-upload.py currently does not run pylint. But for developers who
6# want to check their code manually we disable several harmless pylint warnings
7# which just distract from more serious remaining issues.
8#
9# The instance variables _host and _install_paths are not defined in __init__().
10# pylint: disable=attribute-defined-outside-init
11#
12# Many short variable names don't follow the naming convention.
13# pylint: disable=invalid-name
14#
15# _parse_result() and _dir_size() don't access self and could be functions.
16# pylint: disable=no-self-use
17
18from collections import namedtuple
19import errno
20import glob
21import hashlib
22import logging
23import os
24import pipes
25import re
26import shutil
27import stat
28import subprocess
29import tempfile
30import time
31import urlparse
32
33from autotest_lib.client.bin import utils as client_utils
34from autotest_lib.client.common_lib import error
35from autotest_lib.server import test
36from autotest_lib.server import utils
37from autotest_lib.server.cros.tradefed import cts_expected_failure_parser
38from autotest_lib.server.cros.tradefed import tradefed_chromelogin as login
39from autotest_lib.server.cros.tradefed import tradefed_constants as constants
40from autotest_lib.server.cros.tradefed import tradefed_utils
41from autotest_lib.server.cros.tradefed import tradefed_prerequisite
42from autotest_lib.server.autotest import OFFLOAD_ENVVAR
43
44# TODO(kinaba): Move to tradefed_utils together with the setup/cleanup methods.
45MediaAsset = namedtuple('MediaAssetInfo', ['uri', 'localpath'])
46
47
48class TradefedTest(test.test):
49    """Base class to prepare DUT to run tests via tradefed."""
50    version = 1
51
52    # Default and upper bounds of max_retry, based on board and revision
53    # after branching (that is, 'y' of R74-12345.y.z).
54    #
55    # By default, 0<=y<1 does 5 retries and 1<=y does 10. The |max_retry|
56    # parameter in control files can override the count, within the
57    # _BRANCH_MAX_RETRY limit below.
58    _BRANCH_DEFAULT_RETRY = [(0, 5), (1, 10)]  # dev=5, beta=stable=10
59    _BRANCH_MAX_RETRY = [(0, 12), (1, 30),      # dev=12, beta=30, stable=99
60        (constants.APPROXIMATE_STABLE_BRANCH_NUMBER, 99)]
61    # TODO(kinaba): betty-arcnext
62    _BOARD_MAX_RETRY = {'betty': 0}
63
64    _SHARD_CMD = None
65    _board_arch = None
66    _board_name = None
67    _release_branch_number = None  # The 'y' of OS version Rxx-xxxxx.y.z
68    _android_version = None
69    _first_api_level = None
70    _num_media_bundles = 0
71    _abilist = []
72
73    # A job will be aborted after 16h. Subtract 30m for setup/teardown.
74    _MAX_LAB_JOB_LENGTH_IN_SEC = 16 * 60 * 60 - 30 * 60
75    _job_deadline = None
76
77    def _log_java_version(self):
78        """Log java version to debug failures due to version mismatch."""
79        utils.run(
80            'java',
81            args=('-version',),
82            ignore_status=False,
83            verbose=True,
84            stdout_tee=utils.TEE_TO_LOGS,
85            stderr_tee=utils.TEE_TO_LOGS)
86
87    def initialize(self,
88                   bundle=None,
89                   uri=None,
90                   host=None,
91                   hosts=None,
92                   max_retry=None,
93                   load_waivers=True,
94                   retry_manual_tests=False,
95                   warn_on_test_retry=True,
96                   hard_reboot_on_failure=False,
97                   use_jdk9=False):
98        """Sets up the tools and binary bundles for the test."""
99        if utils.is_in_container() and not client_utils.is_moblab():
100            self._job_deadline = time.time() + self._MAX_LAB_JOB_LENGTH_IN_SEC
101
102        self._install_paths = []
103        # TODO(pwang): Remove host if we enable multiple hosts everywhere.
104        self._hosts = [host] if host else hosts
105        for host in self._hosts:
106            logging.info('Hostname: %s', host.host_port)
107        self._verify_hosts()
108
109        self._max_retry = self._get_max_retry(max_retry)
110        self._warn_on_test_retry = warn_on_test_retry
111        # Tests in the lab run within individual lxc container instances.
112        if utils.is_in_container():
113            cache_root = constants.TRADEFED_CACHE_CONTAINER
114        else:
115            cache_root = constants.TRADEFED_CACHE_LOCAL
116
117        # The content of the cache survives across jobs.
118        self._safe_makedirs(cache_root)
119        self._tradefed_cache = os.path.join(cache_root, 'cache')
120        self._tradefed_cache_lock = os.path.join(cache_root, 'lock')
121        self._tradefed_cache_dirty = os.path.join(cache_root, 'dirty')
122        # The content of the install location does not survive across jobs
123        # and is isolated (by using a unique path) against other autotest
124        # instances. This is not needed for the lab, but matters if somebody
125        # wants to run multiple TradefedTest instances.
126        self._tradefed_install = tempfile.mkdtemp(
127            prefix=constants.TRADEFED_PREFIX)
128        # Under lxc the cache is shared between multiple autotest/tradefed
129        # instances. We need to synchronize access to it. All binaries are
130        # installed through the (shared) cache into the local (unshared)
131        # lxc/autotest instance storage.
132        # If clearing the cache it must happen before all downloads.
133        self._clean_download_cache_if_needed()
134        # Set permissions (rwxr-xr-x) on the executable binaries.
135        permission = (
136            stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
137            | stat.S_IXOTH)
138        self._install_files(constants.ADB_DIR, constants.ADB_FILES,
139                            permission)
140        self._install_files(constants.SDK_TOOLS_DIR,
141                            constants.SDK_TOOLS_FILES, permission)
142
143        # If use_jdk9 is set to True, use JDK9 instead of the default JDK8.
144        if use_jdk9:
145            if utils.is_in_container() and not client_utils.is_moblab():
146                logging.info('Lab: switching to JDK9')
147                try:
148                    os.environ['JAVA_HOME'] = '/usr/lib/jvm/jdk-9.0.4'
149                    os.environ['PATH'] = os.environ['JAVA_HOME']\
150                                      + '/bin:' + os.environ['PATH']
151                    logging.info(
152                            subprocess.check_output(['java', '-version'],
153                                                    stderr=subprocess.STDOUT))
154                except OSError:
155                    logging.error('Can\'t change current PATH directory')
156            else:
157                logging.info('Non-lab environment: should be using JDK9+')
158
159        # Install the tradefed bundle.
160        bundle_install_path = self._install_bundle(
161                self._get_latest_bundle_url(bundle) if uri == 'LATEST' else (
162                        uri or self._get_default_bundle_url(bundle)))
163        self._repository = os.path.join(bundle_install_path,
164                                        self._get_tradefed_base_dir())
165
166        # Load expected test failures to exclude them from re-runs.
167        self._waivers = set()
168        if load_waivers:
169            self._waivers.update(
170                    self._get_expected_failures('expectations', bundle))
171        if not retry_manual_tests:
172            self._waivers.update(
173                    self._get_expected_failures('manual_tests', bundle))
174
175        # Load modules with no tests.
176        self._notest_modules = self._get_expected_failures('notest_modules',
177                bundle)
178        self._hard_reboot_on_failure = hard_reboot_on_failure
179
180    def postprocess(self):
181        """Postprocess: synchronous offloads and performance data"""
182        self._output_perf()
183        self._prepare_synchronous_offloads()
184
185    def _output_perf(self):
186        """Output performance values."""
187        base = self._default_tradefed_base_dir()
188        path = tradefed_utils.get_test_result_xml_path(base)
189        if path:
190            for metric in tradefed_utils.get_perf_metrics_from_test_result_xml(
191                path, self.resultsdir):
192                self.output_perf_value(**metric)
193
194    def _prepare_synchronous_offloads(self):
195        """
196        Copy files needed for APFE to the synchronous offload dir, with some
197        structure to make the post-job postprocessing simpler.
198        """
199        testname = os.path.basename(self.outputdir)
200        # This is yyyy.mm.dd_hh.mm.ss  (start time)
201        timestamp_pattern = ("[0-9][0-9][0-9][0-9].[0-9][0-9].[0-9][0-9]" +
202                             "_[0-9][0-9].[0-9][0-9].[0-9][0-9]")
203        time_glob = os.path.join(
204            self._default_tradefed_base_dir(), timestamp_pattern
205        )
206        for dirpath in glob.glob(time_glob):
207            timestamp = os.path.basename(dirpath)
208            locs = [os.path.join(dirpath, f) for f in ["test_result.xml",
209                                                       "testResult.xml"]]
210            for f in locs:
211                if os.path.exists(f):
212                    subdirs = self._subdirs(f, testname, timestamp)
213                    self._copy_to_offload_dir(f, subdirs)
214        for z in glob.glob(time_glob+".zip"):
215            self._copy_to_offload_dir(z, self._subdirs(z, testname))
216
217    def _copy_to_offload_dir(self, src_path, subdirs, recursive=True):
218        target = os.path.join(os.getenv(OFFLOAD_ENVVAR), *subdirs)
219        self._safe_makedirs(target)
220        if not recursive or os.path.isfile(src_path):
221            return shutil.copy2(src_path, str(target))
222        return shutil.copytree(src_path, str(target))
223
224    def _subdirs(self, path, testname, timestamp=""):
225        # CTS results from bvt-arc suites need to be sent to the
226        # specially-designated bucket for early EDI entries in APFE,
227        # but only there.
228        dest = "BVT" if 'bvt-arc' in path else "CTS"
229        return ["APFE", dest, testname, timestamp]
230
231    def cleanup(self):
232        """Cleans up any dirtied state."""
233        self._kill_adb_server()
234
235        if hasattr(self, '_tradefed_install'):
236            logging.info('Cleaning up %s.', self._tradefed_install)
237            try:
238                shutil.rmtree(self._tradefed_install)
239            except IOError:
240                pass
241
242    def _kill_adb_server(self):
243        # Kill any lingering adb servers.
244        try:
245            self._run_adb_cmd(verbose=True, args=('kill-server',),
246                timeout=constants.ADB_KILL_SERVER_TIMEOUT_SECONDS)
247        except error.CmdTimeoutError as e:
248            logging.warn(e)
249            # `adb kill-server` sometimes hangs up. Kill it more brutally.
250            try:
251                client_utils.system(
252                    'killall adb',
253                    ignore_status=True,
254                    timeout=constants.ADB_KILL_SERVER_TIMEOUT_SECONDS)
255            except error.CmdTimeoutError as e:
256                # The timeout is ignored, since the only known failure pattern
257                # b/142828365 is due to a zombie process that does not prevent
258                # starting a new server with a new adb key.
259                logging.warn(e)
260        except (error.CmdError, AttributeError):
261            pass
262
263    def _verify_hosts(self):
264        """Verify all hosts' ChromeOS consistency."""
265        # Check release builder path. E.g. cave-release/R66-10435.0.0
266        release_builder_path = set(host.get_release_builder_path()
267                                   for host in self._hosts)
268        if len(release_builder_path) > 1:
269            raise error.TestFail('Hosts\' CHROMEOS_RELEASE_BUILDER_PATH is '
270                                 'different: %s' % release_builder_path)
271
272        # Check ChromeOS ARC version.
273        arc_version = set(host.get_arc_version() for host in self._hosts)
274        if len(arc_version) > 1:
275            raise error.TestFail('Hosts\' CHROMEOS_ARC_VERSION is different: '
276                                 '%s' % arc_version)
277
278        # Check ChromeOS model for unibuild.
279        # TODO(pwang): Add a check once we find a way to detect the host's model.
280
281    def _verify_arc_hosts(self):
282        """Verify all hosts' Android configuration consistency.
283
284        This method should only be called after Android has successfully
285        booted up on all the hosts."""
286        # Check all hosts have same Android fingerprint.
287        fingerprint = set(self._run_adb_cmd(
288            host,
289            args=('shell', 'getprop', 'ro.build.fingerprint')).stdout
290            for host in self._hosts)
291        if len(fingerprint) > 1:
292            raise error.TestFail('Hosts\' supported fingerprint is different: '
293                                 '%s' % fingerprint)
294
295    def _calculate_test_count_factor(self, bundle):
296        """ Calculate the multiplicative factor for the test case number.
297
298        The value equals to the times each test case is run, which is determined
299        by the intersection of the supported ABIs of the CTS/GTS bundle and that
300        of the tested device."""
301        arm_abis = set(('armeabi-v7a', 'arm64-v8a'))
302        x86_abis = set(('x86', 'x86_64'))
303        if bundle == 'arm':
304            tradefed_abis = arm_abis
305        elif bundle == 'x86':
306            tradefed_abis = x86_abis
307        else:
308            tradefed_abis = arm_abis | x86_abis
309        self._test_count_factor = len(set(self._get_abilist()) & tradefed_abis)
310        # Avoid setting timeout=0 (None) in any case.
311        self._timeout_factor = max(1, self._test_count_factor)
312
313    def _get_adb_targets(self):
314        """Get a list of adb targets."""
315        return [self._get_adb_target(host) for host in self._hosts]
316
317    def _get_adb_target(self, host):
318        """Get the adb target format.
319
320        This method is slightly different from host.host_port, as we need to
321        explicitly specify the port so that the serial name of the adb target
322        matches."""
323        return '{}:{}'.format(host.hostname, host.port)
324
325    def _run_adb_cmd(self, host=None, **kwargs):
326        """Running adb command.
327
328        @param host: DUT that want to connect to. (None if the adb command is
329                     intended to run in the server. eg. keygen)
330        """
331        # As of N, tradefed cannot specify which adb socket to use, and uses
332        # tcp:localhost:5037 by default.
333        adb_global_option = ('-H', 'localhost', '-P', '5037')
334        if host:
335            host_port = self._get_adb_target(host)
336            adb_global_option = ('-s', host_port)
337        kwargs['args'] = adb_global_option + kwargs.get('args', ())
338        result = self._run('adb', **kwargs)
339        logging.info('adb %s:\n%s', ' '.join(kwargs.get('args')),
340                     result.stdout + result.stderr)
341        return result
342
343    def _try_adb_connect(self, host):
344        """Attempts to connect to adb on the DUT.
345
346        @param host: DUT that needs to be connected.
347        @return boolean indicating if adb connected successfully.
348        """
349        # Add ADB_TRACE=all for debugging adb connection failures.
350        env = os.environ.copy()
351        env['ADB_TRACE'] = 'all'
352        try:
353            # This may return failure due to a race condition in adb
354            # connect (b/29370989). If adb is already connected, this command
355            # will immediately return success.
356            host_port = self._get_adb_target(host)
357            result = self._run_adb_cmd(
358                host, args=('connect', host_port), verbose=True, env=env,
359                ignore_status=True,
360                timeout=constants.ADB_CONNECT_TIMEOUT_SECONDS)
361            if result.exit_status != 0:
362                return False
363
364            result = self._run_adb_cmd(host, args=('devices',), env=env,
365                timeout=constants.ADB_CONNECT_TIMEOUT_SECONDS)
366            if not re.search(r'{}\s+(device|unauthorized)'.format(
367                    re.escape(host_port)), result.stdout):
368                logging.info('No result found with pattern: %s',
369                             r'{}\s+(device|unauthorized)'.format(
370                                 re.escape(host_port)))
371                return False
372
373            # Actually test the connection with an adb command as there can be
374            # a race between detecting the connected device and actually being
375            # able to run a command with authenticated adb.
376            result = self._run_adb_cmd(
377                host, args=('shell', 'exit'), env=env, ignore_status=True,
378                timeout=constants.ADB_CONNECT_TIMEOUT_SECONDS)
379            return result.exit_status == 0
380        except error.CmdTimeoutError as e:
381            logging.warning(e)
382            return False
383
384    def _android_shell(self, host, command):
385        """Run a command remotely on the device in an android shell
386
387        This function is strictly for internal use only, as commands do not run
388        in a fully consistent Android environment. Prefer adb shell instead.
389        """
390        host.run('android-sh -c ' + pipes.quote(command))
391
392    def _connect_adb(self, host):
393        """Sets up ADB connection to the ARC container.
394
395        @param host: DUT that should be connected to.
396        """
397        logging.info('Setting up adb connection.')
398
399        # adbd may take some time to come up. Repeatedly try to connect to adb.
400        utils.poll_for_condition(
401            lambda: self._try_adb_connect(host),
402            timeout=constants.ADB_READY_TIMEOUT_SECONDS,
403            sleep_interval=constants.ADB_POLLING_INTERVAL_SECONDS)
404
405        logging.info('Successfully set up adb connection.')
406
407    def _wait_for_arc_boot(self, host):
408        """Wait until ARC is fully booted.
409
410        Tests for the presence of the intent helper app to determine whether ARC
411        has finished booting.
412        @param host: DUT that needs to be connected to.
413        """
414
415        def _intent_helper_running():
416            result = self._run_adb_cmd(
417                host,
418                args=('shell', 'pgrep', '-f', 'org.chromium.arc.intent_helper'),
419                ignore_status=True)
420            return bool(result.stdout)
421
422        utils.poll_for_condition(
423            _intent_helper_running,
424            exception=error.TestFail(
425                'Error: Timed out waiting for intent helper.'),
426            timeout=constants.ARC_READY_TIMEOUT_SECONDS,
427            sleep_interval=constants.ARC_POLLING_INTERVAL_SECONDS)
428
429    def _disable_adb_install_dialog(self, host):
430        """Disables a dialog shown on adb install execution.
431
432        By default, on adb install execution, the "Allow Google to regularly
433        check device activity ... " dialog is shown. It requires manual user
434        action, so tests would be blocked at that point.
435        This method disables the dialog.
436        """
437        logging.info('Disabling the adb install dialog.')
438        result = self._run_adb_cmd(
439            host,
440            verbose=True,
441            args=('shell', 'settings', 'put', 'global',
442                  'verifier_verify_adb_installs', '0'))
443        logging.info('Disable adb dialog: %s', result.stdout)
444
445        # Android "RescueParty" feature can reset the above settings when the
446        # device crashes often. Disable the rescue during testing.
447        # Keeping only for P and below since R has SELinux restrictions.
448        if self._get_android_version() < 29:
449            self._android_shell(host, 'setprop persist.sys.disable_rescue true')
450
451    def _ready_arc(self):
452        """Ready ARC and adb in parallel for running tests via tradefed."""
453        key_path = os.path.join(self.tmpdir, 'test_key')
454        with open(key_path, 'w') as f:
455            f.write(constants.PRIVATE_KEY)
456        os.environ['ADB_VENDOR_KEYS'] = key_path
457
458        for _ in range(2):
459            try:
460                # Kill existing adb server to ensure that the env var is picked
461                # up, and reset any previous bad state.
462                self._kill_adb_server()
463
464                # TODO(pwang): connect_adb takes 10+ seconds on a single DUT.
465                #              Parallelize it if it becomes a bottleneck.
466                for host in self._hosts:
467                    self._connect_adb(host)
468                    self._disable_adb_install_dialog(host)
469                    self._wait_for_arc_boot(host)
470                self._verify_arc_hosts()
471                return
472            except (utils.TimeoutError, error.CmdTimeoutError):
473                logging.error('Failed to set up adb connection. Retrying...')
474        raise error.TestFail('Error: Failed to set up adb connection')
475
476    def _safe_makedirs(self, path):
477        """Creates a directory at |path| and its ancestors.
478
479        Unlike os.makedirs(), errors are ignored if the directory exists.
480        """
481        try:
482            os.makedirs(path)
483        except OSError as e:
484            if not (e.errno == errno.EEXIST and os.path.isdir(path)):
485                raise
486
487    def _unzip(self, filename):
488        """Unzip the file.
489
490        The destination directory name will be the stem of filename.
491        E.g., _unzip('foo/bar/baz.zip') will create a directory at
492        'foo/bar/baz' and then inflate the zip's content under that directory.
493        If there is already a directory at the stem, that directory is reused.
494
495        @param filename: Path to the zip archive.
496        @return Path to the inflated directory.
497        """
498        destination = os.path.splitext(filename)[0]
499        if os.path.isdir(destination):
500            logging.info('Skipping unzip %s, reusing content of %s', filename,
501                         destination)
502            return destination
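        # Inflate into a temporary sibling directory first and rename it into
        # place afterwards, so a partially unzipped tree is never mistaken for
        # a valid cache entry.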
503        tmp = tempfile.mkdtemp(dir=os.path.dirname(filename))
504        logging.info('Begin unzip %s', filename)
505        try:
506            utils.run('unzip', args=('-d', tmp, filename))
507        except:
508            logging.error('Failed unzip, cleaning up.')
509            # Clean up just created files.
510            shutil.rmtree(tmp, ignore_errors=True)
511            raise
512        logging.info('End unzip %s', filename)
513        try:
514            os.renames(tmp, destination)
515        except:
516            logging.error('Failed rename, cleaning up.')
517            shutil.rmtree(destination, ignore_errors=True)
518            shutil.rmtree(tmp, ignore_errors=True)
519            raise
520        return destination
521
522    def _dir_size(self, directory):
523        """Compute recursive size in bytes of directory."""
524        size = 0
525        for root, _, files in os.walk(directory):
526            for name in files:
527                try:
528                    size += os.path.getsize(os.path.join(root, name))
529                except OSError:
530                    logging.error('Inaccessible path (crbug/793696): %s/%s',
531                                  root, name)
532        return size
533
534    def _invalidate_download_cache(self):
535        """Marks the download cache for deferred deletion.
536
537        Used to make cache file operations atomic across failures and reboots.
538        The caller is responsible for holding the lock to the cache.
539        """
540        if not os.path.exists(self._tradefed_cache_dirty):
541            os.mkdir(self._tradefed_cache_dirty)
542
543    def _validate_download_cache(self):
544        """Validates and unmarks the download cache from deletion.
545
546        Used to make cache file operations atomic across failures and reboots.
547        The caller is responsible for holding the lock to the cache.
548        """
549        shutil.rmtree(self._tradefed_cache_dirty, ignore_errors=True)
550
551    def _clean_download_cache_if_needed(self, force=False):
552        """Invalidates cache to prevent it from growing too large."""
553        # If the cache is large enough to hold a working set, we can simply
554        # delete everything without thrashing.
555        # TODO(ihf): Investigate strategies like LRU.
556        clean = force
557        with tradefed_utils.lock(self._tradefed_cache_lock):
558            size = self._dir_size(self._tradefed_cache)
559            if size > constants.TRADEFED_CACHE_MAX_SIZE:
560                logging.info(
561                    'Current cache size=%d got too large. Clearing %s.', size,
562                    self._tradefed_cache)
563                clean = True
564            else:
565                logging.info('Current cache size=%d of %s.', size,
566                             self._tradefed_cache)
567            if os.path.exists(self._tradefed_cache_dirty):
568                logging.info('Found dirty cache.')
569                clean = True
570            if clean:
571                logging.warning('Cleaning download cache.')
572                shutil.rmtree(self._tradefed_cache, ignore_errors=True)
573                self._safe_makedirs(self._tradefed_cache)
574                shutil.rmtree(self._tradefed_cache_dirty, ignore_errors=True)
575
576    def _download_to_cache(self, uri):
577        """Downloads the uri from the storage server.
578
579        It always checks the cache for available binaries first and skips
580        download if binaries are already in cache.
581
582        The caller of this function is responsible for holding the cache lock.
583
584        @param uri: The Google Storage, dl.google.com or local uri.
585        @return Path to the downloaded object, name.
586        """
587        # We are hashing the uri instead of the binary. This is acceptable, as
588        # the uris are supposed to contain version information and an object is
589        # not supposed to be changed once created.
590        output_dir = os.path.join(self._tradefed_cache,
591                                  hashlib.md5(uri).hexdigest())
592        # Check for existence of cache entry. We check for directory existence
593        # instead of file existence, so that _install_bundle can delete original
594        # zip files to save disk space.
595        if os.path.exists(output_dir):
596            # TODO(crbug.com/800657): Mitigation for the invalid state. Normally
597            # this should not happen, but when a lock is forcibly broken due to
598            # high IO load, multiple processes may enter the critical section
599            # and leave a bad state permanently.
600            if os.listdir(output_dir):
601                logging.info('Skipping download of %s, reusing content of %s.',
602                             uri, output_dir)
603                return os.path.join(output_dir,
604                    os.path.basename(urlparse.urlparse(uri).path))
605            logging.error('Empty cache entry detected %s', output_dir)
606        return self._download_to_dir(uri, output_dir)
607
608    def _download_to_dir(self, uri, output_dir):
609        """Downloads the gs|http|https|file uri from the storage server.
610
611        @param uri: The Google Storage, dl.google.com or local uri.
612        @param output_dir: Directory where the downloaded file is placed.
613        @return Path to the downloaded object, name.
614        """
615        # Split uri into 3 pieces for use by gsutil and also by wget.
616        parsed = urlparse.urlparse(uri)
617        filename = os.path.basename(parsed.path)
618        output = os.path.join(output_dir, filename)
619
620        self._safe_makedirs(output_dir)
621        if parsed.scheme not in ['gs', 'http', 'https', 'file']:
622            raise error.TestFail(
623                'Error: Unknown download scheme %s' % parsed.scheme)
624        if parsed.scheme in ['http', 'https']:
625            logging.info('Using wget to download %s to %s.', uri, output_dir)
626            # We are downloading 1 file at a time, hence using -O over -P.
627            utils.run(
628                'wget',
629                args=('--report-speed=bits', '-O', output, uri),
630                verbose=True)
631            return output
632
633        if parsed.scheme in ['file']:
634            logging.info('Copy the local file from %s to %s.', parsed.path,
635                         output_dir)
636            utils.run(
637                'cp',
638                args=('-f', parsed.path, output),
639                verbose=True)
640            return output
641
642        # If the machine can access the storage server directly,
643        # defer to "gsutil" for downloading.
644        logging.info('Downloading %s directly to %s.', uri, output)
645        # b/17445576: gsutil rsync of individual files is not implemented.
646        res = utils.run('gsutil',
647                        args=('cp', uri, output),
648                        verbose=True,
649                        ignore_status=True)
650        if not res or res.exit_status != 0:
651            logging.warning('Retrying download...')
652            utils.run('gsutil', args=('cp', uri, output), verbose=True)
653        return output
654
655    def _instance_copyfile(self, cache_path):
656        """Makes a copy of a file from the (shared) cache to a wholy owned
657        local instance. Also copies one level of cache directoy (MD5 named).
658        """
659        filename = os.path.basename(cache_path)
660        dirname = os.path.basename(os.path.dirname(cache_path))
661        instance_dir = os.path.join(self._tradefed_install, dirname)
662        # Make sure destination directory is named the same.
663        self._safe_makedirs(instance_dir)
664        instance_path = os.path.join(instance_dir, filename)
665        shutil.copyfile(cache_path, instance_path)
666        return instance_path
667
668    def _instance_copytree(self, cache_path):
669        """Makes a copy of a directory from the (shared and writable) cache to
670        a wholly owned local instance.
671
672        TODO(ihf): Consider using cp -al to only copy links. Not sure if this
673        is really a benefit across the container boundary, but it is risky due
674        to the possibility of corrupting the original files by an lxc instance.
675        """
676        # We keep the top 2 names from the cache_path = .../dir1/dir2.
677        dir2 = os.path.basename(cache_path)
678        dir1 = os.path.basename(os.path.dirname(cache_path))
679        instance_path = os.path.join(self._tradefed_install, dir1, dir2)
680        logging.info('Copying %s to instance %s', cache_path, instance_path)
681        shutil.copytree(cache_path, instance_path)
682        return instance_path
683
684    def _install_bundle(self, gs_uri):
685        """Downloads a zip file, installs it and returns the local path.
686
687        @param gs_uri: GS bucket that contains the necessary files.
688        """
689        if not gs_uri.endswith('.zip'):
690            raise error.TestFail('Error: Not a .zip file %s.' % gs_uri)
691        # Atomic write through of file.
692        with tradefed_utils.lock(self._tradefed_cache_lock):
693            # Atomic operations.
694            self._invalidate_download_cache()
695            # Download is lazy (cache_path may not actually exist if
696            # cache_unzipped does).
697            cache_path = self._download_to_cache(gs_uri)
698            # Unzip is lazy as well (but cache_unzipped guaranteed to
699            # exist).
700            cache_unzipped = self._unzip(cache_path)
701            # To save space we delete the original zip file. This works as
702            # _download only checks existence of the cache directory for
703            # lazily skipping download, and unzip itself will bail if the
704            # unzipped destination exists. Hence we don't need the original
705            # anymore.
706            if os.path.exists(cache_path):
707                logging.info('Deleting original %s', cache_path)
708                os.remove(cache_path)
709            # Erase dirty marker from disk.
710            self._validate_download_cache()
711            # We always copy files to give tradefed a clean copy of the
712            # bundle.
713            unzipped_local = self._instance_copytree(cache_unzipped)
714        return unzipped_local
715
716    def _install_files(self, gs_dir, files, permission):
717        """Installs binary tools."""
718        for filename in files:
719            gs_uri = os.path.join(gs_dir, filename)
720            # Atomic write through of file.
721            with tradefed_utils.lock(self._tradefed_cache_lock):
722                # We don't want to leave a corrupt cache for other jobs.
723                self._invalidate_download_cache()
724                cache_path = self._download_to_cache(gs_uri)
725                # Mark cache as clean again.
726                self._validate_download_cache()
727                # This only affects the current job, so not part of cache
728                # validation.
729                local = self._instance_copyfile(cache_path)
730            os.chmod(local, permission)
731            # Keep track of PATH.
732            self._install_paths.append(os.path.dirname(local))
733
734    def _prepare_media(self, media_asset):
735        """Downloads and offers the cached media files to tradefed."""
736        if media_asset.uri:
737            media = self._install_bundle(media_asset.uri)
738            if os.path.islink(media_asset.localpath):
739                os.unlink(media_asset.localpath)
740            if os.path.isdir(media_asset.localpath):
741                shutil.rmtree(media_asset.localpath)
742            self._safe_makedirs(os.path.dirname(media_asset.localpath))
743            os.symlink(media, media_asset.localpath)
744
745            logging.info('Offered %s as a media directory in %s',
746                    media, media_asset.localpath)
747
748        # Records the number of existing media bundles, to check later.
749        if os.path.isdir(media_asset.localpath):
750            self._num_media_bundles = len(
751                    os.listdir(media_asset.localpath))
752
753    def _cleanup_media(self, media_asset):
754        """Clean up the local copy of cached media files."""
755        self._fail_on_unexpected_media_download(media_asset)
756        if os.path.islink(media_asset.localpath):
757            path = os.readlink(media_asset.localpath)
758            os.unlink(media_asset.localpath)
759            if os.path.isdir(path):
760                logging.info('Cleaning up media files in %s', path)
761                shutil.rmtree(path)
762
763    def _fail_on_unexpected_media_download(self, media_asset):
764        if os.path.isdir(media_asset.localpath):
765            contents = os.listdir(media_asset.localpath)
766            if len(contents) > self._num_media_bundles:
767                raise error.TestFail(
768                    'Failed: Unexpected media bundle was added %s' % contents)
769
770    def _run(self, *args, **kwargs):
771        """Executes the given command line.
772
773        To support SDK tools, such as adb or aapt, this adds _install_paths
774        to the extra_paths. Before invoking this, ensure _install_files() has
775        been called.
776        """
777        kwargs['extra_paths'] = (
778            kwargs.get('extra_paths', []) + self._install_paths)
779        return utils.run(*args, **kwargs)
780
781    def _collect_tradefed_global_log(self, result, destination):
782        """Collects the tradefed global log.
783
784        @param result: The result object from utils.run.
785        @param destination: Autotest result directory (destination of logs).
786        """
787        match = re.search(r'Saved log to /tmp/(tradefed_global_log_.*\.txt)',
788                          result.stdout)
789        if not match:
790            logging.debug(result.stdout)
791            logging.error('no tradefed_global_log file is found')
792            return
793
794        name = match.group(1)
795        dest = os.path.join(destination, 'logs', 'tmp')
796        self._safe_makedirs(dest)
797        shutil.copy(os.path.join('/tmp', name), os.path.join(dest, name))
798
799    def _get_expected_failures(self, directory, bundle_abi):
800        """Return a list of expected failures or no test module.
801
802        @param directory: A directory with expected no tests or failures files.
803        @param bundle_abi: 'arm' or 'x86' if the test is for the particular ABI.
804                           None otherwise (like GTS, built for multi-ABI.)
805        @return: A list of expected failures or no test modules for the current
806                 testing device.
807        """
808        # Load waivers and manual tests so TF doesn't re-run them.
809        expected_fail_files = []
810        test_board = self._get_board_name()
811        test_arch = self._get_board_arch()
812        sdk_ver = self._get_android_version()
813        first_api_level = self._get_first_api_level()
814        expected_fail_dir = os.path.join(self.bindir, directory)
815        if os.path.exists(expected_fail_dir):
816            expected_fail_files += glob.glob(expected_fail_dir + '/*.yaml')
817
818        waivers = cts_expected_failure_parser.ParseKnownCTSFailures(
819            expected_fail_files)
820        return waivers.find_waivers(test_arch, test_board, bundle_abi, sdk_ver,
821                                    first_api_level)
822
823    def _get_abilist(self):
824        """Return the abilist supported by calling adb command.
825
826        This method should only be called after the android environment is
827        successfully initialized."""
828        if not self._abilist:
829            for _ in range(3):
830                abilist_str = self._run_adb_cmd(
831                    self._hosts[0],
832                    args=('shell', 'getprop',
833                          'ro.product.cpu.abilist')).stdout.strip()
834                if abilist_str:
835                    self._abilist = abilist_str.split(',')
836                    break
837                else:
838                    # TODO(kinaba): Sometimes getprop returns an empty string.
839                    # Investigate why. For now we mitigate the bug by retries.
840                    logging.error('Empty abilist.')
841        return self._abilist
842
843    def _get_release_branch_number(self):
844        """Returns the DUT branch number (z of Rxx-yyyyy.z.w) or 0 on error."""
845        if not self._release_branch_number:
846            ver = (self._hosts[0].get_release_version() or '').split('.')
847            self._release_branch_number = (int(ver[1]) if len(ver) >= 3 else 0)
848        return self._release_branch_number
849
850    def _get_board_arch(self):
851        """Return target DUT arch name."""
852        if not self._board_arch:
853            self._board_arch = ('arm' if self._hosts[0].get_cpu_arch() == 'arm'
854                else 'x86')
855        return self._board_arch
856
857    def _get_board_name(self):
858        """Return target DUT board name."""
859        if not self._board_name:
860            self._board_name = self._hosts[0].get_board().split(':')[1]
861        return self._board_name
862
863    def _get_android_version(self):
864        """Return target DUT Android SDK version"""
865        # TODO(kinaba): factor this out to server/hosts/cros_host.py
866        if not self._android_version:
867            self._android_version = self._hosts[0].run(
868                'grep ANDROID_SDK /etc/lsb-release',
869                ignore_status=True).stdout.rstrip().split('=')[1]
870        return int(self._android_version)
871
872    def _get_first_api_level(self):
873        """Return target DUT Android first API level."""
874        if not self._first_api_level:
875            self._first_api_level = self._hosts[0].get_arc_first_api_level()
876        return int(self._first_api_level)
877
878    def _get_max_retry(self, max_retry):
879        """Return the maximum number of retries.
880
881        @param max_retry: max_retry specified in the control file.
882        @return: number of retries for this specific host.
883        """
884        if max_retry is None:
885            max_retry = self._get_branch_retry(self._BRANCH_DEFAULT_RETRY)
886        candidate = [max_retry]
887        candidate.append(self._get_board_retry())
888        candidate.append(self._get_branch_retry(self._BRANCH_MAX_RETRY))
889        return min(x for x in candidate if x is not None)
890
891    def _get_board_retry(self):
892        """Return the maximum number of retries for DUT board name.
893
894        @return: number of max_retry or None.
895        """
896        board = self._get_board_name()
897        if board in self._BOARD_MAX_RETRY:
898            return self._BOARD_MAX_RETRY[board]
899        logging.info('No board retry specified for board: %s', board)
900        return None
901
902    def _get_branch_retry(self, table):
903        """Returns the retry count for DUT branch number defined in |table|."""
904        number = self._get_release_branch_number()
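        # |table| is sorted by ascending lowerbound; scan it from the largest
        # lowerbound down and return the first entry the branch number reaches.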
905        for lowerbound, retry in reversed(table):
906            if lowerbound <= number:
907                return retry
908        logging.warning('Could not establish channel. Using retry=0.')
909        return 0
910
911    def _run_commands(self, commands, **kwargs):
912        """Run commands on all the hosts."""
913        for host in self._hosts:
914            for command in commands:
915                logging.info('RUN: %s\n', command)
916                output = host.run(command, **kwargs)
917                logging.info('END: %s\n', command)
918                logging.debug(output)
919
920    def _override_powerd_prefs(self):
921        """Overrides powerd prefs to prevent screen from turning off, complying
922        with CTS requirements.
923
924        This is a remote version of PowerPrefChanger which ensures overridden
925        policies won't persist across reboots by bind-mounting onto the config
926        directory.
927        """
928        pref_dir = constants.POWERD_PREF_DIR
929        temp_dir = constants.POWERD_TEMP_DIR
930        commands = (
931                'cp -r %s %s' % (pref_dir, temp_dir),
932                'echo 1 > %s/ignore_external_policy' % temp_dir,
933                'echo 0 | tee %s/{,un}plugged_{dim,off,suspend}_ms' % temp_dir,
934                'mount --bind %s %s' % (temp_dir, pref_dir),
935                'restart powerd',
936        )
937        try:
938            self._run_commands(commands)
939        except (error.AutoservRunError, error.AutoservSSHTimeout):
940            logging.warning('Failed to override powerd policy, tests depending '
941                            'on screen being always on may fail.')
942
943    def _restore_powerd_prefs(self):
944        """Restores powerd prefs overrided by _override_powerd_prefs()."""
945        pref_dir = constants.POWERD_PREF_DIR
946        temp_dir = constants.POWERD_TEMP_DIR
947        commands = (
948                'umount %s' % pref_dir,
949                'restart powerd',
950                'rm -rf %s' % temp_dir,
951        )
952        try:
953            self._run_commands(commands)
954        except (error.AutoservRunError, error.AutoservSSHTimeout):
955            logging.warning('Failed to restore powerd policy, overridden policy '
956                            'will persist until device reboot.')
957
958    def _clean_crash_logs(self):
959        try:
960            self._run_commands(['rm -f /home/chronos/crash/*'])
961        except (error.AutoservRunError, error.AutoservSSHTimeout):
962            logging.warning('Failed to clean up crash logs.')
963
964    def _run_and_parse_tradefed(self, command):
965        """Kick off the tradefed command.
966
967        @param command: Lists of command tokens.
968        @raise TestFail: when a test failure is detected.
969        @return: tuple of (tests, pass, fail, notexecuted) counts.
970        """
971        target_argument = []
972        for host in self._hosts:
973            target_argument += ['-s', self._get_adb_target(host)]
974        shard_argument = []
975        if len(self._hosts) > 1:
976            if self._SHARD_CMD:
977                shard_argument = [self._SHARD_CMD, str(len(self._hosts))]
978            else:
979                logging.warning('cts-tradefed shard command isn\'t defined, '
980                                'falling back to use single device.')
981        command = command + target_argument + shard_argument
982
983        try:
984            output = self._run_tradefed(command)
985        except Exception as e:
986            self._log_java_version()
987            if not isinstance(e, error.CmdTimeoutError):
988                # In case this happened due to file corruptions, try to
989                # force to recreate the cache.
990                logging.error('Failed to run tradefed! Cleaning up now.')
991                self._clean_download_cache_if_needed(force=True)
992            raise
993
994        result_destination = self._default_tradefed_base_dir()
995        # Gather the global log first. Datetime parsing below can abort the test
996        # if tradefed startup had failed. Even then the global log is useful.
997        self._collect_tradefed_global_log(output, result_destination)
998        # Result parsing must come after all other essential operations as test
999        # warnings, errors and failures can be raised here.
1000        base = self._default_tradefed_base_dir()
1001        path = tradefed_utils.get_test_result_xml_path(base)
1002        return tradefed_utils.parse_tradefed_testresults_xml(
1003            test_result_xml_path=path,
1004            waivers=self._waivers)
1005
1006    def _setup_result_directories(self):
1007        """Sets up the results and logs directories for tradefed.
1008
1009        Tradefed saves the logs and results at:
1010          self._repository/results/$datetime/
1011          self._repository/results/$datetime.zip
1012          self._repository/logs/$datetime/
1013        Because other tools rely on the currently chosen Google storage paths
1014        we need to keep destination_results in:
1015          self.resultsdir/android-cts/results/$datetime/
1016          self.resultsdir/android-cts/results/$datetime.zip
1017          self.resultsdir/android-cts/results/logs/$datetime/
1018        To bridge between them, create symlinks from the former to the latter.
1019        """
1020        logging.info('Setting up tradefed results and logs directories.')
1021
1022        results_destination = self._default_tradefed_base_dir()
1023        logs_destination = os.path.join(results_destination, 'logs')
1024        directory_mapping = [
1025            (os.path.join(self._repository, 'results'), results_destination),
1026            (os.path.join(self._repository, 'logs'), logs_destination),
1027        ]
1028
1029        for (tradefed_path, final_path) in directory_mapping:
1030            if os.path.exists(tradefed_path):
1031                shutil.rmtree(tradefed_path)
1032            self._safe_makedirs(final_path)
1033            os.symlink(final_path, tradefed_path)
1034
1035    def _default_tradefed_base_dir(self):
1036        return os.path.join(self.resultsdir, self._get_tradefed_base_dir())
1037
1038    def _install_plan(self, subplan):
1039        """Copy test subplan to CTS-TF.
1040
1041        @param subplan: CTS subplan to be copied into TF.
1042        """
1043        logging.info('Install subplan: %s', subplan)
1044        subplans_tf_dir = os.path.join(self._repository, 'subplans')
1045        if not os.path.exists(subplans_tf_dir):
1046            os.makedirs(subplans_tf_dir)
1047        test_subplan_file = os.path.join(self.bindir, 'subplans',
1048                                         '%s.xml' % subplan)
1049        try:
1050            shutil.copy(test_subplan_file, subplans_tf_dir)
1051        except (shutil.Error, OSError, IOError) as e:
1052            raise error.TestFail(
1053                'Error: failed to copy test subplan %s to CTS bundle. %s' %
1054                (test_subplan_file, e))
1055
1056    def _should_skip_test(self, _bundle):
1057        """Some tests are expected to fail and are skipped.
1058
1059        Subclasses should override with specific details.
1060        """
1061        return False
1062
1063    def _should_reboot(self, steps):
1064        """Oracle to decide if DUT should reboot or just restart Chrome.
1065
1066        For now we will not reboot after the first two iterations, but on all
1067        iterations afterward as before. In particular this means that most CTS
1068        tests will now not get a "clean" machine, but one on which tests ran
1069        before. But we will still reboot after persistent failures, hopefully
1070        not causing too many flakes down the line.
1071        """
1072        if steps < 3:
1073            return False
1074        return True
1075
1076    def _copy_extra_artifacts_dut(self, extra_artifacts, host, output_dir):
1077        """ Upload the custom artifacts """
1078        self._safe_makedirs(output_dir)
1079
1080        for artifact in extra_artifacts:
1081            logging.info('Copying extra artifacts from "%s" to "%s".',
1082                         artifact, output_dir)
1083            try:
1084                self._run_adb_cmd(host, verbose=True, timeout=120,
1085                                  args=('pull', artifact, output_dir))
1086            except:
1087                # Maybe ADB connection failed, or the artifacts don't exist.
1088                logging.exception('Copying extra artifacts failed.')
1089
1090    def _copy_extra_artifacts_host(self, extra_artifacts, host, output_dir):
1091        """ Upload the custom artifacts """
1092        self._safe_makedirs(output_dir)
1093
1094        for artifact in extra_artifacts:
1095            logging.info('Copying extra artifacts from "%s" to "%s".',
1096                         artifact, output_dir)
1097            for extracted_path in glob.glob(artifact):
1098                logging.info('... %s', extracted_path)
1099                # Move it so that it is not collected again in future retries.
1100                shutil.move(extracted_path, output_dir)
1101
1102    def _run_tradefed_list_results(self):
1103        """Run the `tradefed list results` command.
1104
1105        @return: tuple of the last (session_id, pass, fail, all_done?).
1106        """
1107
1108        # Fix b/143580192: We set the timeout to 3 min. It never takes more than
1109        # 10s on light disk load.
1110        output = self._run_tradefed_with_timeout(['list', 'results'], 180)
1111
1112        # Parses the last session from the output that looks like:
1113        #
1114        # Session  Pass  Fail  Modules Complete ...
1115        # 0        90    10    1 of 2
1116        # 1        199   1     2 of 2
1117        # ...
1118        lastmatch = None
1119        for m in re.finditer(r'^(\d+)\s+(\d+)\s+(\d+)\s+(\d+) of (\d+)',
1120                             output.stdout, re.MULTILINE):
1121            session, passed, failed, done, total = map(int,
1122                                                       m.group(1, 2, 3, 4, 5))
1123            lastmatch = (session, passed, failed, done == total)
1124        return lastmatch
1125
1126    def _tradefed_retry_command(self, template, session_id):
1127        raise NotImplementedError('Subclass should override this function')
1128
1129    def _tradefed_run_command(self, template):
1130        raise NotImplementedError('Subclass should override this function')
1131
1132    def _tradefed_cmd_path(self):
1133        raise NotImplementedError('Subclass should override this function')
1134
1135    def _tradefed_env(self):
1136        return None
1137
1138    def _run_tradefed_with_timeout(self, command, timeout):
1139        tradefed = self._tradefed_cmd_path()
1140        with tradefed_utils.adb_keepalive(self._get_adb_targets(),
1141                                          self._install_paths):
1142            logging.info('RUN(timeout=%d): %s', timeout,
1143                         ' '.join([tradefed] + command))
1144            output = self._run(
1145                tradefed,
1146                args=tuple(command),
1147                env=self._tradefed_env(),
1148                timeout=timeout,
1149                verbose=True,
1150                ignore_status=False,
1151                # Make sure to tee tradefed stdout/stderr to autotest logs
1152                # continuously during the test run.
1153                stdout_tee=utils.TEE_TO_LOGS,
1154                stderr_tee=utils.TEE_TO_LOGS)
1155            logging.info('END: %s\n', ' '.join([tradefed] + command))
1156        return output
1157
1158    def _run_tradefed(self, command):
1159        timeout = self._timeout * self._timeout_factor
1160        if self._job_deadline is not None:
1161            clipped = int(min(timeout, self._job_deadline - time.time()))
1162            # Even the shortest tradefed run takes about 1.5 minutes. Use twice
1163            # that value as the threshold below which no meaningful test can run.
1164            if clipped < 3 * 60:
1165                raise error.TestError(
1166                        'Hitting job time limit: only %s seconds left' %
1167                        clipped)
1168            timeout = clipped
1169        return self._run_tradefed_with_timeout(command, timeout)
1170
1171    def _run_tradefed_with_retries(self,
1172                                   test_name,
1173                                   run_template,
1174                                   retry_template,
1175                                   timeout,
1176                                   media_asset=None,
1177                                   enable_default_apps=False,
1178                                   target_module=None,
1179                                   target_plan=None,
1180                                   executable_test_count=None,
1181                                   bundle=None,
1182                                   extra_artifacts=[],
1183                                   extra_artifacts_host=[],
1184                                   cts_uri=None,
1185                                   login_precondition_commands=[],
1186                                   precondition_commands=[],
1187                                   prerequisites=[]):
1188        """Run CTS/GTS with retry logic.
1189
1190        We first kick off the specified module. Then rerun just the failures
1191        on the next MAX_RETRY iterations.
1192        """
1193        for prereq in prerequisites:
1194            result = tradefed_prerequisite.check(prereq, self._hosts)
1195            if not result[0]:
1196                raise error.TestError(result[1])
1197
1198        # On dev and beta channels timeouts are strict; on stable they are lenient.
1199        self._timeout = timeout
1200        if (self._get_release_branch_number() >=
1201                constants.APPROXIMATE_STABLE_BRANCH_NUMBER):
1202            self._timeout += 3600
1203
1204        if self._should_skip_test(bundle):
1205            logging.warning('Skipped test %s', ' '.join(test_name))
1206            return
1207
1208        steps = -1  # For historic reasons the first iteration is not counted.
1209        self.summary = ''
1210        accurate = []
1211        board = self._get_board_name()
1212        session_id = None
1213        toggle_ndk = board == 'rammus-arc-r' # Toggle to ndk translation for this board
1214        nativebridge64_experiment = (self._get_release_branch_number() == 0)
1215
1216        self._setup_result_directories()
1217        if media_asset:
1218            self._prepare_media(media_asset)
1219
1220        # This loop retries failures. For this reason please do not raise
1221        # TestFail in this loop if you suspect the failure might be fixed
1222        # in the next loop iteration.
1223        while steps < self._max_retry:
1224            steps += 1
1225            keep_media = media_asset and media_asset.uri and steps >= 1
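            # On retry iterations keep the existing user profile (via
            # dont_override_profile below) so that media pushed during the
            # first iteration survives between runs.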
1226            self._run_commands(login_precondition_commands, ignore_status=True)
1227            # TODO(kinaba): Make it a general config (per-model choice
1228            # of tablet,clamshell,default) if the code below works.
1229            if utils.is_in_container() and not client_utils.is_moblab():
1230                # Force all hatch devices to run the test in laptop mode,
1231                # regardless of their physical placement.
1232                if board == 'hatch' or board == 'hatch-arc-r':
1233                    self._run_commands(
1234                        ['inject_powerd_input_event --code=tablet --value=0'],
1235                        ignore_status=True)
1236            with login.login_chrome(
1237                    hosts=self._hosts,
1238                    board=board,
1239                    dont_override_profile=keep_media,
1240                    enable_default_apps=enable_default_apps,
1241                    toggle_ndk=toggle_ndk,
1242                    nativebridge64=nativebridge64_experiment) as current_logins:
1243                if self._should_reboot(steps):
1244                    # TODO(rohitbm): Evaluate if power cycle really helps with
1245                    # Bluetooth test failures, and then make the implementation
1246                    # more strict by first running complete restart and reboot
1247                    # retries and then performing a power cycle.
1248                    #
1249                    # Currently, (steps + 1 == self._max_retry) means that
1250                    # hard_reboot is attempted after the current cycle has failed;
1251                    # the one remaining step then runs on the rebooted DUT.
1252                    hard_reboot = (self._hard_reboot_on_failure
1253                        and steps + 1 == self._max_retry)
1254                    for current_login in current_logins:
1255                        current_login.need_reboot(hard_reboot=hard_reboot)
1256                self._ready_arc()
1257                self._calculate_test_count_factor(bundle)
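                # Note: _calculate_test_count_factor() presumably feeds the
                # _timeout_factor applied in _run_tradefed(), scaling the
                # timeout to the amount of work in this bundle.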
1258                self._run_commands(precondition_commands, ignore_status=True)
1259
1260                # Run tradefed.
1261                if session_id is None:
1262                    if target_plan is not None:
1263                        self._install_plan(target_plan)
1264
1265                    logging.info('Running %s:', test_name)
1266                    command = self._tradefed_run_command(run_template)
1267                else:
1268                    logging.info('Retrying failures of %s with session_id %d:',
1269                                 test_name, session_id)
1270                    command = self._tradefed_retry_command(retry_template,
1271                                                           session_id)
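                # The retry command reuses the tradefed session recorded in
                # the previous iteration, so only the tests that did not pass
                # there are re-executed rather than the whole plan.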
1272
1273                if media_asset and media_asset.uri:
1274                    # Clean up crash logs from previous sessions to ensure
1275                    # enough disk space for 16GB storage devices: b/156075084.
1276                    if not keep_media:
1277                        self._clean_crash_logs()
1278                # TODO(b/137917339): Only prevent screen from turning off for
1279                # media tests. Remove this check once the GPU issue is fixed.
1280                keep_screen_on = (media_asset and media_asset.uri) or (
1281                        target_module and "Media" in target_module)
1282                if keep_screen_on:
1283                    self._override_powerd_prefs()
1284                try:
1285                    waived_tests, acc = self._run_and_parse_tradefed(command)
1286                finally:
1287                    if keep_screen_on:
1288                        self._restore_powerd_prefs()
1289                if media_asset:
1290                    self._fail_on_unexpected_media_download(media_asset)
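                    # Presumably this guards against tradefed silently
                    # re-downloading media over the network instead of using
                    # the copy prepared above, which would hide push failures.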
1291                result = self._run_tradefed_list_results()
1292                if not result:
1293                    logging.error('Did not find any test results. Retry.')
1294                    for current_login in current_logins:
1295                        current_login.need_reboot()
1296                    continue
1297
1298                last_waived = len(waived_tests)
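                # _run_tradefed_list_results() yields
                # (session_id, passed, failed, all_done) for the latest
                # invocation.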
1299                last_session_id, last_passed, last_failed, last_all_done =\
1300                    result
1301
1302                if last_failed > last_waived or not utils.is_in_container():
1303                    for host in self._hosts:
1304                        dir_name = "%s-step%02d" % (host.hostname, steps)
1305                        output_dir = os.path.join(
1306                            self.resultsdir, 'extra_artifacts', dir_name)
1307                        self._copy_extra_artifacts_dut(
1308                            extra_artifacts, host, output_dir)
1309                        self._copy_extra_artifacts_host(
1310                            extra_artifacts_host, host, output_dir)
1311
1312                if last_passed + last_failed > 0:
1313                    # At least one test had run, which means the media push step
1314                    # of tradefed didn't fail. To free up the storage earlier,
1315                    # delete the copy on the server side. See crbug.com/970881
1316                    if media_asset:
1317                        self._cleanup_media(media_asset)
1318
1319                # If the result is |acc|urate according to the log, or the
1320                # inaccuracy is recognized by tradefed (not all_done), then
1321                # it is fine.
1322                accurate.append(acc or not last_all_done)
1323                if last_failed < last_waived:
1324                    logging.error(
1325                        'Error: Internal waiver bookkeeping has become '
1326                        'inconsistent (f=%d, w=%d)', last_failed, last_waived)
1327
1328                msg = 'run' if session_id is None else ' retry'
1329                msg += '(p=%s, f=%s, w=%s)' % (last_passed, last_failed,
1330                                               last_waived)
1331                self.summary += msg
1332                logging.info('RESULT: %s %s', msg, result)
1333
1334                # Overwrite last_all_done if the executed test count is equal
1335                # to the known test count of the job.
1336                if (not last_all_done and executable_test_count is not None and
1337                    (last_passed + last_failed in executable_test_count)):
1338                    logging.warning('Overwriting all_done as True, since the '
1339                                    'explicitly set executable_test_count '
1340                                    'tests have run.')
1341                    last_all_done = True
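                # Note: executable_test_count is a collection of acceptable
                # totals (hence the 'in' check above), presumably because the
                # expected count can differ per configuration.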
1342
1343                # Check for no-test modules. We use the "all_done" indicator
1344                # provided by list_results to decide if there are outstanding
1345                # modules to iterate over (similar to missing tests just on a
1346                # per-module basis).
1347                notest = (last_passed + last_failed == 0 and last_all_done)
1348                if target_module in self._notest_modules:
1349                    if notest:
1350                        logging.info('Package has no tests as expected.')
1351                        return
1352                    else:
1353                        # We expected no tests, but the new bundle drop must
1354                        # have added some for us. Alert us to the situation.
1355                        raise error.TestFail(
1356                            'Failed: Remove module %s from '
1357                            'notest_modules directory!' % target_module)
1358                elif notest:
1359                    logging.error('Did not find any tests in module. Hoping '
1360                                  'this is transient. Retry after reboot.')
1361                    for current_login in current_logins:
1362                        current_login.need_reboot()
1363                    continue
1364
1365                # After the no-test check, commit the pass/fail count.
1366                waived = last_waived
1367                session_id, passed, failed, all_done =\
1368                    last_session_id, last_passed, last_failed, last_all_done
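                # Committing only after the no-test check ensures a transient
                # zero-test iteration cannot clobber the counts used in the
                # final verdict below.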
1369
1370                # Check if all the tests passed.
1371                if failed <= waived and all_done:
1372                    break
1373
1374                # TODO(b/127908450) Tradefed loses track of not-executed tests
1375                # when the command-line pattern includes '*', and a retry run
1376                # then wrongly declares that all tests passed. That would be
1377                # misleading, so we give up retrying and report the result as FAIL.
1378                if not all_done and '*' in ''.join(run_template):
1379                    break
1380
1381        if session_id is None:
1382            raise error.TestFail('Error: Could not find any tests in module.')
1383
1384        if failed <= waived and all_done:
1385            if not all(accurate):
1386                raise error.TestFail(
1387                    'Failed: Not all tests were executed. After %d '
1388                    'retries passing %d tests, waived=%d. %s' % (
1389                        steps, passed, waived, self.summary))
1390            # TODO(ihf): Make this error.TestPass('...') once
1391            # available.
1392            if steps > 0 and self._warn_on_test_retry:
1393                raise error.TestWarn(
1394                    'Passed: after %d retries passing %d tests, '
1395                    'waived=%d. %s' % (steps, passed, waived,
1396                                       self.summary))
1397            return
1398
1399        raise error.TestFail(
1400            'Failed: after %d retries giving up. '
1401            'passed=%d, failed=%d, waived=%d%s%s. %s' %
1402            (steps, passed, failed, waived, '' if all_done else ', notexec>=1',
1403             '' if all(accurate) else ', Tests may not be accurate.',
1404             self.summary))
1405