# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# repohooks/pre-upload.py currently does not run pylint. But for developers who
# want to check their code manually we disable several harmless pylint warnings
# which just distract from more serious remaining issues.
#
# The instance variables _hosts and _install_paths are not defined in __init__().
# pylint: disable=attribute-defined-outside-init
#
# Many short variable names don't follow the naming convention.
# pylint: disable=invalid-name
#
# _parse_result() and _dir_size() don't access self and could be functions.
# pylint: disable=no-self-use

from collections import namedtuple
import errno
import glob
import hashlib
import logging
import os
import pipes
import re
import shutil
import stat
import tempfile
import urlparse

from autotest_lib.client.bin import utils as client_utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib.cros import dev_server
from autotest_lib.server import test
from autotest_lib.server import utils
from autotest_lib.server.cros.tradefed import cts_expected_failure_parser
from autotest_lib.server.cros.tradefed import tradefed_chromelogin as login
from autotest_lib.server.cros.tradefed import tradefed_constants as constants
from autotest_lib.server.cros.tradefed import tradefed_utils
from autotest_lib.server.cros.tradefed import tradefed_prerequisite

# TODO(kinaba): Move to tradefed_utils together with the setup/cleanup methods.
MediaAsset = namedtuple('MediaAssetInfo', ['uri', 'localpath'])


class TradefedTest(test.test):
    """Base class to prepare DUT to run tests via tradefed."""
    version = 1

    # Default and upper bounds of max_retry, based on board and revision
    # after branching (that is, 'y' of R74-12345.y.z).
    #
    # By default, 0<=y<1 does 5 retries and 1<=y does 10. The |max_retry|
    # parameter in control files can override the count, within the
    # _BRANCH_MAX_RETRY limit below.
    _BRANCH_DEFAULT_RETRY = [(0, 5), (1, 10)]  # dev=5, beta=stable=10
    _BRANCH_MAX_RETRY = [(0, 5), (1, 30),      # dev=5, beta=30, stable=99
        (constants.APPROXIMATE_STABLE_BRANCH_NUMBER, 99)]
    # TODO(kinaba): betty-arcnext
    _BOARD_MAX_RETRY = {'betty': 0}
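    # Illustrative example (numbers hypothetical): on a non-betty board whose
    # release version is R74-12345.3.0, _get_release_branch_number() is 3, so
    # _BRANCH_DEFAULT_RETRY yields 10 and _BRANCH_MAX_RETRY yields 30. Then
    # _get_max_retry(50) returns min(50, 30) = 30, while _get_max_retry(None)
    # returns 10. On betty, _BOARD_MAX_RETRY caps the result at 0.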

    _SHARD_CMD = None
    _board_arch = None
    _board_name = None
    _release_branch_number = None  # The 'y' of OS version Rxx-xxxxx.y.z
    _android_version = None
    _num_media_bundles = 0
    _abilist = []

    def _log_java_version(self):
        """Logs the java version installed on the server as a sanity check."""
        utils.run(
            'java',
            args=('-version',),
            ignore_status=False,
            verbose=True,
            stdout_tee=utils.TEE_TO_LOGS,
            stderr_tee=utils.TEE_TO_LOGS)

    def initialize(self,
                   bundle=None,
                   uri=None,
                   host=None,
                   hosts=None,
                   max_retry=None,
                   load_waivers=True,
                   retry_manual_tests=False,
                   warn_on_test_retry=True,
                   hard_reboot_on_failure=False):
        """Sets up the tools and binary bundles for the test."""
        self._install_paths = []
        # TODO(pwang): Remove host if we enable multiple hosts everywhere.
        self._hosts = [host] if host else hosts
        for host in self._hosts:
            logging.info('Hostname: %s', host.host_port)
        self._verify_hosts()

        self._max_retry = self._get_max_retry(max_retry)
        self._warn_on_test_retry = warn_on_test_retry
        # Tests in the lab run within individual lxc container instances.
        if utils.is_in_container():
            cache_root = constants.TRADEFED_CACHE_CONTAINER
        else:
            cache_root = constants.TRADEFED_CACHE_LOCAL

        # TODO(ihf): Reevaluate this when we run out of memory. We could
        # for example use 32 bit java on the first run but not during retries.
        # b/62895114. If select_32bit_java gets deleted for good also remove it
        # from the base image.
        # Try to save server memory (crbug.com/717413).
        # select_32bit_java()

        # The content of the cache survives across jobs.
        self._safe_makedirs(cache_root)
        self._tradefed_cache = os.path.join(cache_root, 'cache')
        self._tradefed_cache_lock = os.path.join(cache_root, 'lock')
        self._tradefed_cache_dirty = os.path.join(cache_root, 'dirty')
        # The content of the install location does not survive across jobs and
        # is isolated (by using a unique path) against other autotest instances.
        # This is not needed for the lab, but useful if somebody wants to run
        # multiple TradefedTest instances.
        self._tradefed_install = tempfile.mkdtemp(
            prefix=constants.TRADEFED_PREFIX)
        # Under lxc the cache is shared between multiple autotest/tradefed
        # instances. We need to synchronize access to it. All binaries are
        # installed through the (shared) cache into the local (unshared)
        # lxc/autotest instance storage.
        # If the cache is to be cleared, it must happen before all downloads.
        self._clean_download_cache_if_needed()
        # Set permissions (rwxr-xr-x) to the executable binaries.
        permission = (
            stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
            | stat.S_IXOTH)
        self._install_files(constants.ADB_DIR, constants.ADB_FILES,
                            permission)
        self._install_files(constants.SDK_TOOLS_DIR,
                            constants.SDK_TOOLS_FILES, permission)

        # Install the tradefed bundle.
        bundle_install_path = self._install_bundle(
            uri or self._get_default_bundle_url(bundle))
        self._repository = os.path.join(bundle_install_path,
                                        self._get_tradefed_base_dir())

        # Load expected test failures to exclude them from re-runs.
        self._waivers = set()
        if load_waivers:
            self._waivers.update(
                    self._get_expected_failures('expectations', bundle))
        if not retry_manual_tests:
            self._waivers.update(
                    self._get_expected_failures('manual_tests', bundle))

        # Load modules with no tests.
        self._notest_modules = self._get_expected_failures('notest_modules',
                bundle)
        self._hard_reboot_on_failure = hard_reboot_on_failure

    def postprocess(self):
        """Postprocess: output performance values."""
        path = tradefed_utils.get_test_result_xml_path(
            os.path.join(self.resultsdir,
                         self._get_tradefed_base_dir()))
        if path:
            for metric in tradefed_utils.get_perf_metrics_from_test_result_xml(
                path, self.resultsdir):
                self.output_perf_value(**metric)

    def cleanup(self):
        """Cleans up any dirtied state."""
        # Kill any lingering adb servers.
        for host in self._hosts:
            try:
                self._run_adb_cmd(host, verbose=True, args=('kill-server',))
            except (error.CmdError, AttributeError):
                pass

        if hasattr(self, '_tradefed_install'):
            logging.info('Cleaning up %s.', self._tradefed_install)
            try:
                shutil.rmtree(self._tradefed_install)
            except IOError:
                pass

    def _verify_hosts(self):
        """Verify all hosts' ChromeOS consistency."""
        # Check release builder path. E.g. cave-release/R66-10435.0.0
        release_builder_path = set(host.get_release_builder_path()
                                   for host in self._hosts)
        if len(release_builder_path) > 1:
            raise error.TestFail('Hosts\' CHROMEOS_RELEASE_BUILDER_PATH is '
                                 'different: %s' % release_builder_path)

        # Check ChromeOS ARC version.
        arc_version = set(host.get_arc_version() for host in self._hosts)
        if len(arc_version) > 1:
            raise error.TestFail('Hosts\' CHROMEOS_ARC_VERSION is different: '
                                 '%s' % arc_version)

        # Check ChromeOS model for unibuild.
        # TODO(pwang): Add a check once we find a way to detect the host's model.

    def _verify_arc_hosts(self):
        """Verify all hosts' Android configuration consistency.

        This method should only be called after all hosts' Android has been
        successfully booted up."""
        # Check that all hosts have the same Android fingerprint.
        fingerprint = set(self._run_adb_cmd(
            host,
            args=('shell', 'getprop', 'ro.build.fingerprint')).stdout
            for host in self._hosts)
        if len(fingerprint) > 1:
            raise error.TestFail('Hosts\' supported fingerprint is different: '
                                 '%s' % fingerprint)

    def _calculate_test_count_factor(self, bundle):
        """Calculate the multiplicative factor for the test case number.

        The value equals the number of times each test case is run, which is
        determined by the intersection of the ABIs supported by the CTS/GTS
        bundle and those of the tested device."""
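        # Illustrative example (device hypothetical): an ARM device reporting
        # abilist 'arm64-v8a,armeabi-v7a' run against the 'arm' bundle gives a
        # factor of 2; run against the 'x86' bundle it gives 0, in which case
        # _timeout_factor below still stays at 1.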
        arm_abis = set(('armeabi-v7a', 'arm64-v8a'))
        x86_abis = set(('x86', 'x86_64'))
        if bundle == 'arm':
            tradefed_abis = arm_abis
        elif bundle == 'x86':
            tradefed_abis = x86_abis
        else:
            tradefed_abis = arm_abis | x86_abis
        self._test_count_factor = len(set(self._get_abilist()) & tradefed_abis)
        # Avoid setting timeout=0 (None) in any case.
        self._timeout_factor = max(1, self._test_count_factor)

    def _get_adb_targets(self):
        """Get a list of adb targets."""
        return [self._get_adb_target(host) for host in self._hosts]

    def _get_adb_target(self, host):
        """Get the adb target format.

        This method is slightly different from host.host_port as we need to
        explicitly specify the port so that the serial name of the adb target
        matches."""
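        # Illustrative example (hostname hypothetical): a DUT reachable as
        # 'chromeos1-row1-rack1-host2' on port 22 becomes the adb serial
        # 'chromeos1-row1-rack1-host2:22'.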
        return '{}:{}'.format(host.hostname, host.port)

    def _run_adb_cmd(self, host=None, **kwargs):
        """Runs an adb command.

        @param host: DUT to connect to. (None if the adb command is intended
                     to run on the server, e.g. keygen.)
        """
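        # Typical invocation, as used elsewhere in this class:
        #   self._run_adb_cmd(
        #       host, args=('shell', 'getprop', 'ro.build.fingerprint'))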
        # As of Android N, tradefed cannot specify which adb socket to use; it
        # uses tcp:localhost:5037 by default.
        adb_global_option = ('-H', 'localhost', '-P', '5037')
        if host:
            host_port = self._get_adb_target(host)
            adb_global_option = ('-s', host_port)
        kwargs['args'] = adb_global_option + kwargs.get('args', ())
        result = self._run('adb', **kwargs)
        logging.info('adb %s:\n%s', ' '.join(kwargs.get('args')),
                     result.stdout + result.stderr)
        return result

    def _try_adb_connect(self, host):
        """Attempts to connect to adb on the DUT.

        @param host: DUT that needs to be connected.
        @return boolean indicating if adb connected successfully.
        """
        # Add ADB_TRACE=all for debugging adb connection failures.
        env = os.environ.copy()
        env['ADB_TRACE'] = 'all'
        try:
            # This may return failure due to a race condition in adb
            # connect (b/29370989). If adb is already connected, this command
            # will immediately return success.
            host_port = self._get_adb_target(host)
            result = self._run_adb_cmd(
                host, args=('connect', host_port), verbose=True, env=env,
                ignore_status=True,
                timeout=constants.ADB_CONNECT_TIMEOUT_SECONDS)
            if result.exit_status != 0:
                return False

            result = self._run_adb_cmd(host, args=('devices',), env=env,
                timeout=constants.ADB_CONNECT_TIMEOUT_SECONDS)
            if not re.search(r'{}\s+(device|unauthorized)'.format(
                    re.escape(host_port)), result.stdout):
                logging.info('No matching device found with pattern: %s',
                             r'{}\s+(device|unauthorized)'.format(
                                 re.escape(host_port)))
                return False

            # Actually test the connection with an adb command as there can be
            # a race between detecting the connected device and actually being
            # able to run a command with authenticated adb.
            result = self._run_adb_cmd(
                host, args=('shell', 'exit'), env=env, ignore_status=True,
                timeout=constants.ADB_CONNECT_TIMEOUT_SECONDS)
            return result.exit_status == 0
        except error.CmdTimeoutError as e:
            logging.warning(e)
            return False

    def _android_shell(self, host, command):
        """Run a command remotely on the device in an android shell.

        This function is strictly for internal use only, as commands do not run
        in a fully consistent Android environment. Prefer adb shell instead.
        """
        host.run('android-sh -c ' + pipes.quote(command))

    def _write_android_file(self, host, filename, data):
        """Writes a file to a location relative to the android container.

        This is an internal function used to bootstrap adb.
        Tests should use adb push to write files.
        """
        android_cmd = 'echo %s > %s' % (pipes.quote(data),
                                        pipes.quote(filename))
        self._android_shell(host, android_cmd)

    def _connect_adb(self, host, pubkey):
        """Sets up ADB connection to the ARC container.

        @param host: DUT that should be connected to.
        @param pubkey: public key generated by adb keygen && adb pubkey.
        """
        logging.info('Setting up adb connection.')
        # Push keys for adb.
        self._write_android_file(host, constants.ANDROID_ADB_KEYS_PATH, pubkey)
        self._android_shell(
            host, 'restorecon ' + pipes.quote(constants.ANDROID_ADB_KEYS_PATH))

        # This starts adbd.
        self._android_shell(host, 'setprop sys.usb.config adb')

        # Also let it be automatically started upon reboot.
        self._android_shell(host, 'setprop persist.sys.usb.config adb')

        # adbd may take some time to come up. Repeatedly try to connect to adb.
        utils.poll_for_condition(
            lambda: self._try_adb_connect(host),
            timeout=constants.ADB_READY_TIMEOUT_SECONDS,
            sleep_interval=constants.ADB_POLLING_INTERVAL_SECONDS)

        logging.info('Successfully set up adb connection.')

    def _wait_for_arc_boot(self, host):
        """Wait until ARC is fully booted.

        Tests for the presence of the intent helper app to determine whether ARC
        has finished booting.
        @param host: DUT that needs to be connected to.
        """

        def _intent_helper_running():
            result = self._run_adb_cmd(
                host,
                args=('shell', 'pgrep', '-f', 'org.chromium.arc.intent_helper'),
                ignore_status=True)
            return bool(result.stdout)

        utils.poll_for_condition(
            _intent_helper_running,
            exception=error.TestFail(
                'Error: Timed out waiting for intent helper.'),
            timeout=constants.ARC_READY_TIMEOUT_SECONDS,
            sleep_interval=constants.ARC_POLLING_INTERVAL_SECONDS)

    def _disable_adb_install_dialog(self, host):
        """Disables a dialog shown on adb install execution.

        By default, on adb install execution, "Allow Google to regularly check
        device activity ... " dialog is shown. It requires manual user action
        so that tests are blocked at that point.
        This method disables the dialog.
        """
        logging.info('Disabling the adb install dialog.')
        result = self._run_adb_cmd(
            host,
            verbose=True,
            args=('shell', 'settings', 'put', 'global',
                  'verifier_verify_adb_installs', '0'))
        logging.info('Disable adb dialog: %s', result.stdout)

        # The Android "RescueParty" feature can reset the above settings when
        # the device crashes often. Disable the rescue during testing.
        self._android_shell(host, 'setprop persist.sys.disable_rescue true')

    def _ready_arc(self):
        """Ready ARC and adb on all hosts for running tests via tradefed."""
        # Generate the adb keys on server.
        key_path = os.path.join(self.tmpdir, 'test_key')
        self._run_adb_cmd(verbose=True, args=('keygen', pipes.quote(key_path)))
        os.environ['ADB_VENDOR_KEYS'] = key_path
        pubkey = self._run_adb_cmd(verbose=True,
                args=('pubkey', pipes.quote(key_path))).stdout

        for _ in range(2):
            try:
                # Kill existing adb server to ensure that the env var is picked
                # up, and reset any previous bad state.
                self._run_adb_cmd(verbose=True, args=('kill-server',))

                # TODO(pwang): connect_adb takes 10+ seconds on a single DUT.
                #              Parallelize it if it becomes a bottleneck.
                for host in self._hosts:
                    self._connect_adb(host, pubkey)
                    self._disable_adb_install_dialog(host)
                    self._wait_for_arc_boot(host)
                self._verify_arc_hosts()
                return
            except (utils.TimeoutError, error.CmdTimeoutError):
                logging.error('Failed to set up adb connection. Retrying...')
        raise error.TestFail('Error: Failed to set up adb connection')

    def _safe_makedirs(self, path):
        """Creates a directory at |path| and its ancestors.

        Unlike os.makedirs(), errors are ignored when the directories
        already exist.
        """
        try:
            os.makedirs(path)
        except OSError as e:
            if not (e.errno == errno.EEXIST and os.path.isdir(path)):
                raise

    def _unzip(self, filename):
        """Unzip the file.

        The destination directory name will be the stem of filename.
        E.g., _unzip('foo/bar/baz.zip') will create directory at
        'foo/bar/baz', and then will inflate zip's content under the directory.
        If there is already a directory at the stem, that directory is used.

        @param filename: Path to the zip archive.
        @return Path to the inflated directory.
        """
        destination = os.path.splitext(filename)[0]
        if os.path.isdir(destination):
            logging.info('Skipping unzip %s, reusing content of %s', filename,
                         destination)
            return destination
        tmp = tempfile.mkdtemp(dir=os.path.dirname(filename))
        logging.info('Begin unzip %s', filename)
        try:
            utils.run('unzip', args=('-d', tmp, filename))
        except:
            logging.error('Failed unzip, cleaning up.')
            # Clean up just-created files.
            shutil.rmtree(tmp, ignore_errors=True)
            raise
        logging.info('End unzip %s', filename)
        try:
            os.renames(tmp, destination)
        except:
            logging.error('Failed rename, cleaning up.')
            shutil.rmtree(destination, ignore_errors=True)
            shutil.rmtree(tmp, ignore_errors=True)
            raise
        return destination

    def _dir_size(self, directory):
        """Compute recursive size in bytes of directory."""
        size = 0
        for root, _, files in os.walk(directory):
            for name in files:
                try:
                    size += os.path.getsize(os.path.join(root, name))
                except OSError:
                    logging.error('Inaccessible path (crbug/793696): %s/%s',
                                  root, name)
        return size

    def _invalidate_download_cache(self):
        """Marks the download cache for deferred deletion.

        Used to make cache file operations atomic across failures and reboots.
        The caller is responsible for holding the cache lock.
        """
        if not os.path.exists(self._tradefed_cache_dirty):
            os.mkdir(self._tradefed_cache_dirty)

    def _validate_download_cache(self):
        """Validates and unmarks the download cache from deletion.

        Used to make cache file operations atomic across failures and reboots.
        The caller is responsible for holding the cache lock.
        """
        shutil.rmtree(self._tradefed_cache_dirty, ignore_errors=True)

    def _clean_download_cache_if_needed(self, force=False):
        """Invalidates cache to prevent it from growing too large."""
        # If the cache is large enough to hold a working set, we can simply
        # delete everything without thrashing.
        # TODO(ihf): Investigate strategies like LRU.
        clean = force
        with tradefed_utils.lock(self._tradefed_cache_lock):
            size = self._dir_size(self._tradefed_cache)
            if size > constants.TRADEFED_CACHE_MAX_SIZE:
                logging.info(
                    'Current cache size=%d is too large. Clearing %s.', size,
                    self._tradefed_cache)
                clean = True
            else:
                logging.info('Current cache size=%d of %s.', size,
                             self._tradefed_cache)
            if os.path.exists(self._tradefed_cache_dirty):
                logging.info('Found dirty cache.')
                clean = True
            if clean:
                logging.warning('Cleaning download cache.')
                shutil.rmtree(self._tradefed_cache, ignore_errors=True)
                self._safe_makedirs(self._tradefed_cache)
                shutil.rmtree(self._tradefed_cache_dirty, ignore_errors=True)

    def _download_to_cache(self, uri):
        """Downloads the uri from the storage server.

        It always checks the cache for available binaries first and skips
        the download if the binaries are already in the cache.

        The caller of this function is responsible for holding the cache lock.

        @param uri: The Google Storage or dl.google.com uri.
        @return Path to the downloaded object in the cache.
        """
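        # Cache layout sketch (uri and hash hypothetical): a download of
        # 'gs://some-bucket/bundle.zip' is keyed by the md5 of the uri string,
        # e.g. <cache>/0123abcd...ef/bundle.zip, so later runs with the same
        # uri can reuse the entry without downloading again.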
        # We are hashing the uri instead of the binary. This is acceptable, as
        # the uris are supposed to contain version information and an object is
        # not supposed to be changed once created.
        output_dir = os.path.join(self._tradefed_cache,
                                  hashlib.md5(uri).hexdigest())
        # Check for existence of cache entry. We check for directory existence
        # instead of file existence, so that _install_bundle can delete original
        # zip files to save disk space.
        if os.path.exists(output_dir):
            # TODO(crbug.com/800657): Mitigation for the invalid state. Normally
            # this should not happen, but when a lock is force broken due to
            # high IO load, multiple processes may enter the critical section
            # and leave a bad state permanently.
            if os.listdir(output_dir):
                logging.info('Skipping download of %s, reusing content of %s.',
                             uri, output_dir)
                return os.path.join(output_dir,
                    os.path.basename(urlparse.urlparse(uri).path))
            logging.error('Empty cache entry detected %s', output_dir)
        return self._download_to_dir(uri, output_dir)

    def _download_to_dir(self, uri, output_dir):
        """Downloads the gs|http|https uri from the storage server.

        @param uri: The Google Storage or dl.google.com uri.
        @param output_dir: The directory where the downloaded file is placed.
        @return Path to the downloaded file.
        """
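        # Dispatch sketch (uris hypothetical): 'gs://bucket/dir/file.zip' is
        # fetched with gsutil (or staged through the dev server on moblab),
        # while 'https://dl.google.com/dl/file.zip' is fetched with wget; both
        # end up at os.path.join(output_dir, 'file.zip').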
        # Split uri into 3 pieces for use by gsutil and also by wget.
        parsed = urlparse.urlparse(uri)
        filename = os.path.basename(parsed.path)
        output = os.path.join(output_dir, filename)

        self._safe_makedirs(output_dir)
        if parsed.scheme not in ['gs', 'http', 'https']:
            raise error.TestFail(
                'Error: Unknown download scheme %s' % parsed.scheme)
        if parsed.scheme in ['http', 'https']:
            logging.info('Using wget to download %s to %s.', uri, output_dir)
            # We are downloading 1 file at a time, hence using -O over -P.
            utils.run(
                'wget',
                args=('--report-speed=bits', '-O', output, uri),
                verbose=True)
            return output

        if not client_utils.is_moblab():
            # If the machine can access the storage server directly,
            # defer to "gsutil" for downloading.
            logging.info('Not in lab. Downloading %s directly to %s.',
                         uri, output)
            # b/17445576: gsutil rsync of individual files is not implemented.
            utils.run('gsutil', args=('cp', uri, output), verbose=True)
            return output

        # We are in the moblab. Because the machine cannot access the storage
        # server directly, use the dev server to proxy.
        logging.info('In lab. Downloading %s by staging to %s.',
                     uri, output)

        dirname = os.path.dirname(parsed.path)
        archive_url = '%s://%s%s' % (parsed.scheme, parsed.netloc, dirname)

        # First, request the devserver to download files into the lab network.
        # TODO(ihf): Switch stage_artifacts to honor rsync. Then we don't have
        # to shuffle files inside of tarballs.
        info = self._hosts[0].host_info_store.get()
        ds = dev_server.ImageServer.resolve(info.build)
        ds.stage_artifacts(
            info.build, files=[filename], archive_url=archive_url)

        # Then download files from the dev server.
        # TODO(ihf): use rsync instead of wget. Are there 3 machines involved?
        # Itself, dev_server plus DUT? Or is there just no rsync in moblab?
        ds_src = '/'.join([ds.url(), 'static', dirname, filename])
        logging.info('dev_server URL: %s', ds_src)
        # Calls into DUT to pull uri from dev_server.
        utils.run(
            'wget',
            args=('--report-speed=bits', '-O', output, ds_src),
            verbose=True)
        return output

    def _instance_copyfile(self, cache_path):
        """Makes a copy of a file from the (shared) cache to a wholly owned
        local instance. Also copies one level of cache directory (MD5 named).
        """
        filename = os.path.basename(cache_path)
        dirname = os.path.basename(os.path.dirname(cache_path))
        instance_dir = os.path.join(self._tradefed_install, dirname)
        # Make sure destination directory is named the same.
        self._safe_makedirs(instance_dir)
        instance_path = os.path.join(instance_dir, filename)
        shutil.copyfile(cache_path, instance_path)
        return instance_path

    def _instance_copytree(self, cache_path):
        """Makes a copy of a directory from the (shared and writable) cache to
        a wholly owned local instance.

        TODO(ihf): Consider using cp -al to only copy links. Not sure if this
        is really a benefit across the container boundary, but it is risky due
        to the possibility of corrupting the original files by an lxc instance.
        """
        # We keep the top 2 names from the cache_path = .../dir1/dir2.
        dir2 = os.path.basename(cache_path)
        dir1 = os.path.basename(os.path.dirname(cache_path))
        instance_path = os.path.join(self._tradefed_install, dir1, dir2)
        logging.info('Copying %s to instance %s', cache_path, instance_path)
        shutil.copytree(cache_path, instance_path)
        return instance_path

    def _install_bundle(self, gs_uri):
        """Downloads a zip file, installs it and returns the local path.

        @param gs_uri: URI of the zip file that contains the necessary files.
        """
        if not gs_uri.endswith('.zip'):
            raise error.TestFail('Error: Not a .zip file %s.' % gs_uri)
        # Atomic write through of file.
        with tradefed_utils.lock(self._tradefed_cache_lock):
            # Atomic operations.
            self._invalidate_download_cache()
            # Download is lazy (cache_path may not actually exist if
            # cache_unzipped does).
            cache_path = self._download_to_cache(gs_uri)
            # Unzip is lazy as well (but cache_unzipped is guaranteed to
            # exist).
            cache_unzipped = self._unzip(cache_path)
            # To save space we delete the original zip file. This works as
            # _download only checks existence of the cache directory for
            # lazily skipping download, and unzip itself will bail if the
            # unzipped destination exists. Hence we don't need the original
            # anymore.
            if os.path.exists(cache_path):
                logging.info('Deleting original %s', cache_path)
                os.remove(cache_path)
            # Erase dirty marker from disk.
            self._validate_download_cache()
            # We always copy files to give tradefed a clean copy of the
            # bundle.
            unzipped_local = self._instance_copytree(cache_unzipped)
        return unzipped_local

    def _install_files(self, gs_dir, files, permission):
        """Installs binary tools."""
        for filename in files:
            gs_uri = os.path.join(gs_dir, filename)
            # Atomic write through of file.
            with tradefed_utils.lock(self._tradefed_cache_lock):
                # We don't want to leave a corrupt cache for other jobs.
                self._invalidate_download_cache()
                cache_path = self._download_to_cache(gs_uri)
                # Mark cache as clean again.
                self._validate_download_cache()
                # This only affects the current job, so not part of cache
                # validation.
                local = self._instance_copyfile(cache_path)
            os.chmod(local, permission)
            # Keep track of PATH.
            self._install_paths.append(os.path.dirname(local))

    def _prepare_media(self, media_asset):
        """Downloads and offers the cached media files to tradefed."""
        if media_asset.uri:
            media = self._install_bundle(media_asset.uri)
            if os.path.islink(media_asset.localpath):
                os.unlink(media_asset.localpath)
            if os.path.isdir(media_asset.localpath):
                shutil.rmtree(media_asset.localpath)
            self._safe_makedirs(os.path.dirname(media_asset.localpath))
            os.symlink(media, media_asset.localpath)

            logging.info('Offered %s as a media directory in %s',
                    media, media_asset.localpath)

        # Records the number of existing media bundles, to check later.
        if os.path.isdir(media_asset.localpath):
            self._num_media_bundles = len(
                    os.listdir(media_asset.localpath))

    def _cleanup_media(self, media_asset):
        """Clean up the local copy of cached media files."""
        self._fail_on_unexpected_media_download(media_asset)
        if os.path.islink(media_asset.localpath):
            path = os.readlink(media_asset.localpath)
            os.unlink(media_asset.localpath)
            if os.path.isdir(path):
                logging.info('Cleaning up media files in %s', path)
                shutil.rmtree(path)

    def _fail_on_unexpected_media_download(self, media_asset):
        if os.path.isdir(media_asset.localpath):
            contents = os.listdir(media_asset.localpath)
            if len(contents) > self._num_media_bundles:
                raise error.TestFail(
                    'Failed: Unexpected media bundle was added %s' % contents)

    def _run(self, *args, **kwargs):
        """Executes the given command line.

        To support SDK tools, such as adb or aapt, this adds _install_paths
        to the extra_paths. Before invoking this, ensure _install_files() has
        been called.
        """
        kwargs['extra_paths'] = (
            kwargs.get('extra_paths', []) + self._install_paths)
        return utils.run(*args, **kwargs)

    def _collect_tradefed_global_log(self, result, destination):
        """Collects the tradefed global log.

        @param result: The result object from utils.run.
        @param destination: Autotest result directory (destination of logs).
        """
        match = re.search(r'Saved log to /tmp/(tradefed_global_log_.*\.txt)',
                          result.stdout)
        if not match:
            logging.error('No tradefed_global_log file found.')
            return

        name = match.group(1)
        dest = os.path.join(destination, 'logs', 'tmp')
        self._safe_makedirs(dest)
        shutil.copy(os.path.join('/tmp', name), os.path.join(dest, name))

    def _get_expected_failures(self, directory, bundle_abi):
        """Return a list of expected failures or no-test modules.

        @param directory: A directory with expected failure or no-test files.
        @param bundle_abi: 'arm' or 'x86' if the test is for the particular ABI.
                           None otherwise (like GTS, built for multi-ABI.)
        @return: A list of expected failures or no-test modules for the current
                 testing device.
        """
        # Load waivers and manual tests so TF doesn't re-run them.
        expected_fail_files = []
        test_board = self._get_board_name()
        test_arch = self._get_board_arch()
        sdk_ver = self._get_android_version()
        expected_fail_dir = os.path.join(self.bindir, directory)
        if os.path.exists(expected_fail_dir):
            expected_fail_files += glob.glob(expected_fail_dir + '/*.yaml')

        waivers = cts_expected_failure_parser.ParseKnownCTSFailures(
            expected_fail_files)
        return waivers.find_waivers(test_arch, test_board, bundle_abi, sdk_ver)

    def _get_abilist(self):
        """Return the ABI list supported by the device, queried via adb.

        This method should only be called after the android environment is
        successfully initialized."""
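        # Illustrative example (device hypothetical): an ARM device typically
        # reports ro.product.cpu.abilist as 'arm64-v8a,armeabi-v7a', which is
        # split below into ['arm64-v8a', 'armeabi-v7a'].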
        if not self._abilist:
            for _ in range(3):
                abilist_str = self._run_adb_cmd(
                    self._hosts[0],
                    args=('shell', 'getprop',
                          'ro.product.cpu.abilist')).stdout.strip()
                if abilist_str:
                    self._abilist = abilist_str.split(',')
                    break
                else:
                    # TODO(kinaba): Sometimes getprop returns an empty string.
                    # Investigate why. For now we mitigate the bug by retries.
                    logging.error('Empty abilist.')
        return self._abilist

    def _get_release_branch_number(self):
        """Returns the DUT branch number (z of Rxx-yyyyy.z.w) or 0 on error."""
        if not self._release_branch_number:
            ver = (self._hosts[0].get_release_version() or '').split('.')
            self._release_branch_number = (int(ver[1]) if len(ver) >= 3 else 0)
        return self._release_branch_number

    def _get_board_arch(self):
        """Return target DUT arch name."""
        if not self._board_arch:
            self._board_arch = ('arm' if self._hosts[0].get_cpu_arch() == 'arm'
                else 'x86')
        return self._board_arch

    def _get_board_name(self):
        """Return target DUT board name."""
        if not self._board_name:
            self._board_name = self._hosts[0].get_board().split(':')[1]
        return self._board_name

    def _get_android_version(self):
        """Return target DUT Android SDK version."""
        # TODO(kinaba): factor this out to server/hosts/cros_host.py
        if not self._android_version:
            self._android_version = self._hosts[0].run(
                'grep ANDROID_SDK /etc/lsb-release',
                ignore_status=True).stdout.rstrip().split('=')[1]
        return self._android_version

    def _get_max_retry(self, max_retry):
        """Return the maximum number of retries.

        @param max_retry: max_retry specified in the control file.
        @return: number of retries for this specific host.
        """
        if max_retry is None:
            max_retry = self._get_branch_retry(self._BRANCH_DEFAULT_RETRY)
        candidate = [max_retry]
        candidate.append(self._get_board_retry())
        candidate.append(self._get_branch_retry(self._BRANCH_MAX_RETRY))
        return min(x for x in candidate if x is not None)

    def _get_board_retry(self):
        """Return the maximum number of retries for the DUT board name.

        @return: maximum number of retries, or None if not specified.
        """
        board = self._get_board_name()
        if board in self._BOARD_MAX_RETRY:
            return self._BOARD_MAX_RETRY[board]
        logging.info('No board retry specified for board: %s', board)
        return None

    def _get_branch_retry(self, table):
        """Returns the retry count for DUT branch number defined in |table|."""
        number = self._get_release_branch_number()
        for lowerbound, retry in reversed(table):
            if lowerbound <= number:
                return retry
        logging.warning('Could not establish channel. Using retry=0.')
        return 0

    def _run_commands(self, commands, **kwargs):
        """Run commands on all the hosts."""
        for host in self._hosts:
            for command in commands:
                logging.info('RUN: %s\n', command)
                output = host.run(command, **kwargs)
                logging.info('END: %s\n', command)
                logging.debug(output)

    def _override_powerd_prefs(self):
        """Overrides powerd prefs to prevent the screen from turning off, as
        required by CTS.

        This is a remote version of PowerPrefChanger which ensures overridden
        policies won't persist across reboots by bind-mounting onto the config
        directory.
        """
        pref_dir = constants.POWERD_PREF_DIR
        temp_dir = constants.POWERD_TEMP_DIR
        commands = (
                'cp -r %s %s' % (pref_dir, temp_dir),
                'echo 1 > %s/ignore_external_policy' % temp_dir,
                'echo 0 | tee %s/{,un}plugged_{dim,off,suspend}_ms' % temp_dir,
                'mount --bind %s %s' % (temp_dir, pref_dir),
                'restart powerd',
        )
        try:
            self._run_commands(commands)
        except (error.AutoservRunError, error.AutoservSSHTimeout):
            logging.warning('Failed to override powerd policy; tests that '
                            'depend on the screen staying on may fail.')

    def _restore_powerd_prefs(self):
        """Restores powerd prefs overridden by _override_powerd_prefs()."""
        pref_dir = constants.POWERD_PREF_DIR
        temp_dir = constants.POWERD_TEMP_DIR
        commands = (
                'umount %s' % pref_dir,
                'restart powerd',
                'rm -rf %s' % temp_dir,
        )
        try:
            self._run_commands(commands)
        except (error.AutoservRunError, error.AutoservSSHTimeout):
            logging.warning('Failed to restore powerd policy; the overridden '
                            'policy will persist until the device reboots.')

    def _run_and_parse_tradefed(self, command):
        """Kick off the tradefed command.

        @param command: List of command tokens.
        @raise TestFail: when a test failure is detected.
        @return: tuple of the waived tests and an accuracy flag, as parsed
                 from the tradefed output.
        """
        target_argument = []
        for host in self._hosts:
            target_argument += ['-s', self._get_adb_target(host)]
        shard_argument = []
        if len(self._hosts) > 1:
            if self._SHARD_CMD:
                shard_argument = [self._SHARD_CMD, str(len(self._hosts))]
            else:
                logging.warning('cts-tradefed shard command isn\'t defined; '
                                'falling back to a single device.')
        command = command + target_argument + shard_argument
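        # Resulting shape (sketch; hostnames and the subclass-defined
        # _SHARD_CMD value are hypothetical): with two DUTs this appends
        # ['-s', 'host1:22', '-s', 'host2:22', '--shard-count', '2'] to the
        # command tokens passed in.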

        try:
            output = self._run_tradefed(command)
        except Exception as e:
            self._log_java_version()
            if not isinstance(e, error.CmdTimeoutError):
                # In case this happened due to file corruptions, try to
                # force recreation of the cache.
                logging.error('Failed to run tradefed! Cleaning up now.')
                self._clean_download_cache_if_needed(force=True)
            raise

        result_destination = os.path.join(self.resultsdir,
                                          self._get_tradefed_base_dir())
        # Gather the global log first. Datetime parsing below can abort the
        # test if tradefed startup failed. Even then the global log is useful.
        self._collect_tradefed_global_log(output, result_destination)
        # Result parsing must come after all other essential operations as test
        # warnings, errors and failures can be raised here.
        return tradefed_utils.parse_tradefed_result(output.stdout,
                                                    self._waivers)

    def _setup_result_directories(self):
        """Sets up the results and logs directories for tradefed.

        Tradefed saves the logs and results at:
          self._repository/results/$datetime/
          self._repository/results/$datetime.zip
          self._repository/logs/$datetime/
        Because other tools rely on the currently chosen Google storage paths
        we need to keep destination_results in:
          self.resultsdir/android-cts/results/$datetime/
          self.resultsdir/android-cts/results/$datetime.zip
          self.resultsdir/android-cts/results/logs/$datetime/
        To bridge between them, create symlinks from the former to the latter.
        """
        logging.info('Setting up tradefed results and logs directories.')

        results_destination = os.path.join(self.resultsdir,
                                           self._get_tradefed_base_dir())
        logs_destination = os.path.join(results_destination, 'logs')
        directory_mapping = [
            (os.path.join(self._repository, 'results'), results_destination),
            (os.path.join(self._repository, 'logs'), logs_destination),
        ]

        for (tradefed_path, final_path) in directory_mapping:
            if os.path.exists(tradefed_path):
                shutil.rmtree(tradefed_path)
            self._safe_makedirs(final_path)
            os.symlink(final_path, tradefed_path)

    def _install_plan(self, subplan):
        """Copy test subplan to CTS-TF.

        @param subplan: CTS subplan to be copied into TF.
        """
        logging.info('Install subplan: %s', subplan)
        subplans_tf_dir = os.path.join(self._repository, 'subplans')
        if not os.path.exists(subplans_tf_dir):
            os.makedirs(subplans_tf_dir)
        test_subplan_file = os.path.join(self.bindir, 'subplans',
                                         '%s.xml' % subplan)
        try:
            shutil.copy(test_subplan_file, subplans_tf_dir)
        except (shutil.Error, OSError, IOError) as e:
            raise error.TestFail(
                'Error: failed to copy test subplan %s to CTS bundle. %s' %
                (test_subplan_file, e))

    def _should_skip_test(self, _bundle):
        """Some tests are expected to fail and are skipped.

        Subclasses should override with specific details.
        """
        return False

    def _should_reboot(self, steps):
        """Oracle to decide if DUT should reboot or just restart Chrome.

        For now we will not reboot after the first two iterations, but on all
        iterations afterward as before. In particular this means that most CTS
        tests will now not get a "clean" machine, but one on which tests ran
        before. But we will still reboot after persistent failures, hopefully
        not causing too many flakes down the line.
        """
        if steps < 3:
            return False
        return True

    def _copy_extra_artifacts_dut(self, extra_artifacts, host, output_dir):
        """Upload the custom artifacts from the DUT."""
        self._safe_makedirs(output_dir)

        for artifact in extra_artifacts:
            logging.info('Copying extra artifacts from "%s" to "%s".',
                         artifact, output_dir)
            try:
                self._run_adb_cmd(host, verbose=True, timeout=120,
                                  args=('pull', artifact, output_dir))
            except:
                # Maybe ADB connection failed, or the artifacts don't exist.
                logging.exception('Copying extra artifacts failed.')

    def _copy_extra_artifacts_host(self, extra_artifacts, host, output_dir):
        """Upload the custom artifacts found on the server side."""
        self._safe_makedirs(output_dir)

        for artifact in extra_artifacts:
            logging.info('Copying extra artifacts from "%s" to "%s".',
                         artifact, output_dir)
            for extracted_path in glob.glob(artifact):
                logging.info('... %s', extracted_path)
                # Move it so it is not collected again in future retries.
                shutil.move(extracted_path, output_dir)

    def _run_tradefed_list_results(self):
        """Run the `tradefed list results` command.

        @return: tuple of the last (session_id, pass, fail, all_done?).
        """

        # Fix b/143580192: We set the timeout to 3 min. It never takes more than
        # 10s on light disk load.
        output = self._run_tradefed_with_timeout(['list', 'results'], 180)

        # Parses the last session from the output that looks like:
        #
        # Session  Pass  Fail  Modules Complete ...
        # 0        90    10    1 of 2
        # 1        199   1     2 of 2
        # ...
        lastmatch = None
        for m in re.finditer(r'^(\d+)\s+(\d+)\s+(\d+)\s+(\d+) of (\d+)',
                             output.stdout, re.MULTILINE):
            session, passed, failed, done, total = map(int,
                                                       m.group(1, 2, 3, 4, 5))
            lastmatch = (session, passed, failed, done == total)
        return lastmatch

    def _tradefed_retry_command(self, template, session_id):
        raise NotImplementedError('Subclass should override this function')

    def _tradefed_run_command(self, template):
        raise NotImplementedError('Subclass should override this function')

    def _tradefed_cmd_path(self):
        raise NotImplementedError('Subclass should override this function')

    def _tradefed_env(self):
        return None

    def _run_tradefed_with_timeout(self, command, timeout):
        tradefed = self._tradefed_cmd_path()
        with tradefed_utils.adb_keepalive(self._get_adb_targets(),
                                          self._install_paths):
            logging.info('RUN(timeout=%d): %s', timeout,
                         ' '.join([tradefed] + command))
            output = self._run(
                tradefed,
                args=tuple(command),
                env=self._tradefed_env(),
                timeout=timeout,
                verbose=True,
                ignore_status=False,
                # Make sure to tee tradefed stdout/stderr to autotest logs
                # continuously during the test run.
                stdout_tee=utils.TEE_TO_LOGS,
                stderr_tee=utils.TEE_TO_LOGS)
            logging.info('END: %s\n', ' '.join([tradefed] + command))
        return output

    def _run_tradefed(self, command):
        timeout = self._timeout * self._timeout_factor
        return self._run_tradefed_with_timeout(command, timeout)

    def _run_tradefed_with_retries(self,
                                   test_name,
                                   run_template,
                                   retry_template,
                                   timeout,
                                   media_asset=None,
                                   enable_default_apps=False,
                                   target_module=None,
                                   target_plan=None,
                                   executable_test_count=None,
                                   bundle=None,
                                   extra_artifacts=[],
                                   extra_artifacts_host=[],
                                   cts_uri=None,
                                   login_precondition_commands=[],
                                   precondition_commands=[],
                                   prerequisites=[]):
        """Run CTS/GTS with retry logic.

        We first kick off the specified module. Then rerun just the failures
        on the next MAX_RETRY iterations.
        """
        for prereq in prerequisites:
            result = tradefed_prerequisite.check(prereq, self._hosts)
            if not result[0]:
                raise error.TestError(result[1])

        # On dev and beta channels timeouts are sharp, lenient on stable.
        self._timeout = timeout
        if (self._get_release_branch_number() >=
                constants.APPROXIMATE_STABLE_BRANCH_NUMBER):
            self._timeout += 3600

        if self._should_skip_test(bundle):
            logging.warning('Skipped test %s', ' '.join(test_name))
            return

        steps = -1  # For historic reasons the first iteration is not counted.
        self.summary = ''
        accurate = []
        board = self._get_board_name()
        session_id = None

        self._setup_result_directories()
        self._prepare_media(media_asset)

        # This loop retries failures. For this reason please do not raise
        # TestFail in this loop if you suspect the failure might be fixed
        # in the next loop iteration.
        while steps < self._max_retry:
            steps += 1
            keep_media = media_asset and media_asset.uri and steps >= 1
            self._run_commands(login_precondition_commands, ignore_status=True)
            with login.login_chrome(
                    hosts=self._hosts,
                    board=board,
                    dont_override_profile=keep_media,
                    enable_default_apps=enable_default_apps) as current_logins:
                if self._should_reboot(steps):
                    # TODO(rohitbm): Evaluate if power cycle really helps with
                    # Bluetooth test failures, and then make the implementation
                    # more strict by first running complete restart and reboot
                    # retries and then performing power cycle.
                    #
                    # Currently, (steps + 1 == self._max_retry) means that
                    # hard_reboot is attempted after "this" cycle failed. Then,
                    # the last remaining step will be run on the rebooted DUT.
                    hard_reboot = (self._hard_reboot_on_failure
                        and steps + 1 == self._max_retry)
                    for current_login in current_logins:
                        current_login.need_reboot(hard_reboot=hard_reboot)
                self._ready_arc()
                self._calculate_test_count_factor(bundle)
                self._run_commands(precondition_commands, ignore_status=True)

                # Run tradefed.
                if session_id == None:
                    if target_plan is not None:
                        self._install_plan(target_plan)

                    logging.info('Running %s:', test_name)
                    command = self._tradefed_run_command(run_template)
                else:
                    logging.info('Retrying failures of %s with session_id %d:',
                                 test_name, session_id)
                    command = self._tradefed_retry_command(retry_template,
                                                           session_id)

                # TODO(pwang): Evaluate if it is worth it to get the number of
                #              not-executed, for instance, by collecting all
                #              tests on startup (very expensive, may take 30
                #              minutes).
                # TODO(b/137917339): Only prevent screen from turning off for
                # media tests. Remove this check once the GPU issue is fixed.
                if media_asset and media_asset.uri:
                    self._override_powerd_prefs()
                try:
                    waived_tests, acc = self._run_and_parse_tradefed(command)
                finally:
                    # TODO(b/137917339): ditto
                    if media_asset and media_asset.uri:
                        self._restore_powerd_prefs()
                self._fail_on_unexpected_media_download(media_asset)
                result = self._run_tradefed_list_results()
                if not result:
                    logging.error('Did not find any test results. Retry.')
                    for current_login in current_logins:
                        current_login.need_reboot()
                    continue

                waived = len(waived_tests)
                last_session_id, passed, failed, all_done = result

                if failed > waived or not utils.is_in_container():
                    for host in self._hosts:
                        dir_name = "%s-step%02d" % (host.hostname, steps)
                        output_dir = os.path.join(
                            self.resultsdir, 'extra_artifacts', dir_name)
                        self._copy_extra_artifacts_dut(
                            extra_artifacts, host, output_dir)
                        self._copy_extra_artifacts_host(
                            extra_artifacts_host, host, output_dir)

                if passed + failed > 0:
                    # At least one test has run, which means the media push step
                    # of tradefed didn't fail. To free up the storage earlier,
                    # delete the copy on the server side. See crbug.com/970881
                    self._cleanup_media(media_asset)

                # If the result is |acc|urate according to the log, or the
                # inaccuracy is recognized by tradefed (not all_done), then
                # it is fine.
                accurate.append(acc or not all_done)
                if failed < waived:
                    logging.error(
                        'Error: Internal waiver bookkeeping has become '
                        'inconsistent (f=%d, w=%d)', failed, waived)

                msg = 'run' if session_id == None else ' retry'
                msg += '(p=%s, f=%s, w=%s)' % (passed, failed, waived)
                self.summary += msg
                logging.info('RESULT: %s %s', msg, result)

                # Check for no-test modules. We use the "all_done" indicator
                # provided by list_results to decide if there are outstanding
                # modules to iterate over (similar to missing tests just on a
                # per-module basis).
                notest = (passed + failed == 0 and all_done)
                if target_module in self._notest_modules:
                    if notest:
                        logging.info('Package has no tests as expected.')
                        return
                    else:
                        # We expected no tests, but the new bundle drop must
                        # have added some for us. Alert us to the situation.
                        raise error.TestFail(
                            'Failed: Remove module %s from '
                            'notest_modules directory!' % target_module)
                elif notest:
                    logging.error('Did not find any tests in module. Hoping '
                                  'this is transient. Retry after reboot.')
                    for current_login in current_logins:
                        current_login.need_reboot()
                    continue

                session_id = last_session_id
                if (not all_done and executable_test_count != None and
                        (passed + failed ==
                         executable_test_count * self._test_count_factor)):
                    logging.warning('Overwriting all_done as True, since the '
                                    'explicitly set executable_test_count '
                                    'tests have run.')
                    all_done = True

                # Check if all the tests passed.
                if failed <= waived and all_done:
                    break

                # TODO(b/127908450) Tradefed loses track of not-executed tests
                # when the commandline pattern included '*', and retry run for
                # them wrongly declares all tests passed. This is misleading.
                # Rather, we give up the retry and report the result as FAIL.
                if not all_done and '*' in ''.join(run_template):
                    break

        if session_id == None:
            raise error.TestFail('Error: Could not find any tests in module.')

        if failed <= waived and all_done:
            if not all(accurate):
                raise error.TestFail(
                    'Failed: Not all tests were executed. After %d '
                    'retries passing %d tests, waived=%d. %s' % (
                        steps, passed, waived, self.summary))
            # TODO(ihf): Make this error.TestPass('...') once
            # available.
            if steps > 0 and self._warn_on_test_retry:
                raise error.TestWarn(
                    'Passed: after %d retries passing %d tests, '
                    'waived=%d. %s' % (steps, passed, waived,
                                       self.summary))
            return

        raise error.TestFail(
            'Failed: after %d retries giving up. '
            'passed=%d, failed=%d, waived=%d%s%s. %s' %
            (steps, passed, failed, waived, '' if all_done else ', notexec>=1',
             '' if all(accurate) else ', Tests may not be accurate.',
             self.summary))