• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python
2#
3# Copyright (C) 2016 The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#      http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17
18import random
19import logging
20from concurrent import futures
21import queue
22import threading
23import os
24
25from vts.runners.host import asserts
26from vts.runners.host import base_test
27from vts.runners.host import const
28from vts.runners.host import keys
29from vts.runners.host import test_runner
30from vts.utils.python.common import cmd_utils
31from vts.utils.python.controllers import android_device
32
33from vts.testcases.kernel.ltp import test_cases_parser
34from vts.testcases.kernel.ltp import environment_requirement_checker as env_checker
35from vts.testcases.kernel.ltp.shell_environment import shell_environment
36from vts.testcases.kernel.ltp import ltp_enums
37from vts.testcases.kernel.ltp import ltp_configs
38
39
class KernelLtpTest(base_test.BaseTestClass):
    """Runs the LTP (Linux Test Project) test cases against Android OS kernel.

    Attributes:
        _dut: AndroidDevice, the device under test
        _shell: ShellMirrorObject, shell mirror object used to execute commands
        _testcases: TestcasesParser, test case input parser
        _env: dict<string, string>, dict of environment variable key value pair
        data_file_path: string, runner's directory where test cases are stored
        run_staging: bool, whether to run staging tests
        number_of_threads: int, number of threads to run in parallel. If this
                           number is set to 0, the test case will automatically
                           pick the number of available CPUs on device. If
                           the number is less than 0, it will be set to 1. If
                           the number is greater than 0, that number of threads
                           will be created to run the tests.
        include_filter: list of string, a list of test case names to run
        exclude_filter: list of string, a list of test case names to exclude
    """
    # Bitness tags used to pick 32-bit vs 64-bit binaries and test names.
    _32BIT = "32"
    _64BIT = "64"
    # Internal verdict codes produced by Verify() and consumed by CheckResult().
    _PASS = 0
    _SKIP = 1
    _FAIL = -1
64
65    def setUpClass(self):
66        """Creates a remote shell instance, and copies data files."""
67        required_params = [
68            keys.ConfigKeys.IKEY_DATA_FILE_PATH,
69            keys.ConfigKeys.KEY_TEST_SUITE, ltp_enums.ConfigKeys.RUN_STAGING,
70            ltp_enums.ConfigKeys.RUN_32BIT, ltp_enums.ConfigKeys.RUN_64BIT,
71            ltp_enums.ConfigKeys.NUMBER_OF_THREADS
72        ]
73        self.getUserParams(required_params)
74
75        logging.info("%s: %s", keys.ConfigKeys.IKEY_DATA_FILE_PATH,
76                     self.data_file_path)
77        logging.info("%s: %s", keys.ConfigKeys.KEY_TEST_SUITE, self.test_suite)
78        logging.info("%s: %s", ltp_enums.ConfigKeys.RUN_STAGING,
79                     self.run_staging)
80        logging.info("%s: %s", ltp_enums.ConfigKeys.NUMBER_OF_THREADS,
81                     self.number_of_threads)
82
83        self.include_filter = self.ExpandFilter(self.include_filter)
84        self.exclude_filter = self.ExpandFilter(self.exclude_filter)
85
86        self._dut = self.registerController(android_device)[0]
87        logging.info("product_type: %s", self._dut.product_type)
88        self._dut.shell.InvokeTerminal("one")
89        self.shell = self._dut.shell.one
90
91        self._requirement = env_checker.EnvironmentRequirementChecker(
92            self.shell)
93        self._shell_env = shell_environment.ShellEnvironment(self.shell)
94
95        disabled_tests = self.ExpandFilter(ltp_configs.DISABLED_TESTS)
96        staging_tests = self.ExpandFilter(ltp_configs.STAGING_TESTS)
97        self._testcases = test_cases_parser.TestCasesParser(
98            self.data_file_path, self.filterOneTest, disabled_tests,
99            staging_tests)
100
101        self._env = {ltp_enums.ShellEnvKeys.TMP: ltp_configs.TMP,
102                     ltp_enums.ShellEnvKeys.TMPBASE: ltp_configs.TMPBASE,
103                     ltp_enums.ShellEnvKeys.LTPTMP: ltp_configs.LTPTMP,
104                     ltp_enums.ShellEnvKeys.TMPDIR: ltp_configs.TMPDIR,
105                     ltp_enums.ShellEnvKeys.LTP_DEV_FS_TYPE:
106                     ltp_configs.LTP_DEV_FS_TYPE,
107                     ltp_enums.ShellEnvKeys.LTPROOT: ltp_configs.LTPDIR,
108                     ltp_enums.ShellEnvKeys.PATH: ltp_configs.PATH}
109
    @property
    def shell(self):
        """Returns an object that can execute a shell command on the device."""
        return self._shell

    @shell.setter
    def shell(self, shell):
        """Sets the shell object used to execute device commands."""
        self._shell = shell
119
120    def ExpandFilter(self, input_list):
121        '''Expand filter items with bitness suffix.
122
123        If a filter item contains bitness suffix, only test name with that tag will be included
124        in output.
125        Otherwise, both 32bit and 64bit suffix will be paired to the test name in output
126        list.
127
128        Args:
129            input_list: list of string, the list to expand
130
131        Returns:
132            A list of string
133        '''
134        result = []
135        for item in input_list:
136            if (item.endswith(const.SUFFIX_32BIT) or
137                    item.endswith(const.SUFFIX_64BIT)):
138                result.append(item)
139            else:
140                result.append("%s_%s" % (item, const.SUFFIX_32BIT))
141                result.append("%s_%s" % (item, const.SUFFIX_64BIT))
142        return result
143
144    def PreTestSetup(self, test_bit):
145        """Setups that needs to be done before any tests."""
146        replacements = {'#!/bin/sh': '#!/system/bin/sh',
147                        '#! /bin/sh': '#!/system/bin/sh',
148                        '#!/bin/bash': '#!/system/bin/sh',
149                        '#! /bin/bash': '#!/system/bin/sh',
150                        'bs=1M': 'bs=1m',
151                        '/var/run': ltp_configs.TMP}
152        src_host = os.path.join(self.data_file_path, 'DATA', test_bit, 'ltp')
153
154        count = 0
155        for (dirpath, dirnames, filenames) in os.walk(src_host):
156            for filename in filenames:
157                filepath = os.path.join(dirpath, filename)
158                content = ''
159                with open(filepath, 'r') as f:
160                    content = f.read()
161                content_replaced = content
162                for key in replacements:
163                    content_replaced = content_replaced.replace(
164                        key, replacements[key])
165                if content_replaced != content:
166                    with open(filepath, 'w') as f:
167                        f.write(content_replaced)
168                    count += 1
169        logging.info('Finished replacing script contents from %s files', count)
170
171        self._report_thread_lock = threading.Lock()
172
173    def PushFiles(self, test_bit):
174        """Push the related files to target.
175
176        Args:
177            test_bit: nativetest or nativetest64
178        """
179        src = os.path.join(self.data_file_path, 'DATA', test_bit, 'ltp', '.')
180        logging.info('Pushing files from %s to %s', src, ltp_configs.LTPDIR)
181        self.shell.Execute("mkdir %s -p" % ltp_configs.LTPDIR)
182        self._dut.adb.push(src, ltp_configs.LTPDIR)
183        logging.info('finished pushing files from %s to %s', src,
184                     ltp_configs.LTPDIR)
185
186    def GetEnvp(self):
187        """Generate the environment variable required to run the tests."""
188        return ' '.join("%s=%s" % (key, value)
189                        for key, value in self._env.items())
190
    def tearDownClass(self):
        """Deletes all copied data files."""
        # Remove the LTP tree pushed by PushFiles, then let the requirement
        # checker undo any device state it set up.
        self.shell.Execute("rm -rf %s" % ltp_configs.LTPDIR)
        self._requirement.Cleanup()
195
196    def Verify(self, results):
197        """Interpret the test result of each test case.
198
199        Returns:
200            tuple(int, string), a tuple of int which represents test pass, fail
201            or skip, and string representing the reason of a failed or skipped
202            test
203        """
204        if not results:
205            return (self._FAIL, "No response received. Socket timeout")
206
207        # For LTP test cases, we run one shell command for each test case
208        # So the result should also contains only one execution output
209        stdout = results[const.STDOUT][0]
210        ret_code = results[const.EXIT_CODE][0]
211        # Test case is not for the current configuration, SKIP
212        if ((ret_code == ltp_enums.TestExitCode.TCONF and
213             'TPASS' not in stdout) or
214            (ret_code == ltp_enums.TestExitCode.TPASS and 'CONF' in stdout)):
215            return (self._SKIP, "Incompatible test skipped: TCONF")
216        elif ret_code not in (ltp_enums.TestExitCode.TCONF,
217                              ltp_enums.TestExitCode.TPASS):
218            return (self._FAIL,
219                    "Got return code %s, test did not pass." % ret_code)
220        else:
221            return (self._PASS, None)
222
223    def CheckResult(self, cmd_results, result=None, note=None):
224        """Check a test result and emit exceptions if test failed or skipped.
225
226        If the shell command result is not yet interpreted, self.Verify will
227        be called to interpret the results.
228
229        Args:
230            cmd_results: dict([str],[str],[int]), command results from shell.
231            result: int, which is one of the values of _PASS, _SKIP, and _FAIL
232            note: string, reason why a test failed or get skipped
233        """
234        asserts.assertTrue(cmd_results, "No response received. Socket timeout")
235
236        logging.info("stdout: %s", cmd_results[const.STDOUT])
237        logging.info("stderr: %s", cmd_results[const.STDERR])
238        logging.info("exit_code: %s", cmd_results[const.EXIT_CODE])
239
240        if result is None:
241            result, note = self.Verify(cmd_results)
242        logging.info("verify result: %s", result)
243        logging.info("note: %s", note)
244
245        asserts.skipIf(result == self._SKIP, note)
246        asserts.assertEqual(result, self._PASS, note)
247
248    def TestNBits(self, n_bit):
249        """Runs all 32-bit or 64-bit LTP test cases.
250
251        Args:
252            n_bit: int, bitness
253        """
254        test_bit = 'nativetest'
255        if n_bit == 64:
256            test_bit += '64'
257        self.PreTestSetup(test_bit)
258        self.PushFiles(test_bit)
259
260        test_cases = list(
261            self._testcases.Load(
262                ltp_configs.LTPDIR, n_bit=n_bit, run_staging=self.run_staging))
263
264        logging.info("Checking binary exists for all test cases.")
265        self._requirement.ltp_bin_host_path = os.path.join(
266            self.data_file_path, 'DATA', test_bit, 'ltp', 'testcases', 'bin')
267        self._requirement.CheckAllTestCaseExecutables(test_cases)
268        logging.info("Start running %i individual tests." % len(test_cases))
269
270        self.RunGeneratedTestsMultiThread(
271            test_func=self.RunLtpOnce,
272            settings=test_cases,
273            args=(n_bit, ),
274            name_func=self.GetTestName)
275
276    def RunGeneratedTestsMultiThread(self, test_func, settings, args,
277                                     name_func):
278        """Run LTP tests with multi-threads.
279
280        If number_of_thread is specified to be 0 in config file, a shell query
281        will be made to the device to get the number of available CPUs. If
282        number_of_thread or number of CPUs available is 1, this function will
283        call and return parent class's regular runGeneratedTest function. Since
284        some tests may be competing resources with each other, all the failed
285        tests will be rerun sequentially in the end to confirm their failure.
286        Also, if include_filter is not empty, only 1 thread will be running.
287
288        Args:
289            test_func: The common logic shared by all these generated test
290                       cases. This function should take at least one argument,
291                       which is a parameter set.
292            settings: A list of strings representing parameter sets. These are
293                      usually json strings that get loaded in the test_func.
294            args: Iterable of additional position args to be passed to
295                  test_func.
296            name_func: A function that takes a test setting and generates a
297                       proper test name.
298
299        Returns:
300            A list of settings that fail.
301        """
302        n_workers = self.number_of_threads
303
304        if n_workers < 0:
305            logging.error('invalid setting for number of threads: < 0.')
306            n_workers = 1
307
308        # Include filter is not empty; Run in sequential.
309        if self.include_filter:
310            n_workers = 1
311
312        # Number of thread is set to 0 (automatic)
313        if not n_workers:
314            n_workers = self._shell_env.GetDeviceNumberOfPresentCpu()
315            logging.info('Number of CPU available on device: %i', n_workers)
316
317        # Skip multithread version if only 1 worker available
318        if n_workers == 1:
319            return self.runGeneratedTests(
320                test_func=test_func,
321                settings=settings,
322                args=args,
323                name_func=name_func)
324
325        settings_multithread = []
326        settings_singlethread = []
327        for test_case in settings:
328            if (test_case.is_staging or test_case.testsuite in
329                    ltp_configs.TEST_SUITES_REQUIRE_SINGLE_THREAD_MODE):
330                settings_singlethread.append(test_case)
331            else:
332                settings_multithread.append(test_case)
333
334        failed_tests = self.runGeneratedTests(
335            test_func=test_func,
336            settings=settings_singlethread,
337            args=args,
338            name_func=name_func)
339
340        # Shuffle the tests to reduce resource competition probability
341        random.shuffle(settings_multithread)
342
343        # Create a queue for thread workers to pull tasks
344        q = queue.Queue()
345        map(q.put, settings_multithread)
346
347        # Create individual shell sessions for thread workers
348        for i in xrange(n_workers):
349            self._dut.shell.InvokeTerminal("shell_thread_{}".format(i))
350
351        failed_multithread_tests = set()
352        with futures.ThreadPoolExecutor(max_workers=n_workers) as executor:
353            fs = [executor.submit(self.RunLtpWorker, q, args, name_func, i)
354                  for i in xrange(n_workers)]
355
356            failed_test_sets = map(futures.Future.result, fs)
357            for failed_test_set in failed_test_sets:
358                for test_case in failed_test_set:
359                    failed_multithread_tests.add(test_case)
360
361        for test_case in failed_multithread_tests:
362            logging.info(
363                "Test case %s failed during multi-thread run, rerunning...",
364                test_case)
365
366        # In the end, rerun all failed tests to confirm their failure
367        # in sequential.
368        failed_tests.extend(
369            self.runGeneratedTests(
370                test_func=test_func,
371                settings=failed_multithread_tests,
372                args=args,
373                name_func=name_func))
374
375        return failed_tests
376
377    def RunLtpWorker(self, testcases, args, name_func, id):
378        """Worker thread to run a LTP test case at a time."""
379        shell = getattr(self._dut.shell, "shell_thread_{}".format(id))
380        failed_tests = set()
381
382        while True:
383            test_case = None
384            try:
385                test_case = testcases.get(block=False)
386                logging.info("Worker {} takes '{}'.".format(id, test_case))
387            except:
388                logging.info("Worker {} finished.".format(id))
389                return failed_tests
390
391            test_name = name_func(test_case, *args)
392
393            # Check whether test case is filtered out by base_test's filtering method
394            if test_case.is_filtered:
395                self.InternalResultReportMultiThread(test_name, asserts.skipIf,
396                                                     (False, test_case.note))
397                continue
398            logging.info("Worker {} starts checking requirement "
399                         "for '{}'.".format(id, test_case))
400
401            # Check test requirements
402            requirement_satisfied = self._requirement.Check(test_case)
403            if not requirement_satisfied:
404                logging.info("Worker {} reports requirement "
405                             "not satisfied for '{}'.".format(id, test_case))
406                self.InternalResultReportMultiThread(test_name, asserts.skipIf,
407                                                     (False, test_case.note))
408                continue
409
410            cmd = "export {envp} && {commands}".format(
411                envp=self.GetEnvp(), commands=test_case.GetCommand())
412
413            logging.info("Worker {} starts executing command "
414                         "for '{}'.\n  Command:{}".format(id, test_case, cmd))
415            cmd_results = shell.Execute(cmd)
416
417            logging.info("Worker {} starts verifying results "
418                         "for '{}'.".format(id, test_case))
419
420            result, note = self.Verify(cmd_results)
421            if result == self._FAIL:
422                # Hide failed tests from the runner and put into rerun list
423                logging.info("Worker {} reports '{}' failed. Adding to "
424                             "sequential job queue.".format(id, test_case))
425                failed_tests.add(test_case)
426            else:
427                # Report skipped or passed tests to runner
428                self.InternalResultReportMultiThread(
429                    test_name, self.CheckResult, (cmd_results, result, note))
430
431    def InternalResultReportMultiThread(self, test_name, function, args,
432                                        **kwargs):
433        """Report a test result to runner thread safely.
434
435        Run the given function to generate result for the runner. The function
436        given should produce the same result visible to the runner but may not
437        run any actual tests.
438
439        Args:
440            test_name: string, name of a test case
441            function: the function to generate a test case result for runner
442            args: any arguments for the function
443            **kwargs: any additional keyword arguments for runner
444        """
445        self._report_thread_lock.acquire()
446        self.results.requested.append(test_name)
447        try:
448            self.execOneTest(test_name, function, args, **kwargs)
449        except Exception as e:
450            raise e
451        finally:
452            self._report_thread_lock.release()
453
    def GetTestName(self, test_case, n_bit):
        """Generate the vts test name of a ltp test.

        Args:
            test_case: the test case; its string form becomes the name prefix.
            n_bit: bitness tag, e.g. "32" or "64".

        Returns:
            string, '<test_case>_<n_bit>bit'.
        """
        return "{}_{}bit".format(test_case, n_bit)
457
458    def RunLtpOnce(self, test_case, n_bit):
459        "Run one LTP test case"
460        asserts.skipIf(test_case.is_filtered, test_case.note)
461        asserts.skipIf(not self._requirement.Check(test_case), test_case.note)
462
463        cmd = "export {envp} && {commands}".format(
464            envp=self.GetEnvp(), commands=test_case.GetCommand())
465        logging.info("Executing %s", cmd)
466        self.CheckResult(self.shell.Execute(cmd))
467
468    def generate64BitTests(self):
469        """Runs all 64-bit LTP test cases."""
470        if not self.run_64bit:
471            logging.info('User specified not to run 64 bit version LTP tests.')
472            return
473        if not self._dut.is64Bit:
474            logging.info('Target device does not support 64 bit tests.')
475            return
476
477        self.TestNBits(self._64BIT)
478
479    def generate32BitTests(self):
480        """Runs all 32-bit LTP test cases."""
481        if not self.run_32bit:
482            logging.info('User specified not to run 32 bit version LTP tests.')
483            return
484
485        self.TestNBits(self._32BIT)
486
487
if __name__ == "__main__":
    # Entry point: hand control to the VTS test runner, which instantiates
    # the test class defined in this module and runs its generated tests.
    test_runner.main()
490