#!/usr/bin/env python
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import random
import logging
from concurrent import futures
import queue
import threading
import os

from vts.runners.host import asserts
from vts.runners.host import base_test
from vts.runners.host import const
from vts.runners.host import keys
from vts.runners.host import records
from vts.runners.host import test_runner
from vts.utils.python.common import cmd_utils
from vts.utils.python.common import list_utils

from vts.testcases.kernel.ltp import test_cases_parser
from vts.testcases.kernel.ltp import environment_requirement_checker as env_checker
from vts.testcases.kernel.ltp.shell_environment import shell_environment
from vts.testcases.kernel.ltp import ltp_enums
from vts.testcases.kernel.ltp import ltp_configs

RANDOM_SEED = 0


class KernelLtpTest(base_test.BaseTestClass):
    """Runs the LTP (Linux Test Project) test cases against the Android kernel.

    Attributes:
        _dut: AndroidDevice, the device under test
        _shell: ShellMirrorObject, shell mirror object used to execute commands
        _testcases: TestcasesParser, test case input parser
        _env: dict<string, string>, dict of environment variable key-value pairs
        data_file_path: string, runner's directory where test cases are stored
        run_staging: bool, whether to run staging tests
        number_of_threads: int, number of threads to run in parallel. If this
                           number is set to 0, the test case will automatically
                           pick the number of available CPUs on the device. If
                           the number is less than 0, it will be set to 1. If
                           the number is greater than 0, that number of threads
                           will be created to run the tests.
    """
    _32BIT = "32"
    _64BIT = "64"
    _PASS = 0
    _SKIP = 1
    _FAIL = -1

    def setUpClass(self):
        """Creates a remote shell instance, and copies data files."""
        required_params = [
            keys.ConfigKeys.IKEY_DATA_FILE_PATH, keys.ConfigKeys.KEY_TEST_SUITE
        ]
        self.getUserParams(required_params)

        self.run_32bit = self.getUserParam(
            ltp_enums.ConfigKeys.RUN_32BIT, default_value=True)
        self.run_64bit = self.getUserParam(
            ltp_enums.ConfigKeys.RUN_64BIT, default_value=True)
        self.run_staging = self.getUserParam(
            ltp_enums.ConfigKeys.RUN_STAGING, default_value=False)

        logging.info("%s: %s", keys.ConfigKeys.IKEY_DATA_FILE_PATH,
                     self.data_file_path)
        logging.info("%s: %s", keys.ConfigKeys.KEY_TEST_SUITE, self.test_suite)
        logging.info("%s: %s", ltp_enums.ConfigKeys.RUN_STAGING,
                     self.run_staging)

        self.number_of_threads = self.getUserParam(
            ltp_enums.ConfigKeys.LTP_NUMBER_OF_THREADS,
            default_value=ltp_configs.DEFAULT_NUMBER_OF_THREADS)
        logging.info("%s: %s", ltp_enums.ConfigKeys.LTP_NUMBER_OF_THREADS,
                     self.number_of_threads)

        self._dut = self.android_devices[0]
        logging.info("product_type: %s", self._dut.product_type)
        self.shell = self._dut.shell

        self._requirement = env_checker.EnvironmentRequirementChecker(
            self.shell)
        self._shell_env = shell_environment.ShellEnvironment(self.shell)

        self._testcases = test_cases_parser.TestCasesParser(
            self.data_file_path, self.filterOneTest)

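        # Environment variables exported before each test invocation (see
        # GetEnvp); the values come from ltp_configs.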
        self._env = {
            ltp_enums.ShellEnvKeys.TMP: ltp_configs.TMP,
            ltp_enums.ShellEnvKeys.TMPBASE: ltp_configs.TMPBASE,
            ltp_enums.ShellEnvKeys.LTPTMP: ltp_configs.LTPTMP,
            ltp_enums.ShellEnvKeys.TMPDIR: ltp_configs.TMPDIR,
            ltp_enums.ShellEnvKeys.LTP_DEV_FS_TYPE:
            ltp_configs.LTP_DEV_FS_TYPE,
            ltp_enums.ShellEnvKeys.LTPROOT: ltp_configs.LTPDIR,
            ltp_enums.ShellEnvKeys.PATH: ltp_configs.PATH
        }

    @property
    def shell(self):
        """Returns an object that can execute a shell command."""
        return self._shell

    @shell.setter
    def shell(self, shell):
        """Sets the shell object."""
        self._shell = shell

    def PreTestSetup(self, test_bit):
        """Performs setup that needs to be done before any tests run."""
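        # LTP scripts are written for a GNU userland; rewrite interpreter
        # paths and other host-isms so they run under Android's shell at
        # /system/bin/sh, adjust the dd block-size suffix for Android's dd,
        # and point /var/run at a writable temp directory.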
        replacements = {
            b'#!/bin/sh': b'#!/system/bin/sh',
            b'#! /bin/sh': b'#!/system/bin/sh',
            b'#!/bin/bash': b'#!/system/bin/sh',
            b'#! /bin/bash': b'#!/system/bin/sh',
            b'bs=1M': b'bs=1m',
            b'/var/run': ltp_configs.TMP.encode()
        }
        src_host = os.path.join(self.data_file_path, 'DATA', test_bit, 'ltp')

        count = 0
        for dirpath, dirnames, filenames in os.walk(src_host):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                with open(filepath, 'rb') as f:
                    content = f.read()
                content_replaced = content
                for old, new in replacements.items():
                    content_replaced = content_replaced.replace(old, new)
                if content_replaced != content:
                    with open(filepath, 'wb') as f:
                        f.write(content_replaced)
                    count += 1
        logging.info('Finished replacing script contents in %d files', count)

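        # This lock serializes result reporting from the worker threads; see
        # InternalResultReportMultiThread below.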
        self._report_thread_lock = threading.Lock()

    def PushFiles(self, test_bit):
        """Pushes the related files to the target device.

        Args:
            test_bit: string, 'nativetest' or 'nativetest64'
        """
        src = os.path.join(self.data_file_path, 'DATA', test_bit, 'ltp', '.')
        logging.info('Pushing files from %s to %s', src, ltp_configs.LTPDIR)
        self.shell.Execute("mkdir -p %s" % ltp_configs.LTPDIR)
        self._dut.adb.push(src, ltp_configs.LTPDIR)
        logging.info('Finished pushing files from %s to %s', src,
                     ltp_configs.LTPDIR)

    def GetEnvp(self):
        """Generates the environment variable string for running the tests."""
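        # Produces a single 'KEY1=value1 KEY2=value2 ...' string that is used
        # in a shell 'export' statement before each test command.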
        return ' '.join("%s=%s" % (key, value)
                        for key, value in self._env.items())

    def tearDownClass(self):
        """Deletes all copied data files."""
        self.shell.Execute("rm -rf %s" % ltp_configs.LTPDIR)
        self._requirement.Cleanup()

    def Verify(self, results):
        """Interprets the test result of each test case.

        Returns:
            tuple(int, string), a tuple of an int representing test pass, fail,
            or skip, and a string giving the reason of a failed or skipped
            test
        """
        if not results:
            return (self._FAIL, "No response received. Socket timeout")

        # For LTP test cases, we run one shell command for each test case,
        # so the result should also contain only one execution output.
        stdout = results[const.STDOUT][0]
        ret_code = results[const.EXIT_CODE][0]
        # Test case is not for the current configuration, SKIP
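        # LTP's TCONF exit code means the test does not apply to the current
        # configuration (e.g. a missing kernel feature), so it is treated as
        # a skip rather than a failure.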
        if ((ret_code == ltp_enums.TestExitCode.TCONF and
             'TPASS' not in stdout) or
            (ret_code == ltp_enums.TestExitCode.TPASS and 'CONF' in stdout)):
            return (self._SKIP, "Incompatible test skipped: TCONF")
        elif ret_code not in (ltp_enums.TestExitCode.TCONF,
                              ltp_enums.TestExitCode.TPASS):
            return (self._FAIL,
                    "Got return code %s, test did not pass." % ret_code)
        else:
            return (self._PASS, None)

    def CheckResult(self, cmd_results, result=None, note=None):
        """Checks a test result and emits exceptions if it failed or skipped.

        If the shell command result is not yet interpreted, self.Verify will
        be called to interpret the results.

        Args:
            cmd_results: dict([str],[str],[int]), command results from shell.
            result: int, one of the values of _PASS, _SKIP, and _FAIL
            note: string, reason why a test failed or got skipped
        """
        asserts.assertTrue(cmd_results, "No response received. Socket timeout")

        logging.info("stdout: %s", cmd_results[const.STDOUT])
        logging.info("stderr: %s", cmd_results[const.STDERR])
        logging.info("exit_code: %s", cmd_results[const.EXIT_CODE])

        if result is None:
            result, note = self.Verify(cmd_results)
        logging.info("verify result: %s", result)
        logging.info("note: %s", note)

        asserts.skipIf(result == self._SKIP, note)
        asserts.assertEqual(result, self._PASS, note)

    def TestNBits(self, n_bit):
        """Runs all 32-bit or 64-bit LTP test cases.

        Args:
            n_bit: string, bitness, either _32BIT or _64BIT
        """
        test_bit = 'nativetest'
        if n_bit == self._64BIT:
            test_bit += '64'
        self.PreTestSetup(test_bit)
        self.PushFiles(test_bit)

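        # ro.config.low_ram marks low-memory devices; the flag is passed to
        # the test case parser below so it can adjust the test set.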
        is_low_mem = self._dut.getProp('ro.config.low_ram').lower() == 'true'
        if is_low_mem:
            logging.info('Device is configured as a low RAM device.')

        test_cases = list(
            self._testcases.Load(
                ltp_configs.LTPDIR,
                n_bit,
                self.test_filter,
                run_staging=self.run_staging,
                is_low_mem=is_low_mem))

        logging.info("Checking that binaries exist for all test cases.")
        self._requirement.ltp_bin_host_path = os.path.join(
            self.data_file_path, 'DATA', test_bit, 'ltp', 'testcases', 'bin')
        self._requirement.CheckAllTestCaseExecutables(test_cases)
        logging.info("Start running %i individual tests.", len(test_cases))

        self.RunGeneratedTestsMultiThread(
            test_func=self.RunLtpOnce,
            settings=test_cases,
            args=(n_bit, ),
            name_func=self.GetTestName)

    def RunGeneratedTestsMultiThread(self, test_func, settings, args,
                                     name_func):
        """Runs LTP tests with multiple threads.

        If number_of_threads is set to 0 in the config file, a shell query
        will be made to the device to get the number of available CPUs. If
        number_of_threads or the number of CPUs available is 1, this function
        will call and return the parent class's regular runGeneratedTests
        function. Since some tests may compete with each other for resources,
        all failed tests are rerun sequentially at the end to confirm their
        failure. Also, if include_filter is not empty, only 1 thread will be
        running.

        Args:
            test_func: The common logic shared by all these generated test
                       cases. This function should take at least one argument,
                       which is a parameter set.
            settings: A list of strings representing parameter sets. These are
                      usually json strings that get loaded in the test_func.
            args: Iterable of additional position args to be passed to
                  test_func.
            name_func: A function that takes a test setting and generates a
                       proper test name.

        Returns:
            A list of settings that fail.
        """
        n_workers = self.number_of_threads

        if n_workers < 0:
            logging.error('Invalid setting for number of threads: < 0.')
            n_workers = 1

        # Include filter is not empty; run sequentially.
        if self.test_filter.include_filter:
            n_workers = 1

        # Number of threads is set to 0 (automatic).
        if not n_workers:
            n_workers = self._shell_env.GetDeviceNumberOfPresentCpu()
            logging.info('Number of CPUs available on device: %i', n_workers)

        # Skip the multithreaded version if only 1 worker is available.
        if n_workers == 1:
            return self.runGeneratedTests(
                test_func=test_func,
                settings=settings,
                args=args,
                name_func=name_func)

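        # Staging tests and suites known to require single-thread mode run
        # sequentially; everything else is fanned out to the worker pool.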
        settings_multithread = []
        settings_singlethread = []
        for test_case in settings:
            if (test_case.is_staging or test_case.testsuite in
                    ltp_configs.TEST_SUITES_REQUIRE_SINGLE_THREAD_MODE):
                settings_singlethread.append(test_case)
            else:
                settings_multithread.append(test_case)

        failed_tests = self.runGeneratedTests(
            test_func=test_func,
            settings=settings_singlethread,
            args=args,
            name_func=name_func)

        # Shuffle the tests to reduce the chance of resource contention;
        # a fixed seed keeps the order reproducible across runs.
        random.seed(RANDOM_SEED)
        random.shuffle(settings_multithread)

        # Create a queue for thread workers to pull tasks from
        q = queue.Queue()
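        # map() is lazy in Python 3 and would never call q.put, so feed the
        # queue with an explicit loop.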
        for test_case in settings_multithread:
            q.put(test_case)

        # Create individual shell sessions for the thread workers
        for i in range(n_workers):
            self._dut.shell.InvokeTerminal("shell_thread_{}".format(i))

        failed_multithread_tests = set()
        with futures.ThreadPoolExecutor(max_workers=n_workers) as executor:
            fs = [
                executor.submit(self.RunLtpWorker, q, args, name_func, i)
                for i in range(n_workers)
            ]

            failed_test_sets = [f.result() for f in fs]
            for failed_test_set in failed_test_sets:
                for test_case in failed_test_set:
                    failed_multithread_tests.add(test_case)

        for test_case in failed_multithread_tests:
            logging.info(
                "Test case %s failed during multi-thread run, rerunning...",
                test_case)

        # In the end, rerun all failed tests sequentially to confirm their
        # failure.
        failed_tests.extend(
            self.runGeneratedTests(
                test_func=test_func,
                settings=failed_multithread_tests,
                args=args,
                name_func=name_func))

        return failed_tests

    def RunLtpWorker(self, testcases, args, name_func, id):
        """Worker thread that runs one LTP test case at a time."""
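        # Bind this worker to the dedicated shell session created for it in
        # RunGeneratedTestsMultiThread, so command streams do not interleave.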
        shell = getattr(self._dut.shell, "shell_thread_{}".format(id))
        failed_tests = set()

        while True:
            test_case = None
            try:
                test_case = testcases.get(block=False)
                logging.info("Worker {} takes '{}'.".format(id, test_case))
            except queue.Empty:
                logging.info("Worker {} finished.".format(id))
                return failed_tests

            test_name = name_func(test_case, *args)

            # Check whether the test case is filtered out by base_test's
            # filtering method
            if test_case.is_filtered:
                self.InternalResultReportMultiThread(test_name, asserts.skipIf,
                                                     (True, test_case.note))
                continue
            logging.info("Worker {} starts checking requirements "
                         "for '{}'.".format(id, test_case))

            # Check test requirements
            requirement_satisfied = self._requirement.Check(test_case)
            if not requirement_satisfied:
                logging.info("Worker {} reports requirements "
                             "not satisfied for '{}'.".format(id, test_case))
                self.InternalResultReportMultiThread(test_name, asserts.skipIf,
                                                     (True, test_case.note))
                continue

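            # Assemble the shell command: export the LTP environment, cd into
            # the LTP binary directory, then run the test's command line.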
            cmd = "export {envp} && cd {cwd} && {commands}".format(
                envp=self.GetEnvp(), cwd=ltp_configs.LTPBINPATH,
                commands=test_case.command)

            logging.info("Worker {} starts executing command "
                         "for '{}'.\n  Command: {}".format(id, test_case, cmd))
            cmd_results = shell.Execute(cmd)

            logging.info("Worker {} starts verifying results "
                         "for '{}'.".format(id, test_case))

            result, note = self.Verify(cmd_results)
            if result == self._FAIL:
                # Hide failed tests from the runner and put them into the
                # rerun list.
                logging.info("Worker {} reports '{}' failed. Adding to "
                             "sequential job queue.".format(id, test_case))
                failed_tests.add(test_case)
            else:
                # Report skipped or passed tests to the runner.
                self.InternalResultReportMultiThread(
                    test_name, self.CheckResult, (cmd_results, result, note))

    def InternalResultReportMultiThread(self, test_name, function, args,
                                        **kwargs):
        """Reports a test result to the runner in a thread-safe way.

        Runs the given function to generate a result for the runner. The
        function given should produce the same result visible to the runner
        but may not run any actual tests.

        Args:
            test_name: string, name of a test case
            function: the function that generates a test case result for the
                      runner
            args: any arguments for the function
            **kwargs: any additional keyword arguments for the runner
        """
        with self._report_thread_lock:
            tr_record = records.TestResultRecord(test_name, self.TAG)
            self.results.requested.append(tr_record)
            self.execOneTest(test_name, function, args, **kwargs)

    def GetTestName(self, test_case, n_bit):
        """Generates the VTS test name of an LTP test."""
        return "{}_{}bit".format(test_case, n_bit)

    def RunLtpOnce(self, test_case, n_bit):
        """Runs one LTP test case."""
        asserts.skipIf(test_case.is_filtered, test_case.note)
        asserts.skipIf(not self._requirement.Check(test_case), test_case.note)

        cmd = "export {envp} && cd {cwd} && {commands}".format(
            envp=self.GetEnvp(), cwd=ltp_configs.LTPBINPATH,
            commands=test_case.command)
        logging.info("Executing %s", cmd)
        self.CheckResult(self.shell.Execute(cmd))

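    # The generate* methods below expand, at runtime, into the individual
    # 32-bit and 64-bit LTP test cases via TestNBits.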
    def generate64BitTests(self):
        """Runs all 64-bit LTP test cases."""
        if not self.run_64bit:
            logging.info('User specified not to run 64-bit LTP tests.')
            return
        if not self._dut.is64Bit:
            logging.info('Target device does not support 64-bit tests.')
            return
        if self.abi_bitness is not None and self.abi_bitness != '64':
            logging.info('Skipped 64-bit tests on %s-bit ABI.',
                         self.abi_bitness)
            return

        self.TestNBits(self._64BIT)

    def generate32BitTests(self):
        """Runs all 32-bit LTP test cases."""
        if not self.run_32bit:
            logging.info('User specified not to run 32-bit LTP tests.')
            return
        if self.abi_bitness is not None and self.abi_bitness != '32':
            logging.info('Skipped 32-bit tests on %s-bit ABI.',
                         self.abi_bitness)
            return

        self.TestNBits(self._32BIT)


if __name__ == "__main__":
    test_runner.main()