• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2#
3# Copyright (C) 2017 The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#      http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17"""Release test for simpleperf prebuilts.
18
19It includes below tests:
201. Test profiling Android apps on different Android versions (starting from Android N).
212. Test simpleperf python scripts on different Hosts (linux, darwin and windows) on x86_64.
223. Test using both devices and emulators.
234. Test using both `adb root` and `adb unroot`.
24
25"""
26
import argparse
import fnmatch
import inspect
import multiprocessing as mp
import os
import re
import shutil
import subprocess
import sys
import time
import types
import unittest
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple

from tqdm import tqdm

from simpleperf_utils import BaseArgumentParser, extant_dir, log_exit, remove, is_darwin
44
45from . api_profiler_test import *
46from . annotate_test import *
47from . app_profiler_test import *
48from . app_test import *
49from . binary_cache_builder_test import *
50from . cpp_app_test import *
51from . debug_unwind_reporter_test import *
52from . gecko_profile_generator_test import *
53from . inferno_test import *
54from . java_app_test import *
55from . kotlin_app_test import *
56from . pprof_proto_generator_test import *
57from . purgatorio_test import *
58from . report_html_test import *
59from . report_lib_test import *
60from . report_sample_test import *
61from . run_simpleperf_on_device_test import *
62from . stackcollapse_test import *
63from . tools_test import *
64from . test_utils import TestHelper
65
66
def get_args() -> argparse.Namespace:
    """ Parse command line options for the release test and return the namespace. """
    arg_parser = BaseArgumentParser(description=__doc__)
    arg_parser.add_argument(
        '--browser', action='store_true', help='open report html file in browser.')
    arg_parser.add_argument(
        '-d', '--device', nargs='+',
        help='set devices used to run tests. Each device in format name:serial-number')
    arg_parser.add_argument(
        '--only-host-test', action='store_true', help='Only run host tests')
    arg_parser.add_argument('--list-tests', action='store_true', help='List tests')
    arg_parser.add_argument('--ndk-path', type=extant_dir, help='Set the path of a ndk release')
    arg_parser.add_argument(
        '-p', '--pattern', nargs='+', help='Run tests matching the selected pattern.')
    arg_parser.add_argument('-r', '--repeat', type=int, default=1, help='times to repeat tests')
    arg_parser.add_argument('--test-from', help='Run tests following the selected test.')
    arg_parser.add_argument(
        '--test-dir', default='test_dir', help='Directory to store test results')
    return arg_parser.parse_args()
82
83
def get_all_tests() -> List[str]:
    """ Collect 'ClassName.test_method' for every TestCase visible in this module. """
    found = []
    for cls_name, obj in globals().items():
        if not (isinstance(obj, type) and issubclass(obj, unittest.TestCase)):
            continue
        for attr_name, attr in inspect.getmembers(obj):
            is_callable = isinstance(attr, (types.MethodType, types.FunctionType))
            if is_callable and attr_name.startswith('test'):
                found.append(cls_name + '.' + attr_name)
    return sorted(found)
93
94
def get_host_tests() -> List[str]:
    """ Return only the tests classified as running purely on the host. """
    return [test for test in get_all_tests() if get_test_type(test) == 'host_test']
99
100
def get_filtered_tests(
        tests: List[str],
        test_from: Optional[str],
        test_pattern: Optional[List[str]]) -> List[str]:
    """ Narrow the test list: drop tests before `test_from`, then keep only
        tests matching any of the glob patterns in `test_pattern`.
    """
    if test_from:
        try:
            start = tests.index(test_from)
        except ValueError:
            log_exit("Can't find test %s" % test_from)
        else:
            tests = tests[start:]
    if test_pattern:
        # fnmatch globs are translated into anchored regexes.
        regexes = [re.compile(fnmatch.translate(glob)) for glob in test_pattern]
        tests = [t for t in tests if any(regex.match(t) for regex in regexes)]
        if not tests:
            log_exit('No tests are matched.')
    return tests
116
117
def get_test_type(test: str) -> Optional[str]:
    """ Classify a test name ('TestCase.test_method') into one of:
        'device_test', 'device_serialized_test', 'host_test', or None if unknown.
    """
    device_testcases = {
        'TestApiProfiler', 'TestNativeProfiling', 'TestNativeLibDownloader',
        'TestRecordingRealApps', 'TestRunSimpleperfOnDevice',
    }
    host_testcases = {
        'TestAnnotate',
        'TestBinaryCacheBuilder',
        'TestDebugUnwindReporter',
        'TestInferno',
        'TestPprofProtoGenerator',
        'TestPurgatorio',
        'TestReportHtml',
        'TestReportLib',
        'TestReportSample',
        'TestStackCollapse',
        'TestTools',
        'TestGeckoProfileGenerator',
    }
    testcase_name, test_name = test.split('.')
    # This single test needs exclusive use of the device, so it can't run in
    # parallel with other device tests.
    if test_name == 'test_run_simpleperf_without_usb_connection':
        return 'device_serialized_test'
    if testcase_name in device_testcases or testcase_name.startswith('TestExample'):
        return 'device_test'
    if testcase_name in host_testcases:
        return 'host_test'
    return None
142
143
def build_testdata(testdata_dir: Path):
    """ Collect testdata in testdata_dir.

        In system/extras/simpleperf/scripts, testdata comes from:
            <script_dir>/../testdata, <script_dir>/test/script_testdata, <script_dir>/../demo
        In prebuilts/simpleperf, testdata comes from:
            <script_dir>/test/testdata

        Fix: this function used `shutil` without it being imported anywhere in
        the module, which raised NameError as soon as a source dir existed.
        `shutil` is now imported at module level.
    """
    # Raises FileExistsError if the dir already exists: callers are expected
    # to start from a fresh test dir.
    testdata_dir.mkdir()

    script_test_dir = Path(__file__).resolve().parent
    script_dir = script_test_dir.parent

    # Candidate testdata locations; which of them exist depends on whether we
    # run from the platform checkout or from prebuilts.
    source_dirs = [
        script_test_dir / 'script_testdata',
        script_test_dir / 'testdata',
        script_dir.parent / 'testdata',
        script_dir.parent / 'demo',
        script_dir.parent / 'runtest',
    ]

    for source_dir in source_dirs:
        if not source_dir.is_dir():
            continue
        for src_path in source_dir.iterdir():
            dest_path = testdata_dir / src_path.name
            # Earlier source dirs win: never overwrite an already-copied entry.
            if dest_path.exists():
                continue
            if src_path.is_file():
                shutil.copyfile(src_path, dest_path)
            elif src_path.is_dir():
                shutil.copytree(src_path, dest_path)
175
176
def run_tests(tests: List[str]) -> bool:
    """ Run the given tests with unittest in this process.

        Test output goes to TestHelper.log_fh; returns True if all passed.
    """
    runner = unittest.TextTestRunner(stream=TestHelper.log_fh, verbosity=0)
    program = unittest.main(
        argv=[sys.argv[0]] + tests, testRunner=runner,
        exit=False, verbosity=0, module='test.do_test')
    return program.result.wasSuccessful()
183
184
def test_process_entry(tests: List[str], test_options: List[str], conn: mp.connection.Connection):
    """ Entry point of a child test process.

        Parses the per-process options, initializes TestHelper (which reports
        results back through `conn`), then runs the given tests.
    """
    option_parser = argparse.ArgumentParser()
    option_parser.add_argument('--browser', action='store_true')
    option_parser.add_argument('--device', help='android device serial number')
    option_parser.add_argument('--ndk-path', type=extant_dir)
    option_parser.add_argument('--testdata-dir', type=extant_dir)
    option_parser.add_argument('--test-dir', help='directory to store test results')
    parsed = option_parser.parse_args(test_options)

    TestHelper.init(parsed.test_dir, parsed.testdata_dir,
                    parsed.browser, parsed.ndk_path, parsed.device, conn)
    run_tests(tests)
197
198
@dataclass
class Device:
    """ An Android device under test: a human-readable name plus its adb serial. """
    name: str
    serial_number: str
203
204
@dataclass
class TestResult:
    """ Outcome of a single test: the try that produced it, a status string
        (e.g. 'FAILED'), and a human-readable duration string.
    """
    try_time: int
    status: str
    duration: str

    def __str__(self) -> str:
        parts = [self.status]
        if self.status == 'FAILED':
            parts.append(f'(at try_time {self.try_time})')
        parts.append(self.duration)
        return ' '.join(parts)
217
218
class TestProcess:
    """ Create a test process to run selected tests on a device.

        The child process runs test_process_entry() and reports one message per
        finished test back through a one-way multiprocessing pipe. If the child
        dies or stalls before finishing, restart() starts a new child for the
        remaining tests, up to TEST_MAX_TRY_TIME tries.
    """

    # Max number of child processes to start before marking leftovers FAILED.
    TEST_MAX_TRY_TIME = 10
    # With no result message for this long, the child is assumed hung and killed.
    TEST_TIMEOUT_IN_SEC = 10 * 60

    def __init__(
            self, test_type: str, tests: List[str],
            device: Optional[Device],
            repeat_index: int,
            test_options: List[str]):
        self.test_type = test_type
        self.tests = tests
        self.device = device
        self.repeat_index = repeat_index
        self.test_options = test_options
        self.try_time = 1
        # Test name -> TestResult for tests finished so far (across tries).
        self.test_results: Dict[str, TestResult] = {}
        self.parent_conn: Optional[mp.connection.Connection] = None
        self.proc: Optional[mp.Process] = None
        # Timestamp of the last received result; drives the hang timeout.
        self.last_update_time = 0.0
        self._start_test_process()

    def _start_test_process(self):
        """ Spawn a child process running the tests that have no result yet. """
        unfinished_tests = [test for test in self.tests if test not in self.test_results]
        # One-way pipe: the child writes result messages, we read them.
        self.parent_conn, child_conn = mp.Pipe(duplex=False)
        test_options = self.test_options[:]
        test_options += ['--test-dir', str(self.test_dir)]
        if self.device:
            test_options += ['--device', self.device.serial_number]
        self.proc = mp.Process(target=test_process_entry, args=(
            unfinished_tests, test_options, child_conn))
        self.proc.start()
        self.last_update_time = time.time()

    @property
    def name(self) -> str:
        """ Unique name for this process; used for test dirs and progress bars. """
        name = self.test_type
        if self.device:
            name += '_' + self.device.name
        name += '_repeat_%d' % self.repeat_index
        return name

    @property
    def test_dir(self) -> Path:
        """ Directory to run the tests. """
        return Path.cwd() / (self.name + '_try_%d' % self.try_time)

    @property
    def alive(self) -> bool:
        """ Return if the test process is alive. """
        return self.proc.is_alive()

    @property
    def finished(self) -> bool:
        """ Return if all tests are finished. """
        return len(self.test_results) == len(self.tests)

    def check_update(self):
        """ Check if there is any test update. """
        try:
            # Drain every pending result message from the child.
            while self.parent_conn.poll():
                msg = self.parent_conn.recv()
                self._process_msg(msg)
                self.last_update_time = time.time()
        except (EOFError, BrokenPipeError) as e:
            # The child may have exited and closed its end of the pipe.
            pass
        if time.time() - self.last_update_time > TestProcess.TEST_TIMEOUT_IN_SEC:
            # No progress for too long: assume the child hung and kill it;
            # the monitor will then see it as dead and may restart it.
            self.proc.terminate()

    def _process_msg(self, msg: str):
        # Message format: "<test_name> <status> <duration>", space separated.
        test_name, test_success, test_duration = msg.split()
        self.test_results[test_name] = TestResult(self.try_time, test_success, test_duration)

    def join(self):
        self.proc.join()

    def restart(self) -> bool:
        """ Create a new test process to run unfinished tests. """
        if self.finished:
            return False
        if self.try_time == self.TEST_MAX_TRY_TIME:
            """ Exceed max try time. So mark left tests as failed. """
            for test in self.tests:
                if test not in self.test_results:
                    test_duration = '%.3fs' % (time.time() - self.last_update_time)
                    self.test_results[test] = TestResult(self.try_time, 'FAILED', test_duration)
            return False

        self.try_time += 1
        self._start_test_process()
        return True
311
312
class ProgressBar:
    """ tqdm-based progress display: one overall bar plus one bar per test process. """

    def __init__(self, total_count: int):
        self.total_bar = tqdm(
            total=total_count, desc='test progress', ascii=' ##',
            bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}, {rate_fmt}", position=0)
        self.test_process_bars: Dict[str, tqdm] = {}

    def update(self, test_proc: TestProcess):
        """ Advance the bars to match the number of finished tests in test_proc. """
        bar = self.test_process_bars.get(test_proc.name)
        if bar is None:
            bar = tqdm(total=len(test_proc.tests),
                       desc=test_proc.name, ascii=' ##',
                       bar_format="{l_bar}{bar} | {n_fmt}/{total_fmt} [{elapsed}]")
            self.test_process_bars[test_proc.name] = bar

        delta = len(test_proc.test_results) - bar.n
        if delta:
            bar.update(delta)
            self.total_bar.update(delta)

    def end_test_proc(self, test_proc: TestProcess):
        """ Close and drop the per-process bar once its process is done. """
        bar = self.test_process_bars.pop(test_proc.name, None)
        if bar is not None:
            bar.close()

    def end_tests(self):
        """ Close every remaining bar, including the overall one. """
        for bar in self.test_process_bars.values():
            bar.close()
        self.total_bar.close()
343
344
class TestSummary:
    """ Track per-(test, environment) results and mirror them to summary files.

        An environment is either '<device>_repeat_<n>' or 'host'. A value of
        None means the test has not produced a result yet (counted as failed).
    """

    def __init__(
            self, devices: List[Device],
            device_tests: List[str],
            repeat_count: int, host_tests: List[str]):
        # (test_name, test_env) -> TestResult, or None while unfinished.
        self.results: Dict[Tuple[str, str], Optional[TestResult]] = {}
        for device in devices:
            for repeat_index in range(1, repeat_count + 1):
                env = '%s_repeat_%d' % (device.name, repeat_index)
                for test in device_tests:
                    self.results[(test, env)] = None
        for test in host_tests:
            self.results[(test, 'host')] = None
        self.write_summary()

    @property
    def test_count(self) -> int:
        return len(self.results)

    @property
    def failed_test_count(self) -> int:
        # Unfinished tests (None) count as failed.
        return sum(1 for result in self.results.values()
                   if result is None or result.status == 'FAILED')

    def update(self, test_proc: TestProcess):
        """ Merge the results from test_proc; rewrite files if anything changed. """
        if test_proc.device:
            test_env = '%s_repeat_%d' % (test_proc.device.name, test_proc.repeat_index)
        else:
            test_env = 'host'
        changed = False
        for test, result in test_proc.test_results.items():
            key = (test, test_env)
            if self.results[key] != result:
                self.results[key] = result
                changed = True
        if changed:
            self.write_summary()

    def write_summary(self):
        """ Write all results to test_summary.txt, failures to failed_test_summary.txt. """
        with open('test_summary.txt', 'w') as fh, \
                open('failed_test_summary.txt', 'w') as failed_fh:
            for key in sorted(self.results.keys()):
                test_name, test_env = key
                result = self.results[key]
                message = f'{test_name}    {test_env}    {result}'
                print(message, file=fh)
                if not result or result.status == 'FAILED':
                    print(message, file=failed_fh)
395
396
class TestManager:
    """ Create test processes, monitor their status and log test progresses. """

    def __init__(self, args: argparse.Namespace):
        self.repeat_count = args.repeat
        self.test_options = self._build_test_options(args)
        self.devices = self._build_test_devices(args)
        self.progress_bar: Optional[ProgressBar] = None
        self.test_summary: Optional[TestSummary] = None

    def _build_test_devices(self, args: argparse.Namespace) -> List[Device]:
        """ Parse --device values of the form 'name:serial' into Device objects. """
        devices = []
        if args.device:
            for s in args.device:
                name, serial_number = s.split(':', 1)
                devices.append(Device(name, serial_number))
        else:
            # No --device given: a single device with an empty serial number.
            # NOTE(review): presumably the empty serial lets adb pick the only
            # connected device — confirm.
            devices.append(Device('default', ''))
        return devices

    def _build_test_options(self, args: argparse.Namespace) -> List[str]:
        """ Build the command line options forwarded to every test process. """
        test_options: List[str] = []
        if args.browser:
            test_options.append('--browser')
        if args.ndk_path:
            test_options += ['--ndk-path', args.ndk_path]
        # Resolved against the cwd; main() chdirs into the test dir and runs
        # build_testdata(Path('testdata')) there before tests start.
        testdata_dir = Path('testdata').resolve()
        test_options += ['--testdata-dir', str(testdata_dir)]
        return test_options

    def run_all_tests(self, tests: List[str]):
        """ Split tests by type and run each group with its own parallelism rules. """
        device_tests = []
        device_serialized_tests = []
        host_tests = []
        for test in tests:
            test_type = get_test_type(test)
            assert test_type, f'No test type for test {test}'
            if test_type == 'device_test':
                device_tests.append(test)
            if test_type == 'device_serialized_test':
                device_serialized_tests.append(test)
            if test_type == 'host_test':
                host_tests.append(test)
        # Device tests run on every device and for every repeat round; host
        # tests run exactly once.
        total_test_count = (len(device_tests) + len(device_serialized_tests)
                            ) * len(self.devices) * self.repeat_count + len(host_tests)
        self.progress_bar = ProgressBar(total_test_count)
        self.test_summary = TestSummary(self.devices, device_tests + device_serialized_tests,
                                        self.repeat_count, host_tests)
        if device_tests:
            self.run_device_tests(device_tests)
        if device_serialized_tests:
            self.run_device_serialized_tests(device_serialized_tests)
        if host_tests:
            self.run_host_tests(host_tests)
        self.progress_bar.end_tests()
        self.progress_bar = None

    def run_device_tests(self, tests: List[str]):
        """ Tests can run in parallel on different devices. """
        test_procs: List[TestProcess] = []
        for device in self.devices:
            test_procs.append(TestProcess('device_test', tests, device, 1, self.test_options))
        self.wait_for_test_results(test_procs, self.repeat_count)

    def run_device_serialized_tests(self, tests: List[str]):
        """ Tests run on each device in order. """
        for device in self.devices:
            test_proc = TestProcess('device_serialized_test', tests, device, 1, self.test_options)
            self.wait_for_test_results([test_proc], self.repeat_count)

    def run_host_tests(self, tests: List[str]):
        """ Tests run only once on host. """
        test_proc = TestProcess('host_tests', tests, None, 1, self.test_options)
        self.wait_for_test_results([test_proc], 1)

    def wait_for_test_results(self, test_procs: List[TestProcess], repeat_count: int):
        """ Poll the given test processes until all of them, including any
            repeat rounds they spawn, have finished.
        """
        # NOTE(review): test_count is computed but never used.
        test_count = sum(len(test_proc.tests) for test_proc in test_procs)
        while test_procs:
            dead_procs: List[TestProcess] = []
            # Check update.
            for test_proc in test_procs:
                if not test_proc.alive:
                    dead_procs.append(test_proc)
                test_proc.check_update()
                self.progress_bar.update(test_proc)
                self.test_summary.update(test_proc)

            # Process dead procs.
            for test_proc in dead_procs:
                test_proc.join()
                if not test_proc.finished:
                    # Restart to run the remaining tests. If restart() refuses
                    # (max tries reached), the leftovers were marked FAILED, so
                    # refresh the progress bar and summary one last time.
                    if test_proc.restart():
                        continue
                    else:
                        self.progress_bar.update(test_proc)
                        self.test_summary.update(test_proc)
                self.progress_bar.end_test_proc(test_proc)
                test_procs.remove(test_proc)
                # Schedule the next repeat round for this process, if any left.
                if test_proc.repeat_index < repeat_count:
                    test_procs.append(
                        TestProcess(test_proc.test_type, test_proc.tests, test_proc.device,
                                    test_proc.repeat_index + 1, test_proc.test_options))
            time.sleep(0.1)
        return True
501
502
def run_tests_in_child_process(tests: List[str], args: argparse.Namespace) -> bool:
    """ Run tests in child processes and read results through pipes.

        Returns True if every test passed.
    """
    mp.set_start_method('spawn')  # to be consistent on darwin, linux, windows
    manager = TestManager(args)
    manager.run_all_tests(tests)

    summary = manager.test_summary
    total_test_count = summary.test_count
    failed_test_count = summary.failed_test_count
    if failed_test_count == 0:
        print('All tests passed!')
        return True
    print('%d of %d tests failed. See %s/failed_test_summary.txt for details.' %
          (failed_test_count, total_test_count, args.test_dir))
    return False
517
518
def main() -> bool:
    """ Select tests from command line options, prepare a clean test dir,
        collect testdata, and run everything. Returns True on full success.
    """
    args = get_args()
    if args.only_host_test:
        tests = get_host_tests()
    else:
        tests = get_all_tests()
    tests = get_filtered_tests(tests, args.test_from, args.pattern)

    if args.list_tests:
        print('\n'.join(tests))
        return True

    # Start from a clean test dir and run everything from inside it.
    test_dir = Path(args.test_dir).resolve()
    remove(test_dir)
    test_dir.mkdir(parents=True)
    os.chdir(test_dir)
    build_testdata(Path('testdata'))
    return run_tests_in_child_process(tests, args)
535