• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2#
3# Copyright (C) 2017 The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#      http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16#
17"""Release test for simpleperf prebuilts.
18
19It includes below tests:
201. Test profiling Android apps on different Android versions (starting from Android N).
212. Test simpleperf python scripts on different Hosts (linux, darwin and windows) on x86_64.
223. Test using both devices and emulators.
234. Test using both `adb root` and `adb unroot`.
24
25"""
26
import argparse
from dataclasses import dataclass
import fnmatch
import inspect
import multiprocessing as mp
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import time
import types
from typing import Dict, List, Optional, Tuple
import unittest

from tqdm import tqdm

from simpleperf_utils import BaseArgumentParser, extant_dir, log_exit, remove, is_darwin
44
45from . api_profiler_test import *
46from . annotate_test import *
47from . app_profiler_test import *
48from . app_test import *
49from . binary_cache_builder_test import *
50from . cpp_app_test import *
51from . debug_unwind_reporter_test import *
52from . etm_stack_test import *
53from . gecko_profile_generator_test import *
54from . inferno_test import *
55from . java_app_test import *
56from . kotlin_app_test import *
57from . pprof_proto_generator_test import *
58from . purgatorio_test import *
59from . report_html_test import *
60from . report_lib_test import *
61from . report_sample_test import *
62from . run_simpleperf_on_device_test import *
63from . sample_filter_test import *
64from . stackcollapse_test import *
65from . tools_test import *
66from . test_utils import TestHelper
67
68
def get_args() -> argparse.Namespace:
    """Parse command line options for the release test runner."""
    arg_parser = BaseArgumentParser(description=__doc__)
    arg_parser.add_argument(
        '--browser', action='store_true', help='open report html file in browser.')
    arg_parser.add_argument(
        '-d', '--device', nargs='+',
        help='set devices used to run tests. Each device in format name:serial-number')
    arg_parser.add_argument('--only-host-test', action='store_true', help='Only run host tests')
    arg_parser.add_argument('--list-tests', action='store_true', help='List tests')
    arg_parser.add_argument('--ndk-path', type=extant_dir, help='Set the path of a ndk release')
    arg_parser.add_argument(
        '-p', '--pattern', nargs='+', help='Run tests matching the selected pattern.')
    arg_parser.add_argument('-r', '--repeat', type=int, default=1, help='times to repeat tests')
    arg_parser.add_argument('--test-from', help='Run tests following the selected test.')
    arg_parser.add_argument(
        '--test-dir', default='test_dir', help='Directory to store test results')
    return arg_parser.parse_args()
84
85
def get_all_tests() -> List[str]:
    """Collect 'TestCaseName.test_method' ids for all TestCases in this module."""
    found = []
    for cls_name, cls in globals().items():
        if not (isinstance(cls, type) and issubclass(cls, unittest.TestCase)):
            continue
        for attr_name, attr in inspect.getmembers(cls):
            is_func = isinstance(attr, (types.MethodType, types.FunctionType))
            if is_func and attr_name.startswith('test'):
                found.append(f'{cls_name}.{attr_name}')
    return sorted(found)
95
96
def get_host_tests() -> List[str]:
    """Return only the tests that run purely on the host machine."""
    return [test for test in get_all_tests() if get_test_type(test) == 'host_test']
101
102
def get_filtered_tests(
        tests: List[str],
        test_from: Optional[str],
        test_pattern: Optional[List[str]]) -> List[str]:
    """Narrow `tests` to those at or after `test_from`, matching any glob in `test_pattern`."""
    if test_from:
        if test_from not in tests:
            log_exit("Can't find test %s" % test_from)
        tests = tests[tests.index(test_from):]
    if test_pattern:
        # fnmatch.translate turns each shell-style glob into a regex.
        regexes = [re.compile(fnmatch.translate(glob)) for glob in test_pattern]
        tests = [name for name in tests if any(regex.match(name) for regex in regexes)]
        if not tests:
            log_exit('No tests are matched.')
    return tests
118
119
def get_test_type(test: str) -> Optional[str]:
    """Classify a 'TestCase.test_method' id.

    Returns 'device_test', 'device_serialized_test', 'host_test', or None
    when the test case is unknown.
    """
    testcase_name, test_name = test.split('.')
    # This single test must not run in parallel with others on the device.
    if test_name == 'test_run_simpleperf_without_usb_connection':
        return 'device_serialized_test'
    device_testcases = {
        'TestApiProfiler', 'TestNativeProfiling', 'TestNativeLibDownloader',
        'TestRecordingRealApps', 'TestRunSimpleperfOnDevice'}
    if testcase_name in device_testcases or testcase_name.startswith('TestExample'):
        return 'device_test'
    host_testcases = {
        'TestAnnotate', 'TestBinaryCacheBuilder', 'TestDebugUnwindReporter', 'TestEtmStacker',
        'TestInferno', 'TestPprofProtoGenerator', 'TestProtoFileReportLib', 'TestPurgatorio',
        'TestReportHtml', 'TestReportLib', 'TestReportSample', 'TestSampleFilter',
        'TestStackCollapse', 'TestTools', 'TestGeckoProfileGenerator'}
    if testcase_name in host_testcases:
        return 'host_test'
    return None
147
148
def build_testdata(testdata_dir: Path):
    """ Collect testdata in testdata_dir.
        In system/extras/simpleperf/scripts, testdata comes from:
            <script_dir>/../testdata, <script_dir>/test/script_testdata, <script_dir>/../demo
        In prebuilts/simpleperf, testdata comes from:
            <script_dir>/test/testdata

        Requires `shutil` (imported at module level); the original file was
        missing that import, making the copy loop raise NameError.
    """
    # Caller (main) removes the whole test dir first, so this must not exist yet.
    testdata_dir.mkdir()

    script_test_dir = Path(__file__).resolve().parent
    script_dir = script_test_dir.parent

    # Candidate source locations; only those that exist are used.
    source_dirs = [
        script_test_dir / 'script_testdata',
        script_test_dir / 'testdata',
        script_dir.parent / 'testdata',
        script_dir.parent / 'demo',
        script_dir.parent / 'runtest',
    ]

    for source_dir in source_dirs:
        if not source_dir.is_dir():
            continue
        for src_path in source_dir.iterdir():
            # First source dir wins: don't overwrite entries copied earlier.
            dest_path = testdata_dir / src_path.name
            if dest_path.exists():
                continue
            if src_path.is_file():
                shutil.copyfile(src_path, dest_path)
            elif src_path.is_dir():
                shutil.copytree(src_path, dest_path)
180
181
def run_tests(tests: List[str]) -> bool:
    """Run the named tests in-process via unittest; return True if all passed."""
    runner = unittest.TextTestRunner(stream=TestHelper.log_fh, verbosity=0)
    program = unittest.main(
        argv=[sys.argv[0]] + tests, testRunner=runner,
        exit=False, verbosity=0, module='test.do_test')
    return program.result.wasSuccessful()
188
189
def test_process_entry(tests: List[str], test_options: List[str], conn: mp.connection.Connection):
    """Child-process entry point: parse per-process options, set up TestHelper, run tests."""
    option_parser = argparse.ArgumentParser()
    option_parser.add_argument('--browser', action='store_true')
    option_parser.add_argument('--device', help='android device serial number')
    option_parser.add_argument('--ndk-path', type=extant_dir)
    option_parser.add_argument('--testdata-dir', type=extant_dir)
    option_parser.add_argument('--test-dir', help='directory to store test results')
    opts = option_parser.parse_args(test_options)

    # Results flow back to the parent through `conn`.
    TestHelper.init(opts.test_dir, opts.testdata_dir,
                    opts.browser, opts.ndk_path, opts.device, conn)
    run_tests(tests)
202
203
@dataclass
class Device:
    """ A test device, as given by the --device name:serial-number option. """
    # Short label used when naming test processes and result directories.
    name: str
    # adb serial number; empty string means the default adb device.
    serial_number: str
208
209
@dataclass
class TestResult:
    """ Outcome of one test: the try that produced it, its status, and duration. """
    try_time: int
    status: str
    duration: str

    def __str__(self) -> str:
        # Only failures record which attempt produced the result.
        parts = [self.status]
        if self.status == 'FAILED':
            parts.append(f'(at try_time {self.try_time})')
        parts.append(self.duration)
        return ' '.join(parts)
222
223
class TestProcess:
    """ Create a test process to run selected tests on a device.

        The child process reports each finished test through a pipe
        (see test_process_entry). If the child dies or hangs before all
        tests finish, restart() spawns a replacement process running only
        the unfinished tests, up to TEST_MAX_TRY_TIME attempts.
    """

    # Max times to (re)start the child process before giving up.
    TEST_MAX_TRY_TIME = 10
    # If no test result arrives within this window, the child is assumed hung.
    TEST_TIMEOUT_IN_SEC = 10 * 60

    def __init__(
            self, test_type: str, tests: List[str],
            device: Optional[Device],
            repeat_index: int,
            test_options: List[str]):
        self.test_type = test_type
        self.tests = tests
        self.device = device
        self.repeat_index = repeat_index
        self.test_options = test_options
        self.try_time = 1
        # Maps test name -> TestResult for finished tests.
        self.test_results: Dict[str, TestResult] = {}
        self.parent_conn: Optional[mp.connection.Connection] = None
        self.proc: Optional[mp.Process] = None
        self.last_update_time = 0.0
        self._start_test_process()

    def _start_test_process(self):
        """ Spawn a child process running the tests that have no result yet. """
        unfinished_tests = [test for test in self.tests if test not in self.test_results]
        self.parent_conn, child_conn = mp.Pipe(duplex=False)
        test_options = self.test_options[:]
        test_options += ['--test-dir', str(self.test_dir)]
        if self.device:
            test_options += ['--device', self.device.serial_number]
        self.proc = mp.Process(target=test_process_entry, args=(
            unfinished_tests, test_options, child_conn))
        self.proc.start()
        self.last_update_time = time.time()

    @property
    def name(self) -> str:
        """ Unique name built from test type, device and repeat index. """
        name = self.test_type
        if self.device:
            name += '_' + self.device.name
        name += '_repeat_%d' % self.repeat_index
        return name

    @property
    def test_dir(self) -> Path:
        """ Directory to run the tests. """
        return Path.cwd() / (self.name + '_try_%d' % self.try_time)

    @property
    def alive(self) -> bool:
        """ Return if the test process is alive. """
        return self.proc.is_alive()

    @property
    def finished(self) -> bool:
        """ Return if all tests are finished. """
        return len(self.test_results) == len(self.tests)

    def check_update(self):
        """ Drain pending test results from the pipe; kill the child on timeout. """
        try:
            while self.parent_conn.poll():
                msg = self.parent_conn.recv()
                self._process_msg(msg)
                self.last_update_time = time.time()
        except (EOFError, BrokenPipeError):
            # Child end of the pipe closed; the caller notices via `alive`.
            pass
        if time.time() - self.last_update_time > TestProcess.TEST_TIMEOUT_IN_SEC:
            self.proc.terminate()

    def _process_msg(self, msg: str):
        """ Record one 'test_name status duration' message from the child. """
        test_name, test_success, test_duration = msg.split()
        self.test_results[test_name] = TestResult(self.try_time, test_success, test_duration)

    def join(self):
        self.proc.join()

    def restart(self) -> bool:
        """ Create a new test process to run unfinished tests. """
        if self.finished:
            return False
        if self.try_time == self.TEST_MAX_TRY_TIME:
            # Exceeded max try time, so mark the remaining tests as failed.
            # (The original code wrote this as a stray string literal, which is
            # a no-op statement, not a comment.)
            for test in self.tests:
                if test not in self.test_results:
                    test_duration = '%.3fs' % (time.time() - self.last_update_time)
                    self.test_results[test] = TestResult(self.try_time, 'FAILED', test_duration)
            return False

        self.try_time += 1
        self._start_test_process()
        return True
316
317
class ProgressBar:
    """ One overall tqdm bar plus one bar per running test process. """

    def __init__(self, total_count: int):
        self.total_bar = tqdm(
            total=total_count, desc='test progress', ascii=' ##',
            bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}, {rate_fmt}", position=0)
        # Per-process bars, keyed by TestProcess.name.
        self.test_process_bars: Dict[str, tqdm] = {}

    def update(self, test_proc: TestProcess):
        """ Advance the bars to match test_proc's count of finished tests. """
        bar = self.test_process_bars.get(test_proc.name)
        if bar is None:
            bar = tqdm(total=len(test_proc.tests),
                       desc=test_proc.name, ascii=' ##',
                       bar_format="{l_bar}{bar} | {n_fmt}/{total_fmt} [{elapsed}]")
            self.test_process_bars[test_proc.name] = bar

        delta = len(test_proc.test_results) - bar.n
        if delta:
            bar.update(delta)
            self.total_bar.update(delta)

    def end_test_proc(self, test_proc: TestProcess):
        """ Close and drop the bar of a finished test process. """
        bar = self.test_process_bars.pop(test_proc.name, None)
        if bar is not None:
            bar.close()

    def end_tests(self):
        """ Close every remaining per-process bar and the overall bar. """
        for bar in self.test_process_bars.values():
            bar.close()
        self.total_bar.close()
348
349
class TestSummary:
    """ Track the latest result of every (test, environment) pair, and keep
        test_summary.txt / failed_test_summary.txt in the cwd up to date.
    """

    def __init__(
            self, devices: List[Device],
            device_tests: List[str],
            repeat_count: int, host_tests: List[str]):
        # Keys are (test_name, test_env); None means no result reported yet.
        self.results: Dict[Tuple[str, str], Optional[TestResult]] = {}
        for device in devices:
            for repeat_index in range(1, repeat_count + 1):
                test_env = '%s_repeat_%d' % (device.name, repeat_index)
                for test in device_tests:
                    self.results[(test, test_env)] = None
        for test in host_tests:
            self.results[(test, 'host')] = None
        self.write_summary()

    @property
    def test_count(self) -> int:
        return len(self.results)

    @property
    def failed_test_count(self) -> int:
        # Tests with no result at all also count as failures.
        return sum(
            1 for result in self.results.values()
            if result is None or result.status == 'FAILED')

    def update(self, test_proc: TestProcess):
        """ Merge results from test_proc; rewrite the summary files on change. """
        if test_proc.device:
            test_env = '%s_repeat_%d' % (test_proc.device.name, test_proc.repeat_index)
        else:
            test_env = 'host'
        changed = False
        for test, result in test_proc.test_results.items():
            key = (test, test_env)
            if self.results[key] != result:
                self.results[key] = result
                changed = True
        if changed:
            self.write_summary()

    def write_summary(self):
        """ Write all results to test_summary.txt, failures to failed_test_summary.txt. """
        with open('test_summary.txt', 'w') as fh, \
                open('failed_test_summary.txt', 'w') as failed_fh:
            for key in sorted(self.results.keys()):
                test_name, test_env = key
                result = self.results[key]
                message = f'{test_name}    {test_env}    {result}'
                print(message, file=fh)
                if not result or result.status == 'FAILED':
                    print(message, file=failed_fh)
400
401
class TestManager:
    """ Create test processes, monitor their status and log test progresses. """

    def __init__(self, args: argparse.Namespace):
        self.repeat_count = args.repeat
        self.test_options = self._build_test_options(args)
        self.devices = self._build_test_devices(args)
        self.progress_bar: Optional[ProgressBar] = None
        self.test_summary: Optional[TestSummary] = None

    def _build_test_devices(self, args: argparse.Namespace) -> List[Device]:
        """ Parse --device 'name:serial' options; default to the default adb device. """
        devices = []
        if args.device:
            for s in args.device:
                name, serial_number = s.split(':', 1)
                devices.append(Device(name, serial_number))
        else:
            devices.append(Device('default', ''))
        return devices

    def _build_test_options(self, args: argparse.Namespace) -> List[str]:
        """ Build the option list forwarded to every test child process. """
        test_options: List[str] = []
        if args.browser:
            test_options.append('--browser')
        if args.ndk_path:
            test_options += ['--ndk-path', args.ndk_path]
        # Resolve relative to the cwd at setup time (main() has already
        # chdir'ed into the test dir before TestManager is built).
        testdata_dir = Path('testdata').resolve()
        test_options += ['--testdata-dir', str(testdata_dir)]
        return test_options

    def run_all_tests(self, tests: List[str]):
        """ Partition tests by type and run each group with suitable parallelism. """
        device_tests = []
        device_serialized_tests = []
        host_tests = []
        for test in tests:
            test_type = get_test_type(test)
            assert test_type, f'No test type for test {test}'
            if test_type == 'device_test':
                device_tests.append(test)
            if test_type == 'device_serialized_test':
                device_serialized_tests.append(test)
            if test_type == 'host_test':
                host_tests.append(test)
        # Device tests run repeat_count times on every device; host tests run once.
        total_test_count = (len(device_tests) + len(device_serialized_tests)
                            ) * len(self.devices) * self.repeat_count + len(host_tests)
        self.progress_bar = ProgressBar(total_test_count)
        self.test_summary = TestSummary(self.devices, device_tests + device_serialized_tests,
                                        self.repeat_count, host_tests)
        if device_tests:
            self.run_device_tests(device_tests)
        if device_serialized_tests:
            self.run_device_serialized_tests(device_serialized_tests)
        if host_tests:
            self.run_host_tests(host_tests)
        self.progress_bar.end_tests()
        self.progress_bar = None

    def run_device_tests(self, tests: List[str]):
        """ Tests can run in parallel on different devices. """
        test_procs: List[TestProcess] = []
        for device in self.devices:
            test_procs.append(TestProcess('device_test', tests, device, 1, self.test_options))
        self.wait_for_test_results(test_procs, self.repeat_count)

    def run_device_serialized_tests(self, tests: List[str]):
        """ Tests run on each device in order. """
        for device in self.devices:
            test_proc = TestProcess('device_serialized_test', tests, device, 1, self.test_options)
            self.wait_for_test_results([test_proc], self.repeat_count)

    def run_host_tests(self, tests: List[str]):
        """ Tests run only once on host. """
        test_proc = TestProcess('host_tests', tests, None, 1, self.test_options)
        self.wait_for_test_results([test_proc], 1)

    def wait_for_test_results(self, test_procs: List[TestProcess], repeat_count: int):
        """ Poll the given test processes until every run (including repeats) ends.

            Dead processes with unfinished tests are restarted (TestProcess
            enforces its own retry limit); a process that finished its tests is
            replaced by a new one for the next repeat index, if any remain.
        """
        while test_procs:
            dead_procs: List[TestProcess] = []
            # Check update.
            for test_proc in test_procs:
                if not test_proc.alive:
                    dead_procs.append(test_proc)
                # Drain the pipe even for dead processes: it may still hold
                # results sent just before the child exited.
                test_proc.check_update()
                self.progress_bar.update(test_proc)
                self.test_summary.update(test_proc)

            # Process dead procs.
            for test_proc in dead_procs:
                test_proc.join()
                if not test_proc.finished:
                    if test_proc.restart():
                        continue
                    else:
                        # Retries exhausted: restart() marked the leftovers
                        # FAILED, so refresh progress/summary one last time.
                        self.progress_bar.update(test_proc)
                        self.test_summary.update(test_proc)
                self.progress_bar.end_test_proc(test_proc)
                test_procs.remove(test_proc)
                if test_proc.repeat_index < repeat_count:
                    test_procs.append(
                        TestProcess(test_proc.test_type, test_proc.tests, test_proc.device,
                                    test_proc.repeat_index + 1, test_proc.test_options))
            time.sleep(0.1)
        return True
506
507
def run_tests_in_child_process(tests: List[str], args: argparse.Namespace) -> bool:
    """Run tests in child processes, reading results back over a pipe; True if all pass."""
    mp.set_start_method('spawn')  # to be consistent on darwin, linux, windows
    manager = TestManager(args)
    manager.run_all_tests(tests)

    failed = manager.test_summary.failed_test_count
    total = manager.test_summary.test_count
    if not failed:
        print('All tests passed!')
        return True
    print('%d of %d tests failed. See %s/failed_test_summary.txt for details.' %
          (failed, total, args.test_dir))
    return False
522
523
def main() -> bool:
    """Entry point: select tests, prepare a clean test directory, and run them."""
    args = get_args()
    if args.only_host_test:
        tests = get_host_tests()
    else:
        tests = get_all_tests()
    tests = get_filtered_tests(tests, args.test_from, args.pattern)

    if args.list_tests:
        print('\n'.join(tests))
        return True

    # Start from a clean test directory and run everything inside it.
    test_dir = Path(args.test_dir).resolve()
    remove(test_dir)
    test_dir.mkdir(parents=True)
    # Switch to the test dir.
    os.chdir(test_dir)
    build_testdata(Path('testdata'))
    return run_tests_in_child_process(tests, args)
540