#!/usr/bin/env python3
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
17"""Release test for simpleperf prebuilts.
18
19It includes below tests:
201. Test profiling Android apps on different Android versions (starting from Android N).
212. Test simpleperf python scripts on different Hosts (linux, darwin and windows) on x86_64.
223. Test using both devices and emulators.
234. Test using both `adb root` and `adb unroot`.
24
25"""

import argparse
from dataclasses import dataclass
import fnmatch
import inspect
import multiprocessing as mp
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import time
from tqdm import tqdm
import types
from typing import Dict, List, Optional, Tuple
import unittest

from simpleperf_utils import BaseArgumentParser, extant_dir, log_exit, remove, is_darwin

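# The wildcard imports below pull every TestCase subclass into this module's
# globals(), which is how get_all_tests() discovers the available tests.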
from . api_profiler_test import *
from . annotate_test import *
from . app_profiler_test import *
from . app_test import *
from . binary_cache_builder_test import *
from . cpp_app_test import *
from . debug_unwind_reporter_test import *
from . gecko_profile_generator_test import *
from . inferno_test import *
from . java_app_test import *
from . kotlin_app_test import *
from . pprof_proto_generator_test import *
from . purgatorio_test import *
from . report_html_test import *
from . report_lib_test import *
from . report_sample_test import *
from . run_simpleperf_on_device_test import *
from . stackcollapse_test import *
from . tools_test import *
from . test_utils import TestHelper


def get_args() -> argparse.Namespace:
    parser = BaseArgumentParser(description=__doc__)
    parser.add_argument('--browser', action='store_true',
                        help='Open report html files in the browser.')
    parser.add_argument(
        '-d', '--device', nargs='+',
        help='Set devices used to run tests. Each device is in the format name:serial-number.')
    parser.add_argument('--only-host-test', action='store_true', help='Only run host tests.')
    parser.add_argument('--list-tests', action='store_true', help='List tests.')
    parser.add_argument('--ndk-path', type=extant_dir, help='Set the path of an NDK release.')
    parser.add_argument('-p', '--pattern', nargs='+',
                        help='Run tests matching the selected pattern.')
    parser.add_argument('-r', '--repeat', type=int, default=1, help='Times to repeat tests.')
    parser.add_argument('--test-from', help='Run tests starting from the selected test.')
    parser.add_argument('--test-dir', default='test_dir', help='Directory to store test results.')
    return parser.parse_args()

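# Example arguments (the device names and serial numbers below are hypothetical):
#   -d phone:0123456789AB emulator:emulator-5554 -p 'TestExample*' -r 2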

def get_all_tests() -> List[str]:
    tests = []
    for name, value in globals().items():
        if isinstance(value, type) and issubclass(value, unittest.TestCase):
            for member_name, member in inspect.getmembers(value):
                if isinstance(member, (types.MethodType, types.FunctionType)):
                    if member_name.startswith('test'):
                        tests.append(name + '.' + member_name)
    return sorted(tests)


def get_host_tests() -> List[str]:
    def filter_fn(test: str) -> bool:
        return get_test_type(test) == 'host_test'
    return list(filter(filter_fn, get_all_tests()))


def get_filtered_tests(
        tests: List[str],
        test_from: Optional[str],
        test_pattern: Optional[List[str]]) -> List[str]:
    if test_from:
        try:
            tests = tests[tests.index(test_from):]
        except ValueError:
            log_exit("Can't find test %s" % test_from)
    if test_pattern:
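        # fnmatch.translate turns shell-style globs into regular expressions,
        # so a pattern like 'TestTools.*' matches every test in TestTools.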
        patterns = [re.compile(fnmatch.translate(x)) for x in test_pattern]
        tests = [t for t in tests if any(pattern.match(t) for pattern in patterns)]
        if not tests:
            log_exit('No tests are matched.')
    return tests


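# Classify a test: 'device_test' runs in parallel across devices,
# 'device_serialized_test' runs on each device in order, and 'host_test'
# runs only once on the host (see TestManager.run_all_tests).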
def get_test_type(test: str) -> Optional[str]:
    testcase_name, test_name = test.split('.')
    if test_name == 'test_run_simpleperf_without_usb_connection':
        return 'device_serialized_test'
    if testcase_name in (
            'TestApiProfiler', 'TestNativeProfiling', 'TestNativeLibDownloader',
            'TestRecordingRealApps', 'TestRunSimpleperfOnDevice'):
        return 'device_test'
    if testcase_name.startswith('TestExample'):
        return 'device_test'
    if testcase_name in ('TestAnnotate',
                         'TestBinaryCacheBuilder',
                         'TestDebugUnwindReporter',
                         'TestInferno',
                         'TestPprofProtoGenerator',
                         'TestPurgatorio',
                         'TestReportHtml',
                         'TestReportLib',
                         'TestReportSample',
                         'TestStackCollapse',
                         'TestTools',
                         'TestGeckoProfileGenerator'):
        return 'host_test'
    return None


def build_testdata(testdata_dir: Path):
    """ Collect testdata in testdata_dir.
        In system/extras/simpleperf/scripts, testdata comes from:
            <script_dir>/../testdata, <script_dir>/test/script_testdata,
            <script_dir>/../demo, <script_dir>/../runtest
        In prebuilts/simpleperf, testdata comes from:
            <script_dir>/test/testdata
    """
    testdata_dir.mkdir()

    script_test_dir = Path(__file__).resolve().parent
    script_dir = script_test_dir.parent

    source_dirs = [
        script_test_dir / 'script_testdata',
        script_test_dir / 'testdata',
        script_dir.parent / 'testdata',
        script_dir.parent / 'demo',
        script_dir.parent / 'runtest',
    ]

    for source_dir in source_dirs:
        if not source_dir.is_dir():
            continue
        for src_path in source_dir.iterdir():
            dest_path = testdata_dir / src_path.name
            if dest_path.exists():
                continue
            if src_path.is_file():
                shutil.copyfile(src_path, dest_path)
            elif src_path.is_dir():
                shutil.copytree(src_path, dest_path)


def run_tests(tests: List[str]) -> bool:
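    # Runs the selected tests with unittest in the current process. Runner
    # output goes to TestHelper.log_fh, keeping stdout free for progress bars.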
    argv = [sys.argv[0]] + tests
    test_runner = unittest.TextTestRunner(stream=TestHelper.log_fh, verbosity=0)
    test_program = unittest.main(argv=argv, testRunner=test_runner,
                                 exit=False, verbosity=0, module='test.do_test')
    return test_program.result.wasSuccessful()


def test_process_entry(tests: List[str], test_options: List[str], conn: mp.connection.Connection):
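    # Entry point of a spawned child process. TestHelper reports each test
    # result back to the parent through `conn`; see TestProcess._process_msg
    # for the message format consumed on the other end.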
    parser = argparse.ArgumentParser()
    parser.add_argument('--browser', action='store_true')
    parser.add_argument('--device', help='android device serial number')
    parser.add_argument('--ndk-path', type=extant_dir)
    parser.add_argument('--testdata-dir', type=extant_dir)
    parser.add_argument('--test-dir', help='directory to store test results')
    args = parser.parse_args(test_options)

    TestHelper.init(args.test_dir, args.testdata_dir,
                    args.browser, args.ndk_path, args.device, conn)
    run_tests(tests)


@dataclass
class Device:
    name: str
    serial_number: str


@dataclass
class TestResult:
    try_time: int
    ok: bool
    duration: str

    def __str__(self) -> str:
        if self.ok:
            s = 'OK'
        else:
            s = f'FAILED (at try_time {self.try_time})'
        s += f' {self.duration}'
        return s


class TestProcess:
    """ Create a test process to run selected tests on a device. """

    TEST_MAX_TRY_TIME = 10
    TEST_TIMEOUT_IN_SEC = 10 * 60
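    # A process that dies with unfinished tests is restarted, up to
    # TEST_MAX_TRY_TIME total attempts; one that reports no progress for
    # TEST_TIMEOUT_IN_SEC seconds is terminated (see check_update and restart).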

    def __init__(
            self, test_type: str, tests: List[str],
            device: Optional[Device],
            repeat_index: int,
            test_options: List[str]):
        self.test_type = test_type
        self.tests = tests
        self.device = device
        self.repeat_index = repeat_index
        self.test_options = test_options
        self.try_time = 1
        self.test_results: Dict[str, TestResult] = {}
        self.parent_conn: Optional[mp.connection.Connection] = None
        self.proc: Optional[mp.Process] = None
        self.last_update_time = 0.0
        self._start_test_process()

    def _start_test_process(self):
        unfinished_tests = [test for test in self.tests if test not in self.test_results]
        self.parent_conn, child_conn = mp.Pipe(duplex=False)
        test_options = self.test_options[:]
        test_options += ['--test-dir', str(self.test_dir)]
        if self.device:
            test_options += ['--device', self.device.serial_number]
        self.proc = mp.Process(target=test_process_entry, args=(
            unfinished_tests, test_options, child_conn))
        self.proc.start()
        self.last_update_time = time.time()

    @property
    def name(self) -> str:
        name = self.test_type
        if self.device:
            name += '_' + self.device.name
        name += '_repeat_%d' % self.repeat_index
        return name

    @property
    def test_dir(self) -> Path:
        """ Directory to run the tests. """
        return Path.cwd() / (self.name + '_try_%d' % self.try_time)

    @property
    def alive(self) -> bool:
        """ Return whether the test process is alive. """
        return self.proc.is_alive()

    @property
    def finished(self) -> bool:
        """ Return whether all tests are finished. """
        return len(self.test_results) == len(self.tests)

    def check_update(self):
        """ Check if there is any test update. """
        try:
            while self.parent_conn.poll():
                msg = self.parent_conn.recv()
                self._process_msg(msg)
                self.last_update_time = time.time()
        except (EOFError, BrokenPipeError):
            pass
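        # No update for too long: assume the child hangs and terminate it; the
        # manager will then restart it to run the unfinished tests.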
        if time.time() - self.last_update_time > TestProcess.TEST_TIMEOUT_IN_SEC:
            self.proc.terminate()

    def _process_msg(self, msg: str):
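        # Each msg is '<test_name> <status> <duration>', where status is 'OK'
        # on success and duration is formatted like '1.234s'.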
        test_name, test_success, test_duration = msg.split()
        test_success = test_success == 'OK'
        self.test_results[test_name] = TestResult(self.try_time, test_success, test_duration)

    def join(self):
        self.proc.join()

    def restart(self) -> bool:
        """ Create a new test process to run unfinished tests. """
        if self.finished:
            return False
        if self.try_time == self.TEST_MAX_TRY_TIME:
            # Exceeded the max try time, so mark the remaining tests as failed.
            for test in self.tests:
                if test not in self.test_results:
                    test_duration = '%.3fs' % (time.time() - self.last_update_time)
                    self.test_results[test] = TestResult(self.try_time, False, test_duration)
            return False

        self.try_time += 1
        self._start_test_process()
        return True


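# Nested tqdm progress bars: one overall bar (position 0) plus one bar per
# running test process, closed when that process finishes.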
class ProgressBar:
    def __init__(self, total_count: int):
        self.total_bar = tqdm(
            total=total_count, desc='test progress', ascii=' ##',
            bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}, {rate_fmt}", position=0)
        self.test_process_bars: Dict[str, tqdm] = {}

    def update(self, test_proc: TestProcess):
        if test_proc.name not in self.test_process_bars:
            bar = tqdm(total=len(test_proc.tests),
                       desc=test_proc.name, ascii=' ##',
                       bar_format="{l_bar}{bar} | {n_fmt}/{total_fmt} [{elapsed}]")
            self.test_process_bars[test_proc.name] = bar
        else:
            bar = self.test_process_bars[test_proc.name]

        add = len(test_proc.test_results) - bar.n
        if add:
            bar.update(add)
            self.total_bar.update(add)

    def end_test_proc(self, test_proc: TestProcess):
        if test_proc.name in self.test_process_bars:
            self.test_process_bars[test_proc.name].close()
            del self.test_process_bars[test_proc.name]

    def end_tests(self):
        for bar in self.test_process_bars.values():
            bar.close()
        self.total_bar.close()


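# Aggregates results keyed by (test_name, test_env). An entry stays None until
# a result arrives; test_summary.txt and failed_test_summary.txt are rewritten
# on every update.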
class TestSummary:
    def __init__(
            self, devices: List[Device],
            device_tests: List[str],
            repeat_count: int, host_tests: List[str]):
        self.results: Dict[Tuple[str, str], Optional[TestResult]] = {}
        for test in device_tests:
            for device in devices:
                for repeat_index in range(1, repeat_count + 1):
                    self.results[(test, '%s_repeat_%d' % (device.name, repeat_index))] = None
        for test in host_tests:
            self.results[(test, 'host')] = None
        self.write_summary()

    @property
    def test_count(self) -> int:
        return len(self.results)

    @property
    def failed_test_count(self) -> int:
        count = 0
        for result in self.results.values():
            if result is None or not result.ok:
                count += 1
        return count

    def update(self, test_proc: TestProcess):
        if test_proc.device:
            test_env = '%s_repeat_%d' % (test_proc.device.name, test_proc.repeat_index)
        else:
            test_env = 'host'
        has_update = False
        for test, result in test_proc.test_results.items():
            key = (test, test_env)
            if self.results[key] != result:
                self.results[key] = result
                has_update = True
        if has_update:
            self.write_summary()

    def write_summary(self):
        with open('test_summary.txt', 'w') as fh, \
                open('failed_test_summary.txt', 'w') as failed_fh:
            for key in sorted(self.results.keys()):
                test_name, test_env = key
                result = self.results[key]
                message = f'{test_name}    {test_env}    {result}'
                print(message, file=fh)
                if not result or not result.ok:
                    print(message, file=failed_fh)


class TestManager:
    """ Create test processes, monitor their status and log test progress. """

    def __init__(self, args: argparse.Namespace):
        self.repeat_count = args.repeat
        self.test_options = self._build_test_options(args)
        self.devices = self._build_test_devices(args)
        self.progress_bar: Optional[ProgressBar] = None
        self.test_summary: Optional[TestSummary] = None

    def _build_test_devices(self, args: argparse.Namespace) -> List[Device]:
        devices = []
        if args.device:
            for s in args.device:
                name, serial_number = s.split(':', 1)
                devices.append(Device(name, serial_number))
        else:
            devices.append(Device('default', ''))
        return devices

    def _build_test_options(self, args: argparse.Namespace) -> List[str]:
        test_options: List[str] = []
        if args.browser:
            test_options.append('--browser')
        if args.ndk_path:
            test_options += ['--ndk-path', args.ndk_path]
        testdata_dir = Path('testdata').resolve()
        test_options += ['--testdata-dir', str(testdata_dir)]
        return test_options

    def run_all_tests(self, tests: List[str]):
        device_tests = []
        device_serialized_tests = []
        host_tests = []
        for test in tests:
            test_type = get_test_type(test)
            assert test_type, f'No test type for test {test}'
            if test_type == 'device_test':
                device_tests.append(test)
            if test_type == 'device_serialized_test':
                device_serialized_tests.append(test)
            if test_type == 'host_test':
                host_tests.append(test)
        total_test_count = (len(device_tests) + len(device_serialized_tests)
                            ) * len(self.devices) * self.repeat_count + len(host_tests)
        self.progress_bar = ProgressBar(total_test_count)
        self.test_summary = TestSummary(self.devices, device_tests + device_serialized_tests,
                                        self.repeat_count, host_tests)
        if device_tests:
            self.run_device_tests(device_tests)
        if device_serialized_tests:
            self.run_device_serialized_tests(device_serialized_tests)
        if host_tests:
            self.run_host_tests(host_tests)
        self.progress_bar.end_tests()
        self.progress_bar = None

    def run_device_tests(self, tests: List[str]):
        """ Tests can run in parallel on different devices. """
        test_procs: List[TestProcess] = []
        for device in self.devices:
            test_procs.append(TestProcess('device_test', tests, device, 1, self.test_options))
        self.wait_for_test_results(test_procs, self.repeat_count)

    def run_device_serialized_tests(self, tests: List[str]):
        """ Tests run on each device in order. """
        for device in self.devices:
            test_proc = TestProcess('device_serialized_test', tests, device, 1, self.test_options)
            self.wait_for_test_results([test_proc], self.repeat_count)

    def run_host_tests(self, tests: List[str]):
        """ Tests run only once on host. """
        test_proc = TestProcess('host_tests', tests, None, 1, self.test_options)
        self.wait_for_test_results([test_proc], 1)

    def wait_for_test_results(self, test_procs: List[TestProcess], repeat_count: int):
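        # Poll every test process: drain result updates, then restart dead
        # processes with unfinished tests, or schedule the next repeat run.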
        while test_procs:
            dead_procs: List[TestProcess] = []
            # Check update.
            for test_proc in test_procs:
                if not test_proc.alive:
                    dead_procs.append(test_proc)
                test_proc.check_update()
                self.progress_bar.update(test_proc)
                self.test_summary.update(test_proc)

            # Process dead procs.
            for test_proc in dead_procs:
                test_proc.join()
                if not test_proc.finished:
                    if test_proc.restart():
                        continue
                    else:
                        self.progress_bar.update(test_proc)
                        self.test_summary.update(test_proc)
                self.progress_bar.end_test_proc(test_proc)
                test_procs.remove(test_proc)
                if test_proc.repeat_index < repeat_count:
                    test_procs.append(
                        TestProcess(test_proc.test_type, test_proc.tests, test_proc.device,
                                    test_proc.repeat_index + 1, test_proc.test_options))
            time.sleep(0.1)
        return True


def run_tests_in_child_process(tests: List[str], args: argparse.Namespace) -> bool:
    """ Run tests in child processes, reading test results through a pipe. """
    mp.set_start_method('spawn')  # to be consistent on darwin, linux, windows
    test_manager = TestManager(args)
    test_manager.run_all_tests(tests)

    total_test_count = test_manager.test_summary.test_count
    failed_test_count = test_manager.test_summary.failed_test_count
    if failed_test_count == 0:
        print('All tests passed!')
        return True
    print('%d of %d tests failed. See %s/failed_test_summary.txt for details.' %
          (failed_test_count, total_test_count, args.test_dir))
    return False


def sign_executables_on_darwin():
    """ Sign executables on M1 Mac, otherwise they can't run. """
    if not is_darwin():
        return
    bin_dir = Path(__file__).resolve().parents[1] / 'bin' / 'darwin' / 'x86_64'
    for path in bin_dir.iterdir():
        subprocess.run(f'codesign --force -s - {path}', shell=True, check=True)


def main() -> bool:
    args = get_args()
    tests = get_host_tests() if args.only_host_test else get_all_tests()
    tests = get_filtered_tests(tests, args.test_from, args.pattern)

    if args.list_tests:
        print('\n'.join(tests))
        return True

    test_dir = Path(args.test_dir).resolve()
    remove(test_dir)
    test_dir.mkdir(parents=True)
    # Switch to the test dir.
    os.chdir(test_dir)
    build_testdata(Path('testdata'))
    sign_executables_on_darwin()
    return run_tests_in_child_process(tests, args)