• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1# Copyright 2020 The Pigweed Authors
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may not
4# use this file except in compliance with the License. You may obtain a copy of
5# the License at
6#
7#     https://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations under
13# the License.
"""Functions for building code during presubmit checks."""
15
16import base64
17import contextlib
18from dataclasses import dataclass
19import itertools
20import json
21import logging
22import os
23import posixpath
24from pathlib import Path
25import re
26import subprocess
27from shutil import which
28import sys
29import tarfile
30from typing import (
31    Any,
32    Callable,
33    Collection,
34    Container,
35    ContextManager,
36    Iterable,
37    Iterator,
38    Mapping,
39    Sequence,
40    Set,
41    TextIO,
42)
43
44import pw_cli.color
45from pw_cli.plural import plural
46from pw_cli.file_filter import FileFilter
47from pw_presubmit.presubmit import (
48    call,
49    Check,
50    filter_paths,
51    install_package,
52    PresubmitResult,
53    SubStep,
54)
55from pw_presubmit.presubmit_context import (
56    PresubmitContext,
57    PresubmitFailure,
58)
59from pw_presubmit import (
60    bazel_parser,
61    format_code,
62    ninja_parser,
63)
64from pw_presubmit.tools import (
65    log_run,
66    format_command,
67)
68
69_LOG = logging.getLogger(__name__)
70
71
def bazel(
    ctx: PresubmitContext,
    cmd: str,
    *args: str,
    use_remote_cache: bool = False,
    stdout: TextIO | None = None,
    **kwargs,
) -> None:
    """Invokes Bazel with some common flags set.

    Intended for use with bazel build and test. May not work with others.

    Args:
        ctx: the presubmit context.
        cmd: the bazel subcommand to run (e.g. 'build' or 'test').
        *args: additional arguments forwarded to bazel.
        use_remote_cache: pass --config=remote_cache when running on LUCI.
        stdout: stream to tee bazel's stdout to; defaults to a
            'bazel.<cmd>.stdout' file in the output directory.
        **kwargs: forwarded to the underlying call().

    Raises:
        PresubmitFailure: if the bazel invocation fails. When a failure
            summary can be parsed from the stdout log, it is written to
            ctx.failure_summary_log.
    """

    num_jobs: list[str] = []
    if ctx.num_jobs is not None:
        num_jobs.extend(('--jobs', str(ctx.num_jobs)))

    keep_going: list[str] = []
    if ctx.continue_after_build_error:
        keep_going.append('--keep_going')

    remote_cache: list[str] = []
    if use_remote_cache and ctx.luci:
        remote_cache.append('--config=remote_cache')
        if ctx.luci.is_ci:
            # Only CI builders should attempt to write to the cache. Try
            # builders will be denied permission if they do so.
            remote_cache.append('--remote_upload_local_results=true')

    ctx.output_dir.mkdir(exist_ok=True, parents=True)
    try:
        with contextlib.ExitStack() as stack:
            if not stdout:
                stdout = stack.enter_context(
                    (ctx.output_dir / f'bazel.{cmd}.stdout').open('w')
                )

            # Record the output base (and any errors doing so) for debugging.
            with (ctx.output_dir / 'bazel.output.base').open('w') as outs, (
                ctx.output_dir / 'bazel.output.base.err'
            ).open('w') as errs:
                call('bazel', 'info', 'output_base', tee=outs, stderr=errs)

            call(
                'bazel',
                cmd,
                '--verbose_failures',
                '--worker_verbose',
                f'--symlink_prefix={ctx.output_dir / "bazel-"}',
                *num_jobs,
                *keep_going,
                *remote_cache,
                *args,
                cwd=ctx.root,
                tee=stdout,
                call_annotation={'build_system': 'bazel'},
                **kwargs,
            )

    except PresubmitFailure:
        if stdout:
            failure = bazel_parser.parse_bazel_stdout(Path(stdout.name))
            if failure:
                with ctx.failure_summary_log.open('w') as outs:
                    outs.write(failure)

        # Bare raise preserves the original traceback (was 'raise exc').
        raise
138
139
140def _gn_value(value) -> str:
141    if isinstance(value, bool):
142        return str(value).lower()
143
144    if (
145        isinstance(value, str)
146        and '"' not in value
147        and not value.startswith("{")
148        and not value.startswith("[")
149    ):
150        return f'"{value}"'
151
152    if isinstance(value, (list, tuple)):
153        return f'[{", ".join(_gn_value(a) for a in value)}]'
154
155    # Fall-back case handles integers as well as strings that already
156    # contain double quotation marks, or look like scopes or lists.
157    return str(value)
158
159
def gn_args_list(**kwargs) -> list[str]:
    """Return a list of formatted strings to use as gn args.

    Currently supports bool, int, and str values. In the case of str values,
    quotation marks will be added automatically, unless the string already
    contains one or more double quotation marks, or starts with a { or [
    character, in which case it will be passed through as-is.
    """
    formatted = [f'{name}={_gn_value(value)}' for name, value in kwargs.items()]

    # Use ccache if available for faster repeat presubmit runs.
    if 'pw_command_launcher' not in kwargs and which('ccache'):
        formatted.append('pw_command_launcher="ccache"')

    return formatted
177
178
def gn_args(**kwargs) -> str:
    """Builds a string to use for the --args argument to gn gen.

    Currently supports bool, int, and str values. In the case of str values,
    quotation marks will be added automatically, unless the string already
    contains one or more double quotation marks, or starts with a { or [
    character, in which case it will be passed through as-is.
    """
    joined_args = ' '.join(gn_args_list(**kwargs))
    return f'--args={joined_args}'
188
189
def write_gn_args_file(destination_file: Path, **kwargs) -> str:
    """Write gn args to a file.

    Currently supports bool, int, and str values. In the case of str values,
    quotation marks will be added automatically, unless the string already
    contains one or more double quotation marks, or starts with a { or [
    character, in which case it will be passed through as-is.

    Returns:
      The contents of the written file.
    """
    # One arg per line, with a trailing linebreak.
    contents = '\n'.join(gn_args_list(**kwargs)) + '\n'
    destination_file.parent.mkdir(exist_ok=True, parents=True)

    # Skip the write when the file already holds exactly this content.
    already_current = (
        destination_file.is_file()
        and destination_file.read_text(encoding='utf-8') == contents
    )
    if not already_current:
        destination_file.write_text(contents, encoding='utf-8')

    return contents
215
216
def gn_gen(
    ctx: PresubmitContext,
    *args: str,
    gn_check: bool = True,  # pylint: disable=redefined-outer-name
    gn_fail_on_unused: bool = True,
    export_compile_commands: bool | str = True,
    preserve_args_gn: bool = False,
    **gn_arguments,
) -> None:
    """Runs gn gen in the specified directory with optional GN args.

    Runs with --check=system if gn_check=True. Note that this does not cover
    generated files. Run gn_check() after building to check generated files.

    Args:
        ctx: the presubmit context.
        *args: additional command-line arguments passed to gn gen.
        gn_check: pass --check=system to verify header dependencies.
        gn_fail_on_unused: pass --fail-on-unused-args.
        export_compile_commands: pass --export-compile-commands; a string
            value is appended as the option's argument.
        preserve_args_gn: keep any existing args.gn instead of deleting it.
        **gn_arguments: GN build args, formatted via gn_args().
    """
    all_gn_args = {'pw_build_COLORIZE_OUTPUT': pw_cli.color.is_enabled()}
    all_gn_args.update(gn_arguments)
    # Context-supplied overrides take precedence over caller-provided args.
    all_gn_args.update(ctx.override_gn_args)
    _LOG.debug('%r', all_gn_args)
    args_option = gn_args(**all_gn_args)

    if not ctx.dry_run and not preserve_args_gn:
        # Delete args.gn to ensure this is a clean build.
        args_gn = ctx.output_dir / 'args.gn'
        if args_gn.is_file():
            args_gn.unlink()

    export_commands_arg = ''
    if export_compile_commands:
        export_commands_arg = '--export-compile-commands'
        if isinstance(export_compile_commands, str):
            export_commands_arg += f'={export_compile_commands}'

    call(
        'gn',
        '--color' if pw_cli.color.is_enabled() else '--nocolor',
        'gen',
        ctx.output_dir,
        *(['--check=system'] if gn_check else []),
        *(['--fail-on-unused-args'] if gn_fail_on_unused else []),
        *([export_commands_arg] if export_commands_arg else []),
        *args,
        *([args_option] if all_gn_args else []),
        cwd=ctx.root,
        call_annotation={
            'gn_gen_args': all_gn_args,
            'gn_gen_args_option': args_option,
        },
    )
265
266
def gn_check(ctx: PresubmitContext) -> PresubmitResult:
    """Runs gn check, including on generated and system files."""
    check_args = (
        'check',
        ctx.output_dir,
        '--check-generated',
        '--check-system',
    )
    call('gn', *check_args, cwd=ctx.root)
    return PresubmitResult.PASS
278
279
def _save_ninja_tool_output(
    ctx: PresubmitContext,
    tool: str,
    filename: str,
    args: Sequence[str],
    **kwargs,
) -> None:
    """Runs a 'ninja -t' tool and saves its stdout in the output directory."""
    proc = log_run(
        ['ninja', '-C', ctx.output_dir, '-t', tool, *args],
        capture_output=True,
        **kwargs,
    )
    if not ctx.dry_run:
        (ctx.output_dir / filename).write_bytes(proc.stdout)


def ninja(
    ctx: PresubmitContext,
    *args,
    save_compdb: bool = True,
    save_graph: bool = True,
    **kwargs,
) -> None:
    """Runs ninja in the specified directory.

    Args:
        ctx: the presubmit context; supplies the build directory, job count,
            and continue-after-error settings.
        *args: extra arguments (typically targets) passed through to ninja.
        save_compdb: also dump the compilation database to 'ninja.compdb'.
        save_graph: also dump the build graph to 'ninja.graph'.
        **kwargs: forwarded to the underlying subprocess invocations.

    Raises:
        PresubmitFailure: if the ninja invocation fails. When a failure
            summary can be parsed from ninja's stdout log, it is written to
            ctx.failure_summary_log.
    """

    num_jobs: list[str] = []
    if ctx.num_jobs is not None:
        num_jobs.extend(('-j', str(ctx.num_jobs)))

    keep_going: list[str] = []
    if ctx.continue_after_build_error:
        keep_going.extend(('-k', '0'))

    if save_compdb:
        _save_ninja_tool_output(ctx, 'compdb', 'ninja.compdb', args, **kwargs)

    if save_graph:
        _save_ninja_tool_output(ctx, 'graph', 'ninja.graph', args, **kwargs)

    ninja_stdout = ctx.output_dir / 'ninja.stdout'
    ctx.output_dir.mkdir(exist_ok=True, parents=True)
    try:
        with ninja_stdout.open('w') as outs:
            if sys.platform == 'win32':
                # Windows doesn't support pw-wrap-ninja.
                ninja_command = ['ninja']
            else:
                ninja_command = ['pw-wrap-ninja', '--log-actions']

            call(
                *ninja_command,
                '-C',
                ctx.output_dir,
                *num_jobs,
                *keep_going,
                *args,
                tee=outs,
                propagate_sigterm=True,
                call_annotation={'build_system': 'ninja'},
                **kwargs,
            )

    except PresubmitFailure:
        failure = ninja_parser.parse_ninja_stdout(ninja_stdout)
        if failure:
            with ctx.failure_summary_log.open('w') as outs:
                outs.write(failure)

        # Bare raise preserves the original traceback (was 'raise exc').
        raise
345
346
def get_gn_args(directory: Path) -> list[dict[str, dict[str, str]]]:
    """Dumps GN variables to JSON."""
    command = ['gn', 'args', directory, '--list', '--json']
    completed = log_run(command, stdout=subprocess.PIPE)
    return json.loads(completed.stdout)
353
354
def cmake(
    ctx: PresubmitContext,
    *args: str,
    env: Mapping[str, str] | None = None,
) -> None:
    """Runs CMake for Ninja on the given source and output directories.

    Args:
        ctx: the presubmit context; supplies the source root and build dir.
        *args: additional arguments forwarded to cmake.
        env: optional environment variables for the cmake invocation.
    """
    # Note: the annotation previously read Mapping['str', 'str'] (string
    # literals instead of the str type); fixed to Mapping[str, str].
    call(
        'cmake',
        '-B',
        ctx.output_dir,
        '-S',
        ctx.root,
        '-G',
        'Ninja',
        *args,
        env=env,
    )
372
373
def env_with_clang_vars() -> Mapping[str, str]:
    """Returns the environment variables with CC, CXX, etc. set for clang."""
    clang_env = dict(os.environ)
    clang_env.update(CC='clang', LD='clang', AS='clang', CXX='clang++')
    return clang_env
380
381
def _get_paths_from_command(source_dir: Path, *args, **kwargs) -> Set[Path]:
    """Runs a command and reads Bazel or GN //-style paths from it."""
    process = log_run(args, capture_output=True, cwd=source_dir, **kwargs)

    if process.returncode:
        _LOG.error(
            'Build invocation failed with return code %d!', process.returncode
        )
        _LOG.error(
            '[COMMAND] %s\n%s\n%s',
            *format_command(args, kwargs),
            process.stderr.decode(),
        )
        raise PresubmitFailure

    existing_files = set()
    for raw_line in process.stdout.splitlines():
        # Convert '//foo:bar.c'-style labels into relative file paths.
        relative = raw_line.strip().lstrip(b'/').replace(b':', b'/').decode()
        candidate = source_dir.joinpath(relative)
        if candidate.is_file():
            existing_files.add(candidate)

    return existing_files
406
407
# Finds string literals with '.' in them. Used by _search_files_for_paths()
# below to guess which string literals in build files may refer to files.
_MAYBE_A_PATH = re.compile(
    r'"'  # Starting double quote.
    # Start capture group 1 - the whole filename:
    #   File basename, a single period, file extension.
    r'([^\n" ]+\.[^\n" ]+)'
    # Non-capturing group 2 (optional).
    r'(?: > [^\n"]+)?'  # pw_zip style string "input_file.txt > output_file.txt"
    r'"'  # Ending double quote.
)
418
419
def _search_files_for_paths(build_files: Iterable[Path]) -> Iterable[Path]:
    """Yields existing files referenced by string literals in build files."""
    for build_file in build_files:
        parent = build_file.parent

        for match in _MAYBE_A_PATH.finditer(build_file.read_text()):
            # Only yield candidates that actually exist on disk.
            candidate = parent / match.group(1)
            if candidate.is_file():
                yield candidate
428
429
430def _read_compile_commands(compile_commands: Path) -> dict:
431    with compile_commands.open('rb') as fd:
432        return json.load(fd)
433
434
def compiled_files(compile_commands: Path) -> Iterable[Path]:
    """Yields the path of each file in a compile_commands.json database.

    Per the compilation database format, a relative 'file' entry is
    interpreted relative to that entry's 'directory'.
    """
    with compile_commands.open('rb') as ins:
        commands = json.load(ins)

    for command in commands:
        file = Path(command['file'])
        if file.is_absolute():
            yield file
        else:
            # Bug fix: the previous code joined in the wrong order
            # (file.joinpath(directory)); with an absolute 'directory' that
            # yielded the directory itself, discarding 'file' entirely.
            yield Path(command['directory'], file).resolve()
442
443
def check_compile_commands_for_files(
    compile_commands: Path | Iterable[Path],
    files: Iterable[Path],
    extensions: Collection[str] = format_code.CPP_SOURCE_EXTS,
) -> list[Path]:
    """Checks for paths in one or more compile_commands.json files.

    Only checks C and C++ source files by default.
    """
    if isinstance(compile_commands, Path):
        compile_commands = [compile_commands]

    # Union the compiled files from every database into one lookup set.
    compiled: set[Path] = set()
    for commands_file in compile_commands:
        compiled.update(compiled_files(commands_file))

    return [f for f in files if f.suffix in extensions and f not in compiled]
462
463
def check_bazel_build_for_files(
    bazel_extensions_to_check: Container[str],
    files: Iterable[Path],
    bazel_dirs: Iterable[Path] = (),
) -> list[Path]:
    """Checks that source files are in the Bazel builds.

    Args:
        bazel_extensions_to_check: which file suffixes to look for in Bazel
        files: the files that should be checked
        bazel_dirs: directories in which to run bazel query

    Returns:
        a list of missing files; will be empty if there were no missing files
    """

    # Gather every source file known to the Bazel builds.
    bazel_builds: Set[Path] = set()
    for bazel_dir in bazel_dirs:
        found = _get_paths_from_command(
            bazel_dir, 'bazel', 'query', 'kind("source file", //...:*)'
        )
        bazel_builds.update(found)

    missing: list[Path] = []

    if bazel_dirs:
        for path in files:
            if path.suffix not in bazel_extensions_to_check:
                continue
            if path in bazel_builds:
                continue
            # TODO: b/234883555 - Replace this workaround for fuzzers.
            if 'fuzz' in str(path):
                continue
            missing.append(path)

    if missing:
        _LOG.warning(
            '%s missing from the Bazel build:\n%s',
            plural(missing, 'file', are=True),
            '\n'.join(str(x) for x in missing),
        )

    return missing
506
507
def check_gn_build_for_files(
    gn_extensions_to_check: Container[str],
    files: Iterable[Path],
    gn_dirs: Iterable[tuple[Path, Path]] = (),
    gn_build_files: Iterable[Path] = (),
) -> list[Path]:
    """Checks that source files are in the GN build.

    Args:
        gn_extensions_to_check: which file suffixes to look for in GN
        files: the files that should be checked
        gn_dirs: (source_dir, output_dir) tuples with which to run gn desc
        gn_build_files: paths to BUILD.gn files to directly search for paths

    Returns:
        a list of missing files; will be empty if there were no missing files
    """

    # Collect paths known to the GN builds, both from gn desc output and from
    # string literals found directly in the given BUILD.gn files.
    gn_builds: Set[Path] = set()
    for source_dir, output_dir in gn_dirs:
        gn_builds |= _get_paths_from_command(
            source_dir, 'gn', 'desc', output_dir, '*'
        )
    gn_builds |= set(_search_files_for_paths(gn_build_files))

    missing: list[Path] = []
    if gn_dirs or gn_build_files:
        missing = [
            path
            for path in files
            if path.suffix in gn_extensions_to_check
            and path not in gn_builds
        ]

    if missing:
        _LOG.warning(
            '%s missing from the GN build:\n%s',
            plural(missing, 'file', are=True),
            '\n'.join(str(x) for x in missing),
        )

    return missing
551
552
def check_builds_for_files(
    bazel_extensions_to_check: Container[str],
    gn_extensions_to_check: Container[str],
    files: Iterable[Path],
    bazel_dirs: Iterable[Path] = (),
    gn_dirs: Iterable[tuple[Path, Path]] = (),
    gn_build_files: Iterable[Path] = (),
) -> dict[str, list[Path]]:
    """Checks that source files are in the GN and Bazel builds.

    Args:
        bazel_extensions_to_check: which file suffixes to look for in Bazel
        gn_extensions_to_check: which file suffixes to look for in GN
        files: the files that should be checked
        bazel_dirs: directories in which to run bazel query
        gn_dirs: (source_dir, output_dir) tuples with which to run gn desc
        gn_build_files: paths to BUILD.gn files to directly search for paths

    Returns:
        a dictionary mapping build system ('Bazel' or 'GN') to a list of
        missing files; will be empty if there were no missing files
    """

    bazel_missing = check_bazel_build_for_files(
        bazel_extensions_to_check=bazel_extensions_to_check,
        files=files,
        bazel_dirs=bazel_dirs,
    )
    gn_missing = check_gn_build_for_files(
        gn_extensions_to_check=gn_extensions_to_check,
        files=files,
        gn_dirs=gn_dirs,
        gn_build_files=gn_build_files,
    )

    # Only include a build system in the result if it had missing files.
    result = {}
    if bazel_missing:
        result['Bazel'] = bazel_missing
    if gn_missing:
        result['GN'] = gn_missing
    return result
594
595
596@contextlib.contextmanager
597def test_server(executable: str, output_dir: Path):
598    """Context manager that runs a test server executable.
599
600    Args:
601        executable: name of the test server executable
602        output_dir: path to the output directory (for logs)
603    """
604
605    with open(output_dir / 'test_server.log', 'w') as outs:
606        try:
607            proc = subprocess.Popen(
608                [executable, '--verbose'],
609                stdout=outs,
610                stderr=subprocess.STDOUT,
611            )
612
613            yield
614
615        finally:
616            proc.terminate()  # pylint: disable=used-before-assignment
617
618
@contextlib.contextmanager
def modified_env(**envvars):
    """Context manager that sets environment variables.

    Use by assigning values to variable names in the argument list, e.g.:
        `modified_env(MY_FLAG="some value")`

    Args:
        envvars: Keyword arguments
    """
    saved_env = os.environ.copy()
    os.environ.update(envvars)
    try:
        yield
    finally:
        # Restore in place rather than rebinding os.environ: assigning a
        # plain dict to os.environ (the old behavior) silently disabled the
        # os.putenv() syncing that the real environ mapping performs.
        os.environ.clear()
        os.environ.update(saved_env)
635
636
def fuzztest_prng_seed(ctx: PresubmitContext) -> str:
    """Convert the RNG seed to the format expected by FuzzTest.

    FuzzTest can be configured to use the seed by setting the
    `FUZZTEST_PRNG_SEED` environment variable to this value.

    Args:
        ctx: The context that includes a pseudorandom number generator seed.
    """
    seed_bytes = ctx.rng_seed.to_bytes(32, sys.byteorder)
    encoded = base64.urlsafe_b64encode(seed_bytes).decode('ascii')
    # FuzzTest expects unpadded base64, so strip the trailing '='.
    return encoded.rstrip('=')
648
649
@filter_paths(
    file_filter=FileFilter(endswith=('.bzl', '.bazel'), name=('WORKSPACE',))
)
def bazel_lint(ctx: PresubmitContext):
    """Runs buildifier with lint on Bazel files.

    Should be run after bazel_format since that will give more useful output
    for formatting-only issues.
    """

    errors_found = False
    for path in ctx.paths:
        try:
            call('buildifier', '--lint=warn', '--mode=check', path)
        except PresubmitFailure:
            # Keep checking the remaining files so every issue is reported.
            errors_found = True

    if errors_found:
        raise PresubmitFailure
669
670
@Check
def gn_gen_check(ctx: PresubmitContext):
    """Runs gn gen --check to enforce correct header dependencies."""
    # gn_check=True makes gn_gen() pass --check=system to gn gen.
    gn_gen(ctx, gn_check=True)
675
676
# A single already-resolved value: an int or a string.
Item = int | str
# A resolved value: one Item or a sequence of Items.
Value = Item | Sequence[Item]
# A callable that produces a Value when given the presubmit context.
ValueCallable = Callable[[PresubmitContext], Value]
# An input item may be a literal Item or a callable resolved later by _value().
InputItem = Item | ValueCallable
# The unresolved input: one InputItem or a sequence of InputItems.
InputValue = InputItem | Sequence[InputItem]
682
683
def _value(ctx: PresubmitContext, val: InputValue) -> Value:
    """Process any lambdas inside val

    val is a single value or a list of values, any of which might be a lambda
    that needs to be resolved. Call each of these lambdas with ctx and replace
    the lambda with the result. Return the updated top-level structure.
    """

    if isinstance(val, (str, int)):
        return val
    if callable(val):
        return val(ctx)

    resolved: list[Item] = []
    for entry in val:
        # Resolve callables first, then flatten any sequence results.
        expanded = entry(ctx) if callable(entry) else entry
        if isinstance(expanded, (int, str)):
            resolved.append(expanded)
        else:  # Sequence of items.
            resolved.extend(expanded)
    return resolved
710
711
# A callable that builds a context manager from the presubmit context.
_CtxMgrLambda = Callable[[PresubmitContext], ContextManager]
# Either a ready-made context manager or a lambda that builds one from ctx.
_CtxMgrOrLambda = ContextManager | _CtxMgrLambda
714
715
@dataclass(frozen=True)
class CommonCoverageOptions:
    """Coverage options shared by both CodeSearch and Gerrit.

    For Google use only.
    """

    # The "root" of the Kalypsi GCS bucket path to which the coverage data
    # should be uploaded. Typically gs://ng3-metrics/ng3-<teamname>-coverage.
    target_bucket_root: str

    # The project name in the Kalypsi GCS bucket path.
    target_bucket_project: str

    # See go/kalypsi-abs#trace-type-required.
    trace_type: str

    # See go/kalypsi-abs#owner-required.
    owner: str

    # See go/kalypsi-abs#bug-component-required.
    bug_component: str
738
739
@dataclass(frozen=True)
class CodeSearchCoverageOptions:
    """CodeSearch-specific coverage options. For Google use only."""

    # The name of the Gerrit host containing the CodeSearch repo. Just the name
    # ("pigweed"), not the full URL ("pigweed.googlesource.com"). This may be
    # different from the host from which the code was originally checked out.
    host: str

    # The name of the project, as expected by CodeSearch. Typically
    # 'codesearch'.
    project: str

    # See go/kalypsi-abs#ref-required.
    ref: str

    # See go/kalypsi-abs#source-required.
    source: str

    # See go/kalypsi-abs#add-prefix-optional. Empty string means no prefix.
    add_prefix: str = ''
761
762
@dataclass(frozen=True)
class GerritCoverageOptions:
    """Gerrit-specific coverage options. For Google use only."""

    # The name of the project, as expected by Gerrit. This is typically the
    # repository name, e.g. 'pigweed/pigweed' for upstream Pigweed.
    # Included in the coverage metadata for incremental (CQ) uploads.
    # See go/kalypsi-inc#project-required.
    project: str
771
772
@dataclass(frozen=True)
class CoverageOptions:
    """Coverage collection configuration. For Google use only."""

    # Options shared by every upload destination.
    common: CommonCoverageOptions
    # One entry per CodeSearch location; a repo may appear in several.
    codesearch: tuple[CodeSearchCoverageOptions, ...]
    # Options for incremental (Gerrit) coverage uploads.
    gerrit: GerritCoverageOptions
780
781
class _NinjaBase(Check):
    """Thin wrapper of Check for steps that call ninja."""

    def __init__(
        self,
        *args,
        packages: Sequence[str] = (),
        ninja_contexts: Sequence[_CtxMgrOrLambda] = (),
        ninja_targets: str | Sequence[str] | Sequence[Sequence[str]] = (),
        coverage_options: CoverageOptions | None = None,
        **kwargs,
    ):
        """Initializes a _NinjaBase object.

        Args:
            *args: Passed on to superclass.
            packages: List of 'pw package' packages to install.
            ninja_contexts: List of context managers to apply around ninja
                calls.
            ninja_targets: Single ninja target, list of Ninja targets, or list
                of list of ninja targets. If a list of a list, ninja will be
                called multiple times with the same build directory.
            coverage_options: Coverage collection options (or None, if not
                collecting coverage data).
            **kwargs: Passed on to superclass.
        """
        super().__init__(*args, **kwargs)
        self._packages: Sequence[str] = packages
        self._ninja_contexts: tuple[_CtxMgrOrLambda, ...] = tuple(
            ninja_contexts
        )
        self._coverage_options = coverage_options

        # Normalize a single target string into a one-element tuple.
        if isinstance(ninja_targets, str):
            ninja_targets = (ninja_targets,)
        ninja_targets = list(ninja_targets)
        # Targets must be either all strings (one ninja invocation) or all
        # sequences (one invocation per inner list) — never a mix.
        all_strings = all(isinstance(x, str) for x in ninja_targets)
        any_strings = any(isinstance(x, str) for x in ninja_targets)
        if ninja_targets and all_strings != any_strings:
            raise ValueError(repr(ninja_targets))

        self._ninja_target_lists: tuple[tuple[str, ...], ...]
        if all_strings:
            targets: list[str] = []
            for target in ninja_targets:
                targets.append(target)  # type: ignore
            self._ninja_target_lists = (tuple(targets),)
        else:
            self._ninja_target_lists = tuple(tuple(x) for x in ninja_targets)

    @property
    def ninja_targets(self) -> list[str]:
        """All ninja targets across all target lists, flattened."""
        return list(itertools.chain(*self._ninja_target_lists))

    def _install_package(  # pylint: disable=no-self-use
        self,
        ctx: PresubmitContext,
        package: str,
    ) -> PresubmitResult:
        """Installs one 'pw package' package as a substep."""
        install_package(ctx, package)
        return PresubmitResult.PASS

    @contextlib.contextmanager
    def _context(self, ctx: PresubmitContext):
        """Apply any context managers necessary for building."""
        with contextlib.ExitStack() as stack:
            for mgr in self._ninja_contexts:
                # Entries may be ready-made context managers or lambdas that
                # build one from the presubmit context.
                if isinstance(mgr, contextlib.AbstractContextManager):
                    stack.enter_context(mgr)
                else:
                    stack.enter_context(mgr(ctx))  # type: ignore
            yield

    def _ninja(
        self, ctx: PresubmitContext, targets: Sequence[str]
    ) -> PresubmitResult:
        """Runs ninja on the given targets inside the configured contexts."""
        with self._context(ctx):
            ninja(ctx, *targets)
        return PresubmitResult.PASS

    def _coverage(
        self, ctx: PresubmitContext, options: CoverageOptions
    ) -> PresubmitResult:
        """Archive and (on LUCI) upload coverage reports."""
        reports = ctx.output_dir / 'coverage_reports'
        os.makedirs(reports, exist_ok=True)
        coverage_jsons: list[Path] = []
        # Archive each coverage_report directory and collect its JSON report.
        for path in ctx.output_dir.rglob('coverage_report'):
            _LOG.debug('exploring %s', path)
            name = str(path.relative_to(ctx.output_dir))
            name = name.replace('_', '').replace('/', '_')
            with tarfile.open(reports / f'{name}.tar.gz', 'w:gz') as tar:
                tar.add(path, arcname=name, recursive=True)
            json_path = path / 'json' / 'report.json'
            if json_path.is_file():
                _LOG.debug('found json %s', json_path)
                coverage_jsons.append(json_path)

        if not coverage_jsons:
            ctx.fail('No coverage json file found')
            return PresubmitResult.FAIL

        if len(coverage_jsons) > 1:
            _LOG.warning(
                'More than one coverage json file, selecting first: %r',
                coverage_jsons,
            )

        coverage_json = coverage_jsons[0]

        if ctx.luci:
            if not ctx.luci.is_prod:
                _LOG.warning('Not uploading coverage since not running in prod')
                return PresubmitResult.PASS

            with self._context(ctx):
                # One metadata file (and GCS destination) per upload target.
                metadata_json_paths = _write_coverage_metadata(ctx, options)
                for i, metadata_json in enumerate(metadata_json_paths):
                    # GCS bucket paths are POSIX-like.
                    coverage_gcs_path = posixpath.join(
                        options.common.target_bucket_root,
                        'incremental' if ctx.luci.is_try else 'absolute',
                        options.common.target_bucket_project,
                        f'{ctx.luci.buildbucket_id}-{i}',
                    )
                    _copy_to_gcs(
                        ctx,
                        coverage_json,
                        posixpath.join(coverage_gcs_path, 'report.json'),
                    )
                    _copy_to_gcs(
                        ctx,
                        metadata_json,
                        posixpath.join(coverage_gcs_path, 'metadata.json'),
                    )

                return PresubmitResult.PASS

        _LOG.warning('Not uploading coverage since running locally')
        return PresubmitResult.PASS

    def _package_substeps(self) -> Iterator[SubStep]:
        """Yields one install substep per configured package."""
        for package in self._packages:
            yield SubStep(
                f'install {package} package',
                self._install_package,
                (package,),
            )

    def _ninja_substeps(self) -> Iterator[SubStep]:
        """Yields one ninja substep per target list, with unique names."""
        targets_parts = set()
        for targets in self._ninja_target_lists:
            targets_part = " ".join(targets)
            maxlen = 70
            # Truncate long target lists so the substep name stays readable.
            if len(targets_part) > maxlen:
                targets_part = f'{targets_part[0:maxlen-3]}...'
            assert targets_part not in targets_parts
            targets_parts.add(targets_part)
            yield SubStep(f'ninja {targets_part}', self._ninja, (targets,))

    def _coverage_substeps(self) -> Iterator[SubStep]:
        """Yields a coverage substep when coverage collection is configured."""
        if self._coverage_options is not None:
            yield SubStep('coverage', self._coverage, (self._coverage_options,))
945
946
def _copy_to_gcs(ctx: PresubmitContext, filepath: Path, gcs_dst: str):
    """Copy filepath to the GCS destination, logging gsutil's stdout.

    The gsutil output is teed to '<filename>.stdout' in the output dir so
    upload results are preserved alongside other presubmit artifacts.
    """
    stdout_log = ctx.output_dir / f'{filepath.name}.stdout'
    with stdout_log.open('w') as log_file:
        call("gsutil", "cp", filepath, gcs_dst, tee=log_file)
958
959
def _write_coverage_metadata(
    ctx: PresubmitContext, options: CoverageOptions
) -> Sequence[Path]:
    """Write out Kalypsi coverage metadata file(s) and return their paths."""
    # Exactly one triggering change is expected when uploading coverage.
    assert ctx.luci is not None
    assert len(ctx.luci.triggers) == 1
    change = ctx.luci.triggers[0]

    def _dump(path: Path, contents: dict) -> Path:
        # Serialize the metadata dict as JSON at the given path.
        with path.open('w') as outs:
            json.dump(contents, outs)
        return path

    # Fields common to both incremental (CQ) and absolute (CI) uploads.
    metadata = {
        'trace_type': options.common.trace_type,
        'trim_prefix': str(ctx.root),
        'patchset_num': change.patchset,
        'change_id': change.number,
        'owner': options.common.owner,
        'bug_component': options.common.bug_component,
    }

    if ctx.luci.is_try:
        # Running in CQ: uploading incremental coverage
        metadata['change_id'] = change.number
        metadata['host'] = change.gerrit_name
        metadata['patchset_num'] = change.patchset
        metadata['project'] = options.gerrit.project
        return (_dump(ctx.output_dir / "metadata.json", metadata),)

    # Running in CI: uploading absolute coverage, possibly to multiple locations
    # since a repo could be in codesearch in multiple places.
    written = []
    for i, cs in enumerate(options.codesearch):
        # The shared dict is updated in place each iteration; every key set
        # here is overwritten before the next dump, so no stale values leak.
        metadata['add_prefix'] = cs.add_prefix
        metadata['commit_id'] = change.ref
        metadata['host'] = cs.host
        metadata['project'] = cs.project
        metadata['ref'] = cs.ref
        metadata['source'] = cs.source
        written.append(_dump(ctx.output_dir / f'metadata-{i}.json', metadata))

    return tuple(written)
1014
1015
class GnGenNinja(_NinjaBase):
    """Thin wrapper of Check for steps that just call gn/ninja.

    Runs gn gen, ninja, then gn check.
    """

    def __init__(
        self,
        *args,
        gn_args: (  # pylint: disable=redefined-outer-name
            dict[str, Any] | None
        ) = None,
        **kwargs,
    ):
        """Initializes a GnGenNinja object.

        Args:
            *args: Passed on to superclass.
            gn_args: dict of GN args applied during 'gn gen'.
            **kwargs: Passed on to superclass.
        """
        super().__init__(self._substeps(), *args, **kwargs)
        self._gn_args: dict[str, Any] = gn_args or {}

    def add_default_gn_args(self, args):
        """Add any project-specific default GN args to 'args'."""

    @property
    def gn_args(self) -> dict[str, Any]:
        """The GN args this check was configured with."""
        return self._gn_args

    def _gn_gen(self, ctx: PresubmitContext) -> PresubmitResult:
        """Run 'gn gen', folding in coverage and configured GN args."""
        gen_args: dict[str, Any] = {}
        if self._coverage_options is not None:
            gen_args['pw_toolchain_COVERAGE_ENABLED'] = True
            gen_args['pw_build_PYTHON_TEST_COVERAGE'] = True
            if ctx.incremental:
                # Only profile the source files touched by this change.
                profiled = [
                    f'//{path.relative_to(ctx.root)}' for path in ctx.paths
                ]
                gen_args['pw_toolchain_PROFILE_SOURCE_FILES'] = profiled

        self.add_default_gn_args(gen_args)

        # Configured args win over defaults; values may be lazy callables
        # resolved against the context by _value().
        for key, raw_value in self._gn_args.items():
            gen_args[key] = _value(ctx, raw_value)
        gn_gen(ctx, gn_check=False, **gen_args)  # type: ignore
        return PresubmitResult.PASS

    def _substeps(self) -> Iterator[SubStep]:
        """Yield the full substep sequence: packages, gen, build, check."""
        yield from self._package_substeps()
        yield SubStep('gn gen', self._gn_gen)
        yield from self._ninja_substeps()
        # Run gn check after building so it can check generated files.
        yield SubStep('gn check', gn_check)
        yield from self._coverage_substeps()
1075