# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit testing support.

Unlike most Skylib files, this exports several modules: `unittest`, which
contains functions to declare and define unit tests; `asserts`, which contains
the assertions used within tests; `analysistest`, which supports analysis-time
tests of rules; and `loadingtest`, which supports loading-phase checks.
"""

load(":new_sets.bzl", new_sets = "sets")
load(":partial.bzl", "partial")
load(":types.bzl", "types")

# The following function should only be called from WORKSPACE files and workspace macros.
# buildifier: disable=unnamed-macro
def register_unittest_toolchains():
29    """Registers the toolchains for unittest users."""
    native.register_toolchains(
        "@bazel_skylib//toolchains/unittest:cmd_toolchain",
        "@bazel_skylib//toolchains/unittest:bash_toolchain",
    )

TOOLCHAIN_TYPE = "@bazel_skylib//toolchains/unittest:toolchain_type"

_UnittestToolchainInfo = provider(
    doc = "Execution platform information for rules in the bazel_skylib repository.",
    fields = [
        "file_ext",
        "success_templ",
        "failure_templ",
        "join_on",
        "escape_chars_with",
        "escape_other_chars_with",
    ],
)

def _unittest_toolchain_impl(ctx):
    return [
        platform_common.ToolchainInfo(
            unittest_toolchain_info = _UnittestToolchainInfo(
                file_ext = ctx.attr.file_ext,
                success_templ = ctx.attr.success_templ,
                failure_templ = ctx.attr.failure_templ,
                join_on = ctx.attr.join_on,
                escape_chars_with = ctx.attr.escape_chars_with,
                escape_other_chars_with = ctx.attr.escape_other_chars_with,
            ),
        ),
    ]

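# Sketch of what a custom `unittest_toolchain` instantiation in a BUILD file
# might look like; the values below are illustrative only, not the ones used
# by the real cmd/bash toolchains registered above:
#
#     unittest_toolchain(
#         name = "example_sh_toolchain",
#         file_ext = ".sh",
#         success_templ = "exit 0\n",
#         failure_templ = "echo '%s'\nexit 1\n",
#         join_on = "\n",
#     )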
unittest_toolchain = rule(
    implementation = _unittest_toolchain_impl,
    attrs = {
        "failure_templ": attr.string(
            mandatory = True,
            doc = (
                "Test script template with a single `%s`. That " +
                "placeholder is replaced with the lines in the " +
                "failure message joined with the string " +
                "specified in `join_on`. The resulting script " +
                "should print the failure message and exit with " +
                "non-zero status."
            ),
        ),
        "file_ext": attr.string(
            mandatory = True,
            doc = (
                "File extension for test script, including leading dot."
            ),
        ),
        "join_on": attr.string(
            mandatory = True,
            doc = (
                "String used to join the lines in the failure " +
                "message before including the resulting string " +
                "in the script specified in `failure_templ`."
            ),
        ),
        "success_templ": attr.string(
            mandatory = True,
            doc = (
                "Test script generated when the test passes. " +
                "Should exit with status 0."
            ),
        ),
        "escape_chars_with": attr.string_dict(
            doc = (
                "Dictionary mapping characters that need escaping in " +
                "the test failure message to the prefix prepended to " +
                "escape those characters. For example, " +
                '`{"%": "%", ">": "^"}` would replace `%` with ' +
                "`%%` and `>` with `^>` in the failure message " +
                "before that is included in `failure_templ`."
            ),
        ),
        "escape_other_chars_with": attr.string(
            default = "",
            doc = (
                "String to prefix every character in the test failure " +
                "message which is not a key in `escape_chars_with` " +
                "before including that in `failure_templ`. For " +
                'example, `"\\\\"` (a single backslash) would prefix ' +
                "every character in the failure message (except those " +
                "in the keys of `escape_chars_with`) with `\\`."
            ),
        ),
    },
)

def _impl_function_name(impl):
    """Derives the name of the given rule implementation function.

    This can be used for better test feedback.

    Args:
      impl: the rule implementation function

    Returns:
      The name of the given function
    """

    # Starlark currently stringifies a function as "<function NAME>", so we use
    # that knowledge to parse the "NAME" portion out. If this behavior ever
    # changes, we'll need to update this.
    # TODO(bazel-team): Expose a ._name field on functions to avoid this.
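    # For example, given the stringification described above, an
    # implementation function named `_your_test` appears as
    # "<function _your_test>", so this helper returns "_your_test".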
    impl_name = str(impl)
    impl_name = impl_name.partition("<function ")[-1]
    return impl_name.rpartition(">")[0]

def _make(impl, attrs = {}):
    """Creates a unit test rule from its implementation function.

    Each unit test is defined in an implementation function that must then be
    associated with a rule so that a target can be built. This function handles
    the boilerplate to create and return a test rule and captures the
    implementation function's name so that it can be printed in test feedback.

    The optional `attrs` argument can be used to define dependencies for this
    test, in order to form unit tests of rules.

    An example of a unit test:

    ```
    def _your_test(ctx):
      env = unittest.begin(ctx)

      # Assert statements go here

      return unittest.end(env)

    your_test = unittest.make(_your_test)
    ```
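
    If the test needs to depend on other targets (for example, to form unit
    tests of rules), pass `attrs`; the attribute name `dep` below is purely
    illustrative:

    ```
    your_test = unittest.make(
        _your_test,
        attrs = {"dep": attr.label()},
    )
    ```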

    Recall that names of test rules must end in `_test`.

    Args:
      impl: The implementation function of the unit test.
      attrs: An optional dictionary to supplement the attrs passed to the
          unit test's `rule()` constructor.

    Returns:
      A rule definition that should be stored in a global whose name ends in
      `_test`.
    """
    attrs = dict(attrs)
    attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))

    return rule(
        impl,
        attrs = attrs,
        _skylark_testable = True,
        test = True,
        toolchains = [TOOLCHAIN_TYPE],
    )

_ActionInfo = provider(
    doc = "Information relating to the target under test.",
    fields = ["actions", "bin_path"],
)

def _action_retrieving_aspect_impl(target, ctx):
    return [
        _ActionInfo(
            actions = target.actions,
            bin_path = ctx.bin_dir.path,
        ),
    ]

_action_retrieving_aspect = aspect(
    attr_aspects = [],
    implementation = _action_retrieving_aspect_impl,
)

# TODO(cparsons): Provide more full documentation on analysis testing in README.
def _make_analysis_test(
        impl,
        expect_failure = False,
        attrs = {},
        fragments = [],
        config_settings = {},
        extra_target_under_test_aspects = [],
        doc = ""):
    """Creates an analysis test rule from its implementation function.

    An analysis test verifies the behavior of a "real" rule target by examining
    and asserting on the providers given by the real target.

    Each analysis test is defined in an implementation function that must then be
    associated with a rule so that a target can be built. This function handles
    the boilerplate to create and return a test rule and captures the
    implementation function's name so that it can be printed in test feedback.

    An example of an analysis test:

    ```
    def _your_test(ctx):
      env = analysistest.begin(ctx)

      # Assert statements go here

      return analysistest.end(env)

    your_test = analysistest.make(_your_test)
    ```

    Recall that names of test rules must end in `_test`.

    Args:
      impl: The implementation function of the unit test.
      expect_failure: If true, the analysis test will expect the target_under_test
          to fail. Assertions can be made on the underlying failure using
          `asserts.expect_failure`.
      attrs: An optional dictionary to supplement the attrs passed to the
          unit test's `rule()` constructor.
      fragments: An optional list of fragment names that can be used to give rules access to
          language-specific parts of configuration.
      config_settings: A dictionary of configuration settings to change for the target under
          test and its dependencies. This may be used to essentially change 'build flags' for
          the target under test, and may thus be utilized to test multiple targets with different
          flags in a single build. See the sketch at the end of this docstring.
      extra_target_under_test_aspects: An optional list of aspects to apply to the target_under_test
          in addition to those set up by default for the test harness itself.
      doc: A description of the rule that can be extracted by documentation generating tools.

    Returns:
      A rule definition that should be stored in a global whose name ends in
      `_test`.
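
    As a sketch of how `config_settings` might be used (the flag shown,
    `//command_line_option:compilation_mode`, is a standard Bazel option; the
    rule and function names are placeholders):

    ```
    your_opt_test = analysistest.make(
        _your_test,
        config_settings = {
            "//command_line_option:compilation_mode": "opt",
        },
    )
    ```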
259    """
260    attrs = dict(attrs)
261    attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))
262
263    changed_settings = dict(config_settings)
264    if expect_failure:
265        changed_settings["//command_line_option:allow_analysis_failures"] = "True"
266
267    target_attr_kwargs = {}
268    if changed_settings:
269        test_transition = analysis_test_transition(
270            settings = changed_settings,
271        )
272        target_attr_kwargs["cfg"] = test_transition
273
274    attrs["target_under_test"] = attr.label(
275        aspects = [_action_retrieving_aspect] + extra_target_under_test_aspects,
276        mandatory = True,
277        **target_attr_kwargs
278    )
279
280    return rule(
281        impl,
282        doc = doc,
283        attrs = attrs,
284        fragments = fragments,
285        test = True,
286        toolchains = [TOOLCHAIN_TYPE],
287        analysis_test = True,
288    )
289
290def _suite(name, *test_rules):
291    """Defines a `test_suite` target that contains multiple tests.
292
293    After defining your test rules in a `.bzl` file, you need to create targets
294    from those rules so that `blaze test` can execute them. Doing this manually
295    in a BUILD file would consist of listing each test in your `load` statement
296    and then creating each target one by one. To reduce duplication, we recommend
297    writing a macro in your `.bzl` file to instantiate all targets, and calling
298    that macro from your BUILD file so you only have to load one symbol.
299
300    You can use this function to create the targets and wrap them in a single
301    test_suite target. If a test rule requires no arguments, you can simply list
302    it as an argument. If you wish to supply attributes explicitly, you can do so
303    using `partial.make()`. For instance, in your `.bzl` file, you could write:
304
305    ```
306    def your_test_suite():
307      unittest.suite(
308          "your_test_suite",
309          your_test,
310          your_other_test,
311          partial.make(yet_another_test, timeout = "short"),
312      )
313    ```
314
315    Then, in your `BUILD` file, simply load the macro and invoke it to have all
316    of the targets created:
317
318    ```
319    load("//path/to/your/package:tests.bzl", "your_test_suite")
320    your_test_suite()
321    ```
322
323    If you pass _N_ unit test rules to `unittest.suite`, _N_ + 1 targets will be
324    created: a `test_suite` target named `${name}` (where `${name}` is the name
325    argument passed in here) and targets named `${name}_test_${i}`, where `${i}`
326    is the index of the test in the `test_rules` list, which is used to uniquely
327    name each target.
328
329    Args:
330      name: The name of the `test_suite` target, and the prefix of all the test
331          target names.
      *test_rules: A list of test rules defined by `unittest.make`, or
          partial calls of such rules created with `partial.make`.
333    """
334    test_names = []
335    for index, test_rule in enumerate(test_rules):
336        test_name = "%s_test_%d" % (name, index)
337        if partial.is_instance(test_rule):
338            partial.call(test_rule, name = test_name)
339        else:
340            test_rule(name = test_name)
341        test_names.append(test_name)
342
343    native.test_suite(
344        name = name,
345        tests = [":%s" % t for t in test_names],
346    )
347
348def _begin(ctx):
349    """Begins a unit test.
350
351    This should be the first function called in a unit test implementation
352    function. It initializes a "test environment" that is used to collect
353    assertion failures so that they can be reported and logged at the end of the
354    test.
355
356    Args:
357      ctx: The Starlark context. Pass the implementation function's `ctx` argument
358          in verbatim.
359
360    Returns:
361      A test environment struct that must be passed to assertions and finally to
362      `unittest.end`. Do not rely on internal details about the fields in this
363      struct as it may change.
364    """
365    return struct(ctx = ctx, failures = [])
366
367def _end_analysis_test(env):
368    """Ends an analysis test and logs the results.
369
370    This must be called and returned at the end of an analysis test implementation function so
371    that the results are reported.
372
373    Args:
374      env: The test environment returned by `analysistest.begin`.
375
376    Returns:
377      A list of providers needed to automatically register the analysis test result.
378    """
379    return [AnalysisTestResultInfo(
380        success = (len(env.failures) == 0),
381        message = "\n".join(env.failures),
382    )]
383
384def _end(env):
385    """Ends a unit test and logs the results.
386
387    This must be called and returned at the end of a unit test implementation function so
388    that the results are reported.
389
390    Args:
391      env: The test environment returned by `unittest.begin`.
392
393    Returns:
394      A list of providers needed to automatically register the test result.
395    """
396
397    tc = env.ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info
398    testbin = env.ctx.actions.declare_file(env.ctx.label.name + tc.file_ext)
399    if env.failures:
400        failure_message_lines = "\n".join(env.failures).split("\n")
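        # Escape each character of each line: characters listed in the
        # toolchain's `escape_chars_with` get their specific prefix, and every
        # other character gets `escape_other_chars_with` (empty by default).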
        escaped_failure_message_lines = [
            "".join([
                tc.escape_chars_with.get(c, tc.escape_other_chars_with) + c
                for c in line.elems()
            ])
            for line in failure_message_lines
        ]
        cmd = tc.failure_templ % tc.join_on.join(escaped_failure_message_lines)
    else:
        cmd = tc.success_templ

    env.ctx.actions.write(
        output = testbin,
        content = cmd,
        is_executable = True,
    )
    return [DefaultInfo(executable = testbin)]

def _fail(env, msg):
420    """Unconditionally causes the current test to fail.
421
422    Args:
423      env: The test environment returned by `unittest.begin`.
424      msg: The message to log describing the failure.
425    """
426    full_msg = "In test %s: %s" % (env.ctx.attr._impl_name, msg)
427
428    # There isn't a better way to output the message in Starlark, so use print.
429    # buildifier: disable=print
430    print(full_msg)
431    env.failures.append(full_msg)
432
433def _assert_true(
        env,
        condition,
        msg = "Expected condition to be true, but was false."):
    """Asserts that the given `condition` is true.

    Args:
      env: The test environment returned by `unittest.begin`.
      condition: A value that will be evaluated in a Boolean context.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if not condition:
        _fail(env, msg)

def _assert_false(
        env,
        condition,
        msg = "Expected condition to be false, but was true."):
    """Asserts that the given `condition` is false.

    Args:
      env: The test environment returned by `unittest.begin`.
      condition: A value that will be evaluated in a Boolean context.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if condition:
        _fail(env, msg)

def _assert_equals(env, expected, actual, msg = None):
464    """Asserts that the given `expected` and `actual` values are equal.
465
466    Args:
467      env: The test environment returned by `unittest.begin`.
468      expected: The expected value of some computation.
469      actual: The actual value returned by some computation.
470      msg: An optional message that will be printed that describes the failure.
471          If omitted, a default will be used.
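
    For example (`_add` here is just a placeholder for whatever computation is
    under test):

    ```
    asserts.equals(env, 4, _add(2, 2))
    ```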
472    """
473    if expected != actual:
474        expectation_msg = 'Expected "%s", but got "%s"' % (expected, actual)
475        if msg:
476            full_msg = "%s (%s)" % (msg, expectation_msg)
477        else:
478            full_msg = expectation_msg
479        _fail(env, full_msg)
480
481def _assert_set_equals(env, expected, actual, msg = None):
482    """Asserts that the given `expected` and `actual` sets are equal.
483
484    Args:
485      env: The test environment returned by `unittest.begin`.
486      expected: The expected set resulting from some computation.
487      actual: The actual set returned by some computation.
488      msg: An optional message that will be printed that describes the failure.
489          If omitted, a default will be used.
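
    For example, assuming `sets` has been loaded from skylib's `new_sets.bzl`
    (as this file does) and that it provides `sets.make`:

    ```
    asserts.set_equals(env, sets.make([1, 2]), sets.make(actual_values))
    ```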
490    """
491    if not new_sets.is_equal(expected, actual):
492        missing = new_sets.difference(expected, actual)
493        unexpected = new_sets.difference(actual, expected)
494        expectation_msg = "Expected %s, but got %s" % (new_sets.str(expected), new_sets.str(actual))
495        if new_sets.length(missing) > 0:
496            expectation_msg += ", missing are %s" % (new_sets.str(missing))
497        if new_sets.length(unexpected) > 0:
498            expectation_msg += ", unexpected are %s" % (new_sets.str(unexpected))
499        if msg:
500            full_msg = "%s (%s)" % (msg, expectation_msg)
501        else:
502            full_msg = expectation_msg
503        _fail(env, full_msg)
504
505_assert_new_set_equals = _assert_set_equals
506
507def _expect_failure(env, expected_failure_msg = ""):
508    """Asserts that the target under test has failed with a given error message.
509
510    This requires that the analysis test is created with `analysistest.make()` and
511    `expect_failures = True` is specified.
512
513    Args:
514      env: The test environment returned by `analysistest.begin`.
515      expected_failure_msg: The error message to expect as a result of analysis failures.
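
    For example (a sketch; the test rule must have been created with
    `analysistest.make(..., expect_failure = True)`):

    ```
    def _your_failure_test(ctx):
      env = analysistest.begin(ctx)
      asserts.expect_failure(env, "some expected error substring")
      return analysistest.end(env)
    ```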
516    """
517    dep = _target_under_test(env)
518    if AnalysisFailureInfo in dep:
519        actual_errors = ""
520        for cause in dep[AnalysisFailureInfo].causes.to_list():
521            actual_errors += cause.message + "\n"
522        if actual_errors.find(expected_failure_msg) < 0:
523            expectation_msg = "Expected errors to contain '%s' but did not. " % expected_failure_msg
524            expectation_msg += "Actual errors:%s" % actual_errors
525            _fail(env, expectation_msg)
526    else:
527        _fail(env, "Expected failure of target_under_test, but found success")
528
529def _target_actions(env):
530    """Returns a list of actions registered by the target under test.
531
532    Args:
533      env: The test environment returned by `analysistest.begin`.
534
535    Returns:
536      A list of actions registered by the target under test
537    """
538
539    # Validate?
540    return _target_under_test(env)[_ActionInfo].actions
541
542def _target_bin_dir_path(env):
543    """Returns ctx.bin_dir.path for the target under test.
544
545    Args:
546      env: The test environment returned by `analysistest.begin`.
547
548    Returns:
549      Output bin dir path string.
550    """
551    return _target_under_test(env)[_ActionInfo].bin_path
552
553def _target_under_test(env):
554    """Returns the target under test.
555
556    Args:
557      env: The test environment returned by `analysistest.begin`.
558
559    Returns:
560      The target under test.
561    """
562    result = getattr(env.ctx.attr, "target_under_test")
563    if types.is_list(result):
564        if result:
565            return result[0]
566        else:
567            fail("test rule does not have a target_under_test")
568    return result
569
570def _loading_test_impl(ctx):
    tc = ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info
    content = tc.success_templ
    if ctx.attr.failure_message:
        content = tc.failure_templ % ctx.attr.failure_message

    testbin = ctx.actions.declare_file("loading_test_" + ctx.label.name + tc.file_ext)
    ctx.actions.write(
        output = testbin,
        content = content,
        is_executable = True,
    )
    return [DefaultInfo(executable = testbin)]

_loading_test = rule(
    implementation = _loading_test_impl,
    attrs = {
        "failure_message": attr.string(),
    },
    toolchains = [TOOLCHAIN_TYPE],
    test = True,
)

def _loading_make(name):
594    """Creates a loading phase test environment and test_suite.
595
596    Args:
597       name: name of the suite of tests to create
598
599    Returns:
600       loading phase environment passed to other loadingtest functions
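
    A sketch of typical use from a macro in a `BUILD` file (the names and
    values here are illustrative):

    ```
    loading_env = loadingtest.make("my_tests")
    loadingtest.equals(loading_env, "version", "1.2.3", extracted_version)
    ```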
601    """
602    native.test_suite(
603        name = name + "_tests",
604        tags = [name + "_test_case"],
605    )
606    return struct(name = name)
607
608def _loading_assert_equals(env, test_case, expected, actual):
609    """Creates a test case for asserting state at LOADING phase.
610
611    Args:
612      env:       Loading test env created from loadingtest.make
613      test_case: Name of the test case
614      expected:  Expected value to test
615      actual:    Actual value received.
616
617    Returns:
618      None, creates test case
619    """
620
621    msg = None
622    if expected != actual:
623        msg = 'Expected "%s", but got "%s"' % (expected, actual)
624
625    _loading_test(
626        name = "%s_%s" % (env.name, test_case),
627        failure_message = msg,
628        tags = [env.name + "_test_case"],
629    )
630
631asserts = struct(
    expect_failure = _expect_failure,
    equals = _assert_equals,
    false = _assert_false,
    set_equals = _assert_set_equals,
    new_set_equals = _assert_new_set_equals,
    true = _assert_true,
)

unittest = struct(
    make = _make,
    suite = _suite,
    begin = _begin,
    end = _end,
    fail = _fail,
)

analysistest = struct(
    make = _make_analysis_test,
    begin = _begin,
    end = _end_analysis_test,
    fail = _fail,
    target_actions = _target_actions,
    target_bin_dir_path = _target_bin_dir_path,
    target_under_test = _target_under_test,
)

loadingtest = struct(
    make = _loading_make,
    equals = _loading_assert_equals,
)