# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit testing support.

Unlike most Skylib files, this exports three modules: `unittest`, which
contains functions to declare and define unit tests; `analysistest`, which
contains functions to declare and define analysis tests; and `asserts`, which
contains the assertions used within tests.
"""

load(":new_sets.bzl", new_sets = "sets")
load(":types.bzl", "types")

# The following function should only be called from WORKSPACE files and workspace macros.
# buildifier: disable=unnamed-macro
def register_unittest_toolchains():
    """Registers the toolchains for unittest users."""
    native.register_toolchains(
        "@bazel_skylib//toolchains/unittest:cmd_toolchain",
        "@bazel_skylib//toolchains/unittest:bash_toolchain",
    )

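# Example (illustrative): assuming this file is loaded from bazel_skylib as
# `@bazel_skylib//lib:unittest.bzl`, a WORKSPACE file would register the
# toolchains once, before any unittest-based test targets are built:
#
#     load("@bazel_skylib//lib:unittest.bzl", "register_unittest_toolchains")
#
#     register_unittest_toolchains()
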
TOOLCHAIN_TYPE = "@bazel_skylib//toolchains/unittest:toolchain_type"

_UnittestToolchainInfo = provider(
    doc = "Execution platform information for rules in the bazel_skylib repository.",
    fields = ["file_ext", "success_templ", "failure_templ", "join_on"],
)

def _unittest_toolchain_impl(ctx):
    return [
        platform_common.ToolchainInfo(
            unittest_toolchain_info = _UnittestToolchainInfo(
                file_ext = ctx.attr.file_ext,
                success_templ = ctx.attr.success_templ,
                failure_templ = ctx.attr.failure_templ,
                join_on = ctx.attr.join_on,
            ),
        ),
    ]

unittest_toolchain = rule(
    implementation = _unittest_toolchain_impl,
    attrs = {
        "failure_templ": attr.string(mandatory = True),
        "file_ext": attr.string(mandatory = True),
        "join_on": attr.string(mandatory = True),
        "success_templ": attr.string(mandatory = True),
    },
)

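# Sketch of how a concrete toolchain might be declared with this rule; the
# target name and templates below are illustrative, not the actual
# definitions under //toolchains/unittest:
#
#     unittest_toolchain(
#         name = "bash_toolchain_impl",
#         failure_templ = "#!/usr/bin/env bash\necho '%s'\nexit 1\n",
#         file_ext = ".sh",
#         join_on = "\n",
#         success_templ = "#!/usr/bin/env bash\nexit 0\n",
#     )
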
def _impl_function_name(impl):
    """Derives the name of the given rule implementation function.

    This can be used for better test feedback.

    Args:
      impl: the rule implementation function

    Returns:
      The name of the given function
    """

    # Starlark currently stringifies a function as "<function NAME>", so we use
    # that knowledge to parse the "NAME" portion out. If this behavior ever
    # changes, we'll need to update this.
    # TODO(bazel-team): Expose a ._name field on functions to avoid this.
    impl_name = str(impl)
    impl_name = impl_name.partition("<function ")[-1]
    return impl_name.rpartition(">")[0]

def _make(impl, attrs = {}):
    """Creates a unit test rule from its implementation function.

    Each unit test is defined in an implementation function that must then be
    associated with a rule so that a target can be built. This function handles
    the boilerplate to create and return a test rule and captures the
    implementation function's name so that it can be printed in test feedback.

    The optional `attrs` argument can be used to define dependencies for this
    test, in order to form unit tests of rules.

    An example of a unit test:

    ```
    def _your_test(ctx):
      env = unittest.begin(ctx)

      # Assert statements go here

      return unittest.end(env)

    your_test = unittest.make(_your_test)
    ```

    Recall that names of test rules must end in `_test`.

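    A sketch of how the optional `attrs` argument described above can be used
    to give the test rule a dependency; the attribute name `dep` and the
    asserted label name are placeholders:

    ```
    def _your_dep_test(ctx):
      env = unittest.begin(ctx)

      asserts.equals(env, "expected_name", ctx.attr.dep.label.name)

      return unittest.end(env)

    your_dep_test = unittest.make(
        _your_dep_test,
        attrs = {"dep": attr.label()},
    )
    ```
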
    Args:
      impl: The implementation function of the unit test.
      attrs: An optional dictionary to supplement the attrs passed to the
          unit test's `rule()` constructor.

    Returns:
      A rule definition that should be stored in a global whose name ends in
      `_test`.
    """
    attrs = dict(attrs)
    attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))

    return rule(
        impl,
        attrs = attrs,
        _skylark_testable = True,
        test = True,
        toolchains = [TOOLCHAIN_TYPE],
    )

_ActionInfo = provider(fields = ["actions", "bin_path"])

def _action_retrieving_aspect_impl(target, ctx):
    return [
        _ActionInfo(
            actions = target.actions,
            bin_path = ctx.bin_dir.path,
        ),
    ]

_action_retrieving_aspect = aspect(
    attr_aspects = [],
    implementation = _action_retrieving_aspect_impl,
)

# TODO(cparsons): Provide more complete documentation on analysis testing in README.
def _make_analysis_test(
        impl,
        expect_failure = False,
        attrs = {},
        fragments = [],
        config_settings = {}):
    """Creates an analysis test rule from its implementation function.

    An analysis test verifies the behavior of a "real" rule target by examining
    and asserting on the providers given by the real target.

    Each analysis test is defined in an implementation function that must then be
    associated with a rule so that a target can be built. This function handles
    the boilerplate to create and return a test rule and captures the
    implementation function's name so that it can be printed in test feedback.

    An example of an analysis test:

    ```
    def _your_test(ctx):
      env = analysistest.begin(ctx)

      # Assert statements go here

      return analysistest.end(env)

    your_test = analysistest.make(_your_test)
    ```

    Recall that names of test rules must end in `_test`.

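    A sketch of how `expect_failure` and `config_settings` (described under
    Args below) can be combined; the flag and value shown are only
    illustrative:

    ```
    your_failure_test = analysistest.make(
        _your_failure_test,
        expect_failure = True,
        config_settings = {
            "//command_line_option:compilation_mode": "opt",
        },
    )
    ```
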
    Args:
      impl: The implementation function of the unit test.
      expect_failure: If true, the analysis test will expect the target_under_test
          to fail. Assertions can be made on the underlying failure using
          `asserts.expect_failure`.
      attrs: An optional dictionary to supplement the attrs passed to the
          unit test's `rule()` constructor.
      fragments: An optional list of fragment names that can be used to give rules access to
          language-specific parts of configuration.
      config_settings: A dictionary of configuration settings to change for the target under
          test and its dependencies. This may be used to essentially change 'build flags' for
          the target under test, and may thus be utilized to test multiple targets with different
          flags in a single build.

    Returns:
      A rule definition that should be stored in a global whose name ends in
      `_test`.
    """
    attrs = dict(attrs)
    attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))

    changed_settings = dict(config_settings)
    if expect_failure:
        changed_settings["//command_line_option:allow_analysis_failures"] = "True"

    target_attr_kwargs = {}
    if changed_settings:
        test_transition = analysis_test_transition(
            settings = changed_settings,
        )
        target_attr_kwargs["cfg"] = test_transition

    attrs["target_under_test"] = attr.label(
        aspects = [_action_retrieving_aspect],
        mandatory = True,
        **target_attr_kwargs
    )

    return rule(
        impl,
        attrs = attrs,
        fragments = fragments,
        test = True,
        toolchains = [TOOLCHAIN_TYPE],
        analysis_test = True,
    )

def _suite(name, *test_rules):
    """Defines a `test_suite` target that contains multiple tests.

    After defining your test rules in a `.bzl` file, you need to create targets
    from those rules so that `blaze test` can execute them. Doing this manually
    in a BUILD file would consist of listing each test in your `load` statement
    and then creating each target one by one. To reduce duplication, we recommend
    writing a macro in your `.bzl` file to instantiate all targets, and calling
    that macro from your BUILD file so you only have to load one symbol.

    For the case where your unit tests do not take any (non-default) attributes --
    i.e., if your unit tests do not test rules -- you can use this function to
    create the targets and wrap them in a single test_suite target. In your
    `.bzl` file, write:

    ```
    def your_test_suite():
      unittest.suite(
          "your_test_suite",
          your_test,
          your_other_test,
          yet_another_test,
      )
    ```

    Then, in your `BUILD` file, simply load the macro and invoke it to have all
    of the targets created:

    ```
    load("//path/to/your/package:tests.bzl", "your_test_suite")
    your_test_suite()
    ```

    If you pass _N_ unit test rules to `unittest.suite`, _N_ + 1 targets will be
    created: a `test_suite` target named `${name}` (where `${name}` is the name
    argument passed in here) and targets named `${name}_test_${i}`, where `${i}`
    is the index of the test in the `test_rules` list, which is used to uniquely
    name each target.

    Args:
      name: The name of the `test_suite` target, and the prefix of all the test
          target names.
      *test_rules: A list of test rules defined by `unittest.make`.
    """
    test_names = []
    for index, test_rule in enumerate(test_rules):
        test_name = "%s_test_%d" % (name, index)
        test_rule(name = test_name)
        test_names.append(test_name)

    native.test_suite(
        name = name,
        tests = [":%s" % t for t in test_names],
    )

def _begin(ctx):
    """Begins a unit test.

    This should be the first function called in a unit test implementation
    function. It initializes a "test environment" that is used to collect
    assertion failures so that they can be reported and logged at the end of the
    test.

    Args:
      ctx: The Starlark context. Pass the implementation function's `ctx` argument
          in verbatim.

    Returns:
      A test environment struct that must be passed to assertions and finally to
      `unittest.end`. Do not rely on internal details about the fields in this
      struct, as they may change.
    """
    return struct(ctx = ctx, failures = [])

def _end_analysis_test(env):
    """Ends an analysis test and logs the results.

    This must be called and returned at the end of an analysis test implementation function so
    that the results are reported.

    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      A list of providers needed to automatically register the analysis test result.
    """
    return [AnalysisTestResultInfo(
        success = (len(env.failures) == 0),
        message = "\n".join(env.failures),
    )]

def _end(env):
    """Ends a unit test and logs the results.

    This must be called and returned at the end of a unit test implementation function so
    that the results are reported.

    Args:
      env: The test environment returned by `unittest.begin`.

    Returns:
      A list of providers needed to automatically register the test result.
    """

    tc = env.ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info
    testbin = env.ctx.actions.declare_file(env.ctx.label.name + tc.file_ext)
    if env.failures:
        cmd = tc.failure_templ % tc.join_on.join(env.failures)
    else:
        cmd = tc.success_templ

    env.ctx.actions.write(
        output = testbin,
        content = cmd,
        is_executable = True,
    )
    return [DefaultInfo(executable = testbin)]

def _fail(env, msg):
    """Unconditionally causes the current test to fail.

    Args:
      env: The test environment returned by `unittest.begin`.
      msg: The message to log describing the failure.
    """
    full_msg = "In test %s: %s" % (env.ctx.attr._impl_name, msg)

    # There isn't a better way to output the message in Starlark, so use print.
    # buildifier: disable=print
    print(full_msg)
    env.failures.append(full_msg)

def _assert_true(
        env,
        condition,
        msg = "Expected condition to be true, but was false."):
    """Asserts that the given `condition` is true.

    Args:
      env: The test environment returned by `unittest.begin`.
      condition: A value that will be evaluated in a Boolean context.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if not condition:
        _fail(env, msg)

def _assert_false(
        env,
        condition,
        msg = "Expected condition to be false, but was true."):
    """Asserts that the given `condition` is false.

    Args:
      env: The test environment returned by `unittest.begin`.
      condition: A value that will be evaluated in a Boolean context.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if condition:
        _fail(env, msg)

def _assert_equals(env, expected, actual, msg = None):
    """Asserts that the given `expected` and `actual` values are equal.

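    For example, inside a test implementation function (`my_square` is a
    placeholder for whatever computation is being tested):

    ```
    asserts.equals(env, 4, my_square(2))
    ```
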
    Args:
      env: The test environment returned by `unittest.begin`.
      expected: The expected value of some computation.
      actual: The actual value returned by some computation.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if expected != actual:
        expectation_msg = 'Expected "%s", but got "%s"' % (expected, actual)
        if msg:
            full_msg = "%s (%s)" % (msg, expectation_msg)
        else:
            full_msg = expectation_msg
        _fail(env, full_msg)

def _assert_set_equals(env, expected, actual, msg = None):
    """Asserts that the given `expected` and `actual` sets are equal.

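    For example, assuming the caller has loaded the skylib sets module that
    backs this assertion (`load("@bazel_skylib//lib:new_sets.bzl", "sets")`)
    and `actual_values` is a placeholder for a list produced by the code under
    test:

    ```
    asserts.set_equals(env, sets.make([1, 2, 3]), sets.make(actual_values))
    ```
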
    Args:
      env: The test environment returned by `unittest.begin`.
      expected: The expected set resulting from some computation.
      actual: The actual set returned by some computation.
      msg: An optional message that will be printed that describes the failure.
          If omitted, a default will be used.
    """
    if not new_sets.is_equal(expected, actual):
        missing = new_sets.difference(expected, actual)
        unexpected = new_sets.difference(actual, expected)
        expectation_msg = "Expected %s, but got %s" % (new_sets.str(expected), new_sets.str(actual))
        if new_sets.length(missing) > 0:
            expectation_msg += ", missing are %s" % (new_sets.str(missing))
        if new_sets.length(unexpected) > 0:
            expectation_msg += ", unexpected are %s" % (new_sets.str(unexpected))
        if msg:
            full_msg = "%s (%s)" % (msg, expectation_msg)
        else:
            full_msg = expectation_msg
        _fail(env, full_msg)

_assert_new_set_equals = _assert_set_equals

def _expect_failure(env, expected_failure_msg = ""):
    """Asserts that the target under test has failed with a given error message.

    This requires that the analysis test is created with `analysistest.make()` and
    `expect_failure = True` is specified.

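    A sketch of the typical shape of such a test; the names and the expected
    message are placeholders:

    ```
    def _your_failure_test(ctx):
      env = analysistest.begin(ctx)

      asserts.expect_failure(env, "This rule should never work")

      return analysistest.end(env)

    your_failure_test = analysistest.make(
        _your_failure_test,
        expect_failure = True,
    )
    ```
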
    Args:
      env: The test environment returned by `analysistest.begin`.
      expected_failure_msg: The error message to expect as a result of analysis failures.
    """
    dep = _target_under_test(env)
    if AnalysisFailureInfo in dep:
        actual_errors = ""
        for cause in dep[AnalysisFailureInfo].causes.to_list():
            actual_errors += cause.message + "\n"
        if actual_errors.find(expected_failure_msg) < 0:
            expectation_msg = "Expected errors to contain '%s' but did not. " % expected_failure_msg
            expectation_msg += "Actual errors:%s" % actual_errors
            _fail(env, expectation_msg)
    else:
        _fail(env, "Expected failure of target_under_test, but found success")

def _target_actions(env):
    """Returns a list of actions registered by the target under test.

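    For example, a test might assert on an action's mnemonic; the mnemonic
    shown is a placeholder for whatever the rule under test registers:

    ```
    actions = analysistest.target_actions(env)
    asserts.equals(env, "FileWrite", actions[0].mnemonic)
    ```
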
    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      A list of actions registered by the target under test
    """

    # Validate?
    return _target_under_test(env)[_ActionInfo].actions

def _target_bin_dir_path(env):
    """Returns ctx.bin_dir.path for the target under test.

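    This is useful for constructing expected output paths. For example, with a
    placeholder package and file name:

    ```
    expected = analysistest.target_bin_dir_path(env) + "/my/pkg/out.txt"
    ```
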
    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      Output bin dir path string.
    """
    return _target_under_test(env)[_ActionInfo].bin_path

def _target_under_test(env):
    """Returns the target under test.

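    For example, a test can read the label or providers off the returned
    target; the asserted name is a placeholder:

    ```
    target = analysistest.target_under_test(env)
    asserts.equals(env, "expected_name", target.label.name)
    ```
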
    Args:
      env: The test environment returned by `analysistest.begin`.

    Returns:
      The target under test.
    """
    result = getattr(env.ctx.attr, "target_under_test")
    if types.is_list(result):
        if result:
            return result[0]
        else:
            fail("test rule does not have a target_under_test")
    return result

asserts = struct(
    expect_failure = _expect_failure,
    equals = _assert_equals,
    false = _assert_false,
    set_equals = _assert_set_equals,
    new_set_equals = _assert_new_set_equals,
    true = _assert_true,
)

unittest = struct(
    make = _make,
    suite = _suite,
    begin = _begin,
    end = _end,
    fail = _fail,
)

analysistest = struct(
    make = _make_analysis_test,
    begin = _begin,
    end = _end_analysis_test,
    fail = _fail,
    target_actions = _target_actions,
    target_bin_dir_path = _target_bin_dir_path,
    target_under_test = _target_under_test,
)