• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1#!/usr/bin/env python3
2# Owner(s): ["oncall: distributed"]
3
4import sys
5
6import torch
7import torch.distributed as dist
8
9
# Skip the whole module when torch was built without the distributed package
# (e.g. a CPU-only or minimal build). Exit code 0 so CI treats this as a
# successful skip rather than a test failure.
if not dist.is_available():
    print("Distributed not available, skipping tests", file=sys.stderr)
    sys.exit(0)
13
14from torch.testing._internal.common_utils import IS_CI, run_tests
15from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
16    TensorPipeRpcAgentTestFixture,
17)
18from torch.testing._internal.distributed.rpc_utils import (
19    generate_tests,
20    GENERIC_TESTS,
21    TENSORPIPE_TESTS,
22)
23
24
# On CircleCI these tests are already exercised by the CPU jobs; to save
# resources, do not repeat them on GPU jobs, since they wouldn't provide any
# additional test signal.
if not (IS_CI and torch.cuda.is_available()):
    tensorpipe_tests = generate_tests(
        "TensorPipe",
        TensorPipeRpcAgentTestFixture,
        GENERIC_TESTS + TENSORPIPE_TESTS,
        __name__,
    )
    # Inject the generated test classes into this module's namespace so the
    # test runner can discover them.
    globals().update(tensorpipe_tests)
36
37
# Standard PyTorch test entry point: delegate to common_utils.run_tests,
# which handles argument parsing and test discovery for this module.
if __name__ == "__main__":
    run_tests()
40