1"""
2:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
3of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
4optimizer locally on the workers where the parameters live.  The distributed
5optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
6apply the gradients on each worker.
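
A minimal, illustrative sketch of the intended flow is shown below. It is not a
runnable script on its own: it assumes an RPC agent has already been initialized
and that a peer named ``"worker1"`` exists::

    >>> import torch
    >>> import torch.distributed.autograd as dist_autograd
    >>> import torch.distributed.rpc as rpc
    >>> from torch import optim
    >>> from torch.distributed.optim import DistributedOptimizer
    >>>
    >>> with dist_autograd.context() as context_id:
    ...     # Forward pass: create remote tensors on "worker1" and use them locally.
    ...     rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
    ...     rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
    ...     loss = rref1.to_here() + rref2.to_here()
    ...
    ...     # Backward pass through the distributed autograd engine.
    ...     dist_autograd.backward(context_id, [loss.sum()])
    ...
    ...     # Run a local optim.SGD step on each worker that owns a parameter RRef.
    ...     dist_optim = DistributedOptimizer(optim.SGD, [rref1, rref2], lr=0.05)
    ...     dist_optim.step(context_id)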
7"""
import warnings

import torch
from torch import optim

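# Private helpers that fuse the optimizer step into the backward pass, applying
# the optimizer to each parameter as its gradient becomes ready.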
from .apply_optimizer_in_backward import (
    _apply_optimizer_in_backward,
    _get_in_backward_optimizers,
)
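# TorchScript-compatible "functional" variants of the torch.optim algorithms.
# DistributedOptimizer uses these to run the optimizer step on remote workers;
# see the deprecation warning below regarding their TorchScript support.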
from .functional_adadelta import _FunctionalAdadelta
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamax import _FunctionalAdamax
from .functional_adamw import _FunctionalAdamW
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_sgd import _FunctionalSGD
from .named_optimizer import _NamedOptimizer
from .utils import as_functional_optim


with warnings.catch_warnings():
    warnings.simplefilter("always")
    warnings.warn(
        "`TorchScript` support for functional optimizers is deprecated "
        "and will be removed in a future PyTorch release. "
        "Consider using the `torch.compile` optimizer instead.",
        DeprecationWarning,
        stacklevel=2,
    )

# DistributedOptimizer imports torch.distributed.rpc names, so gate availability
# based on RPC being available.
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer


__all__ = [
    "as_functional_optim",
    "DistributedOptimizer",
    "PostLocalSGDOptimizer",
    "ZeroRedundancyOptimizer",
]