Lines Matching full:torch

11 import torch
12 import torch._dynamo.config as config
13 import torch._dynamo.test_case
14 import torch._functorch.deprecated as deprecated_func
15 from torch._dynamo.trace_rules import (
23 from torch._dynamo.utils import hashable, is_safe_constant, istype
24 from torch._dynamo.variables import TorchInGraphFunctionVariable, UserFunctionVariable
25 from torch.testing._internal.common_utils import skipIfWindows
36 "torch._nested_tensor_from_mask",
37 "torch._nested_from_padded",
38 "torch.sparse_compressed_tensor",
39 "torch.sparse_bsc_tensor",
40 "torch.sparse_bsr_tensor",
41 "torch.sparse_coo_tensor",
42 "torch.sparse_csc_tensor",
43 "torch.sparse_csr_tensor",
44 "torch.cuda._get_device_properties",
46 "torch._functionalize_are_all_mutations_under_no_grad_or_inference_mode",
47 "torch._cslt_sparse_mm_search",
48 "torch._C._abort",
49 "torch._C._mps_is_on_macos_or_newer",
50 "torch._C._swap_tensor_impl",
51 "torch._C._unsafe_reset_storage",
52 "torch._dynamo.eval_frame.reset_code",
53 "torch._C.autocast_decrement_nesting",
54 "torch._C.autocast_increment_nesting",
55 "torch._C.clear_autocast_cache",
56 "torch._C.set_anomaly_enabled",
57 "torch._C.set_autocast_cache_enabled",
58 "torch._C.set_autocast_cpu_dtype",
59 "torch._C.set_autocast_cpu_enabled",
60 "torch._C.set_autocast_enabled",
61 "torch._C.set_autocast_gpu_dtype",
62 "torch._C.set_autocast_ipu_dtype",
63 "torch._C.set_autocast_ipu_enabled",
64 "torch._C.set_autocast_xla_dtype",
65 "torch._C.set_autocast_xla_enabled",
66 "torch.resize_as_",
67 "torch.resize_as_sparse_",
68 "torch._C._data_address",
69 "torch._C._is_cow_tensor",
70 "torch._lazy_clone",
71 "torch._test_parallel_materialize",
72 "torch._C._storage_address",
73 "torch._C._pickle_save",
74 "torch._validate_sparse_compressed_tensor_args",
75 "torch._validate_sparse_csr_tensor_args",
76 "torch._validate_sparse_bsr_tensor_args",
77 "torch._validate_sparse_csc_tensor_args",
78 "torch._validate_sparse_coo_tensor_args",
79 "torch._validate_sparse_bsc_tensor_args",
80 "torch._validate_compressed_sparse_indices",
82 if torch._C._llvm_enabled():
84 "torch._C._te.set_llvm_aot_workflow",
85 "torch._C._te.set_llvm_target_cpu",
86 "torch._C._te.set_llvm_target_attrs",
87 "torch._C._te.set_llvm_target_triple",
91 # Helper function to dump the torch name rule map generated based on
114 Walk torch.* and collect the ids of everything in it
117 warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
127 torch._C._cuda_isCurrentStreamCapturing,
128 torch._C._graph_pool_handle,
132 # if it's a torch function or method.
162 allowed_modules = ("torch", "math")
163 # torch.nn.modules.rnn is disallowed because these modules internally
169 "torch.optim.",
170 "torch.nn.modules.rnn.",
171 "torch._dynamo.",
172 "torch._C._dynamo.",
173 "torch._inductor.",
174 "torch._C.inductor.",
175 "torch.fx.",
176 "torch._C._autograd",
177 "torch._C._cudart",
178 "torch._C._distributed_autograd",
179 "torch._C._distributed_c10d",
180 "torch._C._distributed_rpc",
181 "torch._C._functorch",
182 "torch._C._monitor",
183 "torch._C._nvtx",
184 "torch._C._lazy",
185 "torch._C._profiler",
186 "torch.__config__",
187 "torch._custom_op",
188 "torch._decomp",
189 "torch._dispatch",
190 "torch._export",
191 "torch._functorch.make_functional",
192 "torch._functorch.compile_utils",
193 "torch._functorch.partitioners",
194 "torch._functorch.aot_autograd",
195 "torch._functorch.compilers",
196 "torch._functorch.fx_minifier",
197 "torch.autograd.profiler_util",
198 "torch.autograd.profiler",
199 "torch._jit_internal",
200 "torch._library",
201 "torch._lobpcg",
202 "torch._logging",
203 "torch._meta_registrations",
204 "torch._namedtensor_internals",
205 "torch._numpy",
206 "torch._sources",
207 "torch._subclasses",
208 "torch._tensor",
209 "torch._tensor_str",
210 "torch._utils",
211 "torch._utils_internal",
212 "torch._vmap_internals",
213 "torch.compiler",
214 "torch.distributed",
215 "torch.export",
216 "torch.hub",
217 "torch.jit",
218 "torch.library",
219 "torch.masked.maskedtensor",
220 "torch.nn.init",
221 "torch.nn.modules.module",
222 "torch.nn.parallel",
223 "torch.nn.utils",
224 "torch.multiprocessing",
225 "torch.onnx",
226 "torch.overrides",
227 "torch.package",
228 "torch.profiler",
229 "torch.serialization",
230 "torch.storage",
231 "torch.utils",
232 "torch.distributed.",
262 import torch._ops
264 if isinstance(obj, torch._ops.HigherOrderOperator):
269 torch.func.grad,
271 torch.func.vmap,
273 torch.nn.functional.triplet_margin_with_distance_loss,
274 torch.cond,
279 if obj.__name__.startswith("torch.") and _is_allowed_module_prefix(
293 _find_torch_objects(torch)
304 class TraceRuleTests(torch._dynamo.test_case.TestCase):
309 f"New torch objects: {x} "
311 "Refer the instruction in `torch/_dynamo/trace_rules.py` for more details."
314 f"Existing torch objects: {y} were removed. "
316 "Refer the instruction in `torch/_dynamo/trace_rules.py` for more details."
368 # `torch._dynamo.utils.istype` is skipped by default
370 if istype(x, torch.Tensor):
376 # Force inline `torch._dynamo.utils.istype` by setting a trace rule.
377 _manual_torch_name_rule_map["torch._dynamo.utils.istype"] = UserFunctionVariable
386 "torch._dynamo" not in torch._dynamo.trace_rules.LEGACY_MOD_INLINELIST
388 self.assertTrue("torch._dynamo" not in torch._dynamo.trace_rules.MOD_INLINELIST)
391 "torch._dynamo.trace_rules.torch_name_rule_map",
394 "torch._dynamo.trace_rules.get_torch_obj_rule_map",
395 torch._dynamo.trace_rules.get_torch_obj_rule_map.__wrapped__,  # bypass functools.lru_cache
397 x = torch.rand(3)
398 opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
422 "torch._dynamo.trace_rules.torch_name_rule_map",
425 "torch._dynamo.trace_rules.get_torch_obj_rule_map",
426 torch._dynamo.trace_rules.get_torch_obj_rule_map.__wrapped__,
429 torch._dynamo.trace_rules.add(mod.__name__)
430 x = torch.rand(3)
431 opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
437 class TestModuleSurviveSkipFiles(torch._dynamo.test_case.TestCase):
439 not torch.distributed.is_available(),
446 from torch.testing._internal.common_fsdp import MLP
449 inp = torch.randn((2, 3))
450 frame_count_before = torch._dynamo.convert_frame.FRAME_COUNTER
453 frame_count_after = torch._dynamo.convert_frame.FRAME_COUNTER
460 from torch._dynamo.test_case import run_tests
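
The fragments around lines 368-398 above exercise trace-rule overrides. The following is a rough, non-authoritative sketch of what they do, assembled from the imports and patch targets shown (the list-of-dicts shape of torch_name_rule_map and the position of the manual map are assumptions based on recent PyTorch versions, not guarantees): register UserFunctionVariable for torch._dynamo.utils.istype, patch torch_name_rule_map, and bypass the functools.lru_cache on get_torch_obj_rule_map so the patched map is actually consulted during compilation.

import unittest.mock

import torch
import torch._dynamo.trace_rules as trace_rules
from torch._dynamo.utils import istype
from torch._dynamo.variables import UserFunctionVariable


def fn(x):
    # istype is skipped by Dynamo by default; the rule added below asks for it to be inlined.
    if istype(x, torch.Tensor):
        return x + 1
    return x - 1


# Assumption: torch_name_rule_map is a list of {qualified name: rule} dicts,
# with the manually curated map first; this may differ across PyTorch versions.
patched_map = [dict(m) for m in trace_rules.torch_name_rule_map]
patched_map[0]["torch._dynamo.utils.istype"] = UserFunctionVariable

with unittest.mock.patch(
    "torch._dynamo.trace_rules.torch_name_rule_map", patched_map
), unittest.mock.patch(
    "torch._dynamo.trace_rules.get_torch_obj_rule_map",
    trace_rules.get_torch_obj_rule_map.__wrapped__,  # bypass functools.lru_cache
):
    opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
    opt_fn(torch.rand(3))

The lru_cache bypass matters because an earlier rule lookup may already have cached the original map; without it the patched torch_name_rule_map would never be re-read.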