| /external/pytorch/torch/_numpy/ |
| D | _casting_dicts.py |
    3   import torch
    11  torch.float16: {
    12      torch.float16: True,
    13      torch.float32: False,
    14      torch.float64: False,
    15      torch.complex64: False,
    16      torch.complex128: False,
    17      torch.uint8: False,
    18      torch.uint16: False,
    19      torch.uint32: False,
    [all …]
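These dicts encode NumPy-style can-cast tables keyed by source and destination dtype. A minimal sketch of how such a table might be consulted; the `_can_cast_dict` name and its outer "casting mode" key are assumptions, not confirmed by the snippet:

    import torch
    from torch._numpy import _casting_dicts

    # Hypothetical lookup mirroring numpy.can_cast semantics; the dict name
    # and the outer "casting" key are assumptions based on the snippet.
    def can_cast(from_dtype, to_dtype, casting="no"):
        return _casting_dicts._can_cast_dict[casting][from_dtype][to_dtype]

    print(can_cast(torch.float16, torch.float16))  # True under "no" casting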
|
| /external/pytorch/torch/_dynamo/ |
| D | trace_rules.py |
    38  import torch
    39  import torch._inductor.test_operators
    40  import torch.distributed
    41  import torch.utils._content_store
    42  from torch.utils import _config_module
    93  * PyTorch(torch) is in the BUILTIN_SKIPLIST by default, but there are many cases
    94    where we want to inline the functions under the torch namespace.
    118   - torch.add: should be put into the FX graph.
    119   - torch.is_floating_point: constant folded.
    123   For developers: If you add/remove a torch-level API, it may trigger failures from
    [all …]
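A minimal sketch of the distinction the docstring draws: under torch.compile, torch.add is captured as an FX graph node, while torch.is_floating_point can be resolved at trace time because the input dtype is static.

    import torch

    @torch.compile
    def f(x):
        if torch.is_floating_point(x):  # constant-folded during tracing
            return torch.add(x, 1.0)    # captured as a graph op
        return x

    print(f(torch.randn(4)))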
|
| /external/pytorch/functorch/dim/ |
| D | op_properties.py |
    6   import torch
    62  *(getattr(torch.Tensor, m) for m in pointwise_methods),
    63  torch.nn.functional.dropout,
    64  torch.where,
    65  torch.Tensor.abs,
    66  torch.abs,
    67  torch.Tensor.acos,
    68  torch.acos,
    69  torch.Tensor.acosh,
    70  torch.acosh,
    [all …]
|
| /external/pytorch/test/mobile/model_test/ |
| D | math_ops.py |
    1   # https://pytorch.org/docs/stable/torch.html#math-operations
    5   import torch
    8   class PointwiseOpsModule(torch.nn.Module):
    13      a = torch.randn(4)
    14      b = torch.randn(4)
    15      t = torch.tensor([-1, -2, 3], dtype=torch.int8)
    16      r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
    17      t = torch.tensor([-1, -2, 3], dtype=torch.int8)
    18      s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
    19      f = torch.zeros(3)
    [all …]
|
| D | tensor_ops.py |
    1   import torch
    4   class TensorOpsModule(torch.nn.Module):
    9       a = torch.randn(4)
    10      b = torch.tensor([1.5])
    11      x = torch.ones((2,))
    12      c = torch.randn(4, dtype=torch.cfloat)
    13      w = torch.rand(4, 4, 4, 4)
    14      v = torch.rand(4, 4, 4, 4)
    16      # torch.is_tensor(a),
    17      # torch.is_storage(a),
    [all …]
|
| /external/pytorch/test/typing/pass/ |
| D | math_ops.py |
    4   import torch
    7   a = torch.randn(4)
    8   b = torch.randn(4)
    9   t = torch.tensor([-1, -2, 3], dtype=torch.int8)
    12  torch.abs(torch.tensor([-1, -2, 3]))
    13  torch.absolute(torch.tensor([-1, -2, 3]))
    16  torch.acos(a)
    17  torch.arccos(a)
    20  torch.acosh(a.uniform_(1, 2))
    23  torch.add(a, 20)
    [all …]
|
| D | creation_ops.py |
    5   import torch
    6   from torch.testing._internal.common_utils import TEST_NUMPY
    15  # torch.tensor()
    16  torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
    17  torch.tensor([0, 1])
    18  torch.tensor(
    19      [[0.11111, 0.222222, 0.3333333]], dtype=torch.float64, device=torch.device("cuda:0")
    21  torch.tensor(3.14159)
    23  # torch.sparse_coo_tensor
    24  i = torch.tensor([[0, 1, 1], [2, 0, 2]])
    [all …]
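The snippet cuts off right after the COO index tensor `i`. A sketch completing the construction; the values tensor and shape below are assumptions chosen to be consistent with the indices:

    import torch

    i = torch.tensor([[0, 1, 1], [2, 0, 2]])   # 2-D indices: (ndim, nnz)
    v = torch.tensor([3.0, 4.0, 5.0])           # one value per index column
    s = torch.sparse_coo_tensor(i, v, (2, 3))
    print(s.to_dense())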
|
| /external/pytorch/test/cpp/lazy/ |
| D | test_lazy_ops.cpp |
    5   #include <torch/csrc/lazy/core/debug_util.h>
    6   #include <torch/csrc/lazy/core/helpers.h>
    7   #include <torch/csrc/lazy/core/ir_builder.h>
    8   #include <torch/csrc/lazy/core/lazy_graph_executor.h>
    9   #include <torch/csrc/lazy/core/metrics.h>
    10  #include <torch/csrc/lazy/core/permutation_util.h>
    11  #include <torch/csrc/lazy/ts_backend/dynamic_ir.h>
    12  #include <torch/csrc/lazy/ts_backend/ts_backend_impl.h>
    13  #include <torch/torch.h>
    16  namespace torch {
    [all …]
|
| /external/pytorch/test/cpp/api/ |
| D | optim_baseline.h |
    3   #include <torch/types.h>
    9   inline std::vector<std::vector<torch::Tensor>> LBFGS() {
    12      torch::tensor(
    19      torch::tensor(
    21      torch::tensor(
    23      torch::tensor({-0.43108206822505857}),
    26      torch::tensor(
    33      torch::tensor(
    35      torch::tensor(
    37      torch::tensor({-4.776742087865583}),
    [all …]
|
| /external/executorch/backends/qualcomm/quantizer/ |
| D | qconfig.py |
    4   import torch
    5   from torch import Tensor
    6   from torch.ao.quantization.fake_quantize import (
    10  from torch.ao.quantization.observer import (
    16  from torch.ao.quantization.quantizer import DerivedQuantizationSpec, QuantizationSpec
    17  from torch.fx import Node
    39  (broadcast_act_scale, broadcast_weight_scale) = torch.broadcast_tensors(
    42  derived_scale = (broadcast_act_scale * broadcast_weight_scale).to(torch.float32)
    43  derived_zero = torch.zeros(derived_scale.size()).to(torch.int32)
    54      dtype=torch.int32,
    [all …]
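A minimal sketch of the derivation the snippet performs: the bias scale for a quantized op is commonly derived as activation_scale * weight_scale, with a zero-point of 0. The tensor values and shapes here are assumptions:

    import torch

    act_scale = torch.tensor([0.02])                 # per-tensor activation scale
    weight_scale = torch.tensor([0.1, 0.05, 0.08])   # per-channel weight scales

    act_s, weight_s = torch.broadcast_tensors(act_scale, weight_scale)
    derived_scale = (act_s * weight_s).to(torch.float32)
    derived_zero = torch.zeros(derived_scale.size()).to(torch.int32)
    print(derived_scale, derived_zero)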
|
| /external/pytorch/test/inductor/ |
| D | test_b2b_gemm.py |
    5   import torch
    6   from torch._inductor.runtime.benchmarking import benchmarker
    7   from torch._inductor.test_case import run_tests, TestCase
    8   from torch._inductor.utils import run_and_get_code
    9   from torch.testing._internal.inductor_utils import HAS_CUDA
    13  @torch._dynamo.config.patch(cache_size_limit=32)
    14  @torch._inductor.config.patch(b2b_gemm_pass=True)
    21  def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    22      g = torch.nn.GELU()
    23      return torch.mm(g(torch.mm(m1, m2)), m3)
    [all …]
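A sketch of compiling the fused mm-activation-mm pattern the test exercises; the config patch is the one shown in the snippet, while the matrix sizes are assumptions for illustration:

    import torch
    import torch._inductor.config

    def f(m1, m2, m3):
        return torch.mm(torch.nn.functional.gelu(torch.mm(m1, m2)), m3)

    with torch._inductor.config.patch(b2b_gemm_pass=True):
        out = torch.compile(f)(
            torch.randn(64, 32), torch.randn(32, 64), torch.randn(64, 16)
        )
    print(out.shape)  # torch.Size([64, 16])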
|
| /external/executorch/backends/arm/test/ops/ |
| D | test_batch_norm.py |
    13  import torch
    25  torch.zeros(1, 32, 112, 112),
    34  torch.zeros(1, 32, 112, 112),
    43  torch.zeros(1, 32, 112, 112),
    48  torch.rand(32),
    49  torch.rand(32),
    50  torch.rand(32),
    51  torch.rand(32),
    56  torch.zeros(1, 32, 112, 112),
    61  torch.rand(32),
    [all …]
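How these tensors plausibly fit together: a (N, C, H, W) input with C=32 channels plus per-channel running stats, weight, and bias. The exact pairing is an assumption based on the shapes in the snippet:

    import torch
    import torch.nn.functional as F

    x = torch.zeros(1, 32, 112, 112)
    running_mean, running_var = torch.rand(32), torch.rand(32)
    weight, bias = torch.rand(32), torch.rand(32)

    out = F.batch_norm(x, running_mean, running_var, weight, bias, training=False)
    print(out.shape)  # torch.Size([1, 32, 112, 112])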
|
| /external/pytorch/test/ |
| D | test_type_promotion.py |
    7   import torch
    9   from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
    13  from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDe…
    15  from torch.testing._internal.common_dtype import (
    24  # load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
    29  # the default dtype being torch.float and again with the default dtype
    30  # being torch.double.
    34  with set_default_dtype(torch.float):
    36  with set_default_dtype(torch.double):
    51  int_tensor = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
    [all …]
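A small illustration of the promotion rules this suite exercises: mixing an integer tensor with a float scalar promotes to the default floating dtype, and mixing with a wider tensor promotes to the wider type.

    import torch

    int_tensor = torch.ones([4, 4, 4], dtype=torch.int32)
    print((int_tensor + 1).dtype)    # torch.int32  (int scalar keeps int)
    print((int_tensor + 1.0).dtype)  # torch.float32 (default float dtype)
    print((int_tensor + torch.ones(1, dtype=torch.float64)).dtype)  # torch.float64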
|
| /external/pytorch/torch/csrc/jit/runtime/ |
| D | serialized_shape_function_registry.cpp |
    9   #include <torch/csrc/jit/jit_log.h>
    10  #include <torch/csrc/jit/passes/inliner.h>
    11  #include <torch/csrc/jit/runtime/operator.h>
    12  #include <torch/csrc/jit/runtime/serialized_shape_function_registry.h>
    16  namespace torch::jit {
    23  for _0 in range(torch.len(self)):
    25      _1 = torch.append(out, elem)
    30  if torch.eq(torch.len(out), 2):
    34  if torch.eq(torch.len(self), 3):
    37  _0 = torch.eq(torch.len(self), 4)
    [all …]
|
| /external/pytorch/test/dynamo/ |
| D | test_ctx_manager.py |
    4   import torch
    5   import torch._dynamo.test_case
    6   import torch._dynamo.testing
    7   import torch.onnx.operators
    8   from torch._dynamo.testing import EagerAndRecordGraphs, normalize_gm, same
    9   from torch.nn import functional as F
    10  from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
    11  from torch.testing._internal.common_utils import TEST_WITH_ROCM
    16      self.prev = torch.is_grad_enabled()
    20      torch._C._set_grad_enabled(self.mode)
    [all …]
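A minimal sketch of the kind of custom grad-mode context manager the test traces: save the current mode on entry, restore it on exit. Written here with the public torch.set_grad_enabled rather than the private torch._C._set_grad_enabled the snippet uses:

    import torch

    class GradMode:
        def __init__(self, mode: bool):
            self.mode = mode

        def __enter__(self):
            self.prev = torch.is_grad_enabled()
            torch.set_grad_enabled(self.mode)

        def __exit__(self, *exc):
            torch.set_grad_enabled(self.prev)

    with GradMode(False):
        assert not torch.is_grad_enabled()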
|
| D | test_trace_rules.py |
    11  import torch
    12  import torch._dynamo.config as config
    13  import torch._dynamo.test_case
    14  import torch._functorch.deprecated as deprecated_func
    15  from torch._dynamo.trace_rules import (
    23  from torch._dynamo.utils import hashable, is_safe_constant, istype
    24  from torch._dynamo.variables import TorchInGraphFunctionVariable, UserFunctionVariable
    25  from torch.testing._internal.common_utils import skipIfWindows
    36  "torch._nested_tensor_from_mask",
    37  "torch._nested_from_padded",
    [all …]
|
| /external/pytorch/torch/ao/quantization/pt2e/representation/ |
| D | rewrite.py |
    6   import torch
    7   from torch._higher_order_ops.out_dtype import out_dtype
    8   from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib  # noqa: F401
    9   from torch.ao.quantization.pt2e.export_utils import _WrapperModule
    10  from torch.ao.quantization.pt2e.utils import (
    16  from torch.fx import GraphModule
    17  from torch.fx.subgraph_rewriter import replace_pattern
    26  torch.randint(-128, 127, (2, 5), dtype=torch.int8),
    27  torch.randn(1, dtype=torch.float),
    28  torch.zeros(1, dtype=torch.int),
    [all …]
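A sketch of the decomposed quantize/dequantize arithmetic such rewrites target: dequantize is (x_int - zero_point) * scale, quantize is clamp(round(x / scale) + zero_point, qmin, qmax). The values below mirror the example inputs in the snippet; this is the reference math, not the pass's actual replacement pattern:

    import torch

    x_int8 = torch.randint(-128, 127, (2, 5), dtype=torch.int8)
    scale = torch.randn(1, dtype=torch.float).abs() + 0.01
    zero_point = torch.zeros(1, dtype=torch.int)

    x_fp = (x_int8.to(torch.int32) - zero_point) * scale  # dequantize
    x_q = torch.clamp(
        torch.round(x_fp / scale) + zero_point, -128, 127
    ).to(torch.int8)                                      # quantize
    assert torch.equal(x_q, x_int8)  # round trip recovers the input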
|
| /external/pytorch/test/jit/ |
| D | test_batch_mm.py |
    3   import torch
    4   from torch.testing import FileCheck
    5   from torch.testing._internal.jit_utils import JitTestCase
    20  torch.tensor([[1 + x, 2 + x, 3 + x], [4 + x, 5 + x, 6 + x]])
    22  else torch.tensor([[1 + x, 2 + x], [3 + x, 4 + x], [5 + x, 6 + x]])
    28  T1: torch.Tensor,
    29  T2: torch.Tensor,
    30  T3: torch.Tensor,
    31  T4: torch.Tensor,
    32  T5: torch.Tensor,
    [all …]
|
| /external/pytorch/ |
| D | build_variables.bzl |
    21  "torch/csrc/autograd/generated/Functions.cpp",
    22  "torch/csrc/autograd/generated/VariableType_0.cpp",
    23  "torch/csrc/autograd/generated/VariableType_1.cpp",
    24  "torch/csrc/autograd/generated/VariableType_2.cpp",
    25  "torch/csrc/autograd/generated/VariableType_3.cpp",
    26  "torch/csrc/autograd/generated/VariableType_4.cpp",
    27  "torch/csrc/autograd/generated/ViewFuncs.cpp",
    28  "torch/csrc/autograd/generated/TraceType_0.cpp",
    29  "torch/csrc/autograd/generated/TraceType_1.cpp",
    30  "torch/csrc/autograd/generated/TraceType_2.cpp",
    [all …]
|
| /external/pytorch/torch/_prims/ |
| D | context.py |
    6   import torch
    7   import torch._decomp
    8   import torch._prims
    9   import torch._refs
    10  import torch._refs.nn
    11  import torch._refs.nn.functional
    12  import torch._refs.special
    13  import torch.overrides
    14  from torch._prims_common import torch_function_passthrough
    20  Mapping of torch API functions to torch._refs functions.
    [all …]
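The mapping the docstring mentions pairs public torch ops with Python reference implementations in torch._refs. A small equivalence check as a sketch of that correspondence:

    import torch
    import torch._refs

    a, b = torch.randn(3), torch.randn(3)
    # The ref should agree numerically with the public op it mirrors.
    assert torch.allclose(torch._refs.add(a, b), torch.add(a, b))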
|
| /external/pytorch/test/cpp_api_parity/ |
| D | parity-tracker.md |
    3   ## torch::nn
    6   torch::nn::Sequential|Yes|No
    7   torch::nn::ModuleList|Yes|No
    8   torch::nn::ModuleDict|No|No
    9   torch::nn::ParameterList|No|No
    10  torch::nn::ParameterDict|No|No
    11  torch::nn::Conv1d|Yes|No
    12  torch::nn::Conv2d|Yes|No
    13  torch::nn::Conv3d|Yes|No
    14  torch::nn::ConvTranspose1d|Yes|No
    [all …]
|
| /external/executorch/kernels/quantized/test/ |
| D | test_quant_dequant_per_token.py |
    11  import torch
    12  from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib  # noqa: F401
    18  input_tensor = torch.tensor(
    19      [[-0.5, 0.3, 1.2], [0.1, -0.8, 2.1], [-5, 1, 2]], dtype=torch.float32
    21  scale = torch.tensor([0.5, 0.8, 1.0], dtype=torch.float64)
    23  zero_point = torch.tensor([-1, -2, 0])
    25  quantized_tensor = torch.ops.quantized_decomposed.quantize_per_token(
    26      input_tensor, scale, zero_point, -128, 127, torch.int8
    28  expected_quantized_tensor = torch.ops.et_quant_test.quantize_per_token(
    29      input_tensor, scale, zero_point, -128, 127, torch.int8
    [all …]
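The per-token math the two ops are expected to agree on: each row ("token") gets its own scale and zero point. A sketch of the reference computation, not the ops' actual implementation:

    import torch

    x = torch.tensor([[-0.5, 0.3, 1.2], [0.1, -0.8, 2.1], [-5.0, 1.0, 2.0]])
    scale = torch.tensor([0.5, 0.8, 1.0]).unsqueeze(-1)       # one per row
    zero_point = torch.tensor([-1, -2, 0]).unsqueeze(-1)

    q = torch.clamp(torch.round(x / scale) + zero_point, -128, 127).to(torch.int8)
    print(q)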
|
| /external/pytorch/torch/ |
| D | overrides.py |
    4   While most of the torch API and handling for ``__torch_function__`` happens
    5   at the C++ level, some of the torch API is written in Python so we need
    8   has_torch_function. See torch/functional.py and test/test_overrides.py
    33  import torch
    34  from torch._C import (
    64  module: str = "torch",
    105 A tuple of functions that are publicly available in the torch API but cannot
    111 >>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
    113 >>> torch.add in torch.overrides.get_ignored_functions()
    116 Tensor = torch.Tensor
    [all …]
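A minimal sketch of the protocol this module supports: a Tensor subclass intercepts torch API calls via `__torch_function__` and then defers to the default implementation:

    import torch

    class LoggingTensor(torch.Tensor):
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            print(f"intercepted {func.__name__}")
            return super().__torch_function__(func, types, args, kwargs or {})

    t = torch.randn(3).as_subclass(LoggingTensor)
    out = torch.add(t, 1)  # prints "intercepted add"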
|
| /external/pytorch/torch/testing/_internal/ |
| D | autocast_test_lists.py |
    5   import torch
    6   from torch.testing._internal.common_utils import TEST_WITH_ROCM
    7   from torch.testing._internal.common_utils import TestCase
    12  input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
    14  hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
    15         torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
    16        torch.randn((n, n), device=dev, dtype=torch.float32),)
    18  weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32),  # weight_ih
    19             torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32),  # weight_hh
    20             torch.randn((num_chunks * n), device=dev, dtype=torch.float32),    # bias_ih
    [all …]
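What these op lists feed: autocast tests that check each op produces the expected dtype inside the autocast context. A CPU sketch of the mechanism being tested:

    import torch

    a = torch.randn(4, 4)
    b = torch.randn(4, 4)
    with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
        out = torch.mm(a, b)
    # mm is on autocast's lower-precision list, so the result is bfloat16.
    print(out.dtype)  # torch.bfloat16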
|
| /external/pytorch/torch/ao/nn/quantized/reference/modules/ |
| D | utils.py |
    4   import torch
    12  class ReferenceQuantizedModule(torch.nn.Module):
    16      "qscheme": torch.per_tensor_affine,
    17      "dtype": torch.quint8,
    21      self.weight_qscheme: torch.qscheme = weight_qparams["qscheme"]
    25      torch.per_tensor_affine,
    26      torch.per_channel_affine,
    27      torch.per_channel_affine_float_qparams,
    30      torch.quint8,
    31      torch.qint8,
    [all …]
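The pattern reference modules implement: keep a float weight plus its qparams, and emulate quantization with a quantize/dequantize round trip in forward. A sketch under the per-tensor-affine scheme from the snippet; the scale and zero point are assumed values:

    import torch

    weight = torch.randn(8, 4)
    scale, zero_point = 0.1, 0
    qweight = torch.quantize_per_tensor(weight, scale, zero_point, torch.qint8)
    decoded = qweight.dequantize()  # the float tensor actually used in compute
    print(decoded.dtype, decoded.shape)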
|