| /external/executorch/runtime/executor/test/ |
| D | method_meta_test.cpp |
|    54  void check_tensor(const TensorInfo& tensor_info) {  in check_tensor() function
|   107  check_tensor(in_1.get());  in TEST_F()
|   112  check_tensor(in_2.get());  in TEST_F()
|   117  check_tensor(out_1.get());  in TEST_F()
|   124  check_tensor(info_copy_ctor);  in TEST_F()
|   125  check_tensor(info_copy_assign);  in TEST_F()
|   129  check_tensor(info_move_ctor);  in TEST_F()
|
| /external/pytorch/test/distributed/_shard/sharding_spec/ |
| D | test_sharding_spec.py |
|    22  check_tensor,
|   117  check_tensor(spec.shards, torch.rand(10, 5).size())
|   144  check_tensor(spec.shards, torch.rand(6, 6).size())
|   171  check_tensor(spec.shards, torch.rand(6, 6).size())
|   229  check_tensor(spec.shards, torch.rand(10, 10, 10).size())
|   247  check_tensor(spec.shards, torch.rand(10, 3).size())
|   265  check_tensor(spec.shards, torch.rand(10, 10).size())
|   566  check_tensor(meta.shards_metadata, torch.Size((8, 8)))
|
| /external/pytorch/torch/csrc/inductor/aoti_torch/ |
| D | utils.h |
|    53  at::Tensor& check_tensor) {  in assert_inf_and_nan() argument
|    54  auto isnan_tensor = check_tensor.isnan();  in assert_inf_and_nan()
|    58  auto isinf_tensor = check_tensor.isinf();  in assert_inf_and_nan()
|
| D | shim_common.cpp |
|   954  at::Tensor* check_tensor = tensor_handle_to_tensor_pointer(tensor);  in aoti_torch_check_inf_and_nan()
|   956  assert_inf_and_nan(tensor_name, *check_tensor);  in aoti_torch_check_inf_and_nan()
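Note: the utils.h and shim_common.cpp rows above together form the AOTInductor shim's inf/NaN assertion: aoti_torch_check_inf_and_nan converts the opaque tensor handle back to an at::Tensor and hands it to assert_inf_and_nan, which scans it with isnan()/isinf(). A minimal Python sketch of the same logic (the function name only mirrors the C++ helper; it is not a Python API):

    import torch

    def assert_inf_and_nan(tensor_name: str, check_tensor: torch.Tensor) -> None:
        # Mirror of the C++ helper: fail loudly if any element is NaN or +/-inf.
        if torch.isnan(check_tensor).any().item():
            raise RuntimeError(f"{tensor_name} contains NaN")
        if torch.isinf(check_tensor).any().item():
            raise RuntimeError(f"{tensor_name} contains Inf")

    assert_inf_and_nan("buf0", torch.randn(4, 4))  # passes
    # assert_inf_and_nan("buf1", torch.tensor([float("nan")]))  # would raise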
|
| /external/pytorch/test/ao/sparsity/ |
| D | test_sparsity_utils.py |
|   112  check_tensor = fqn_to_module(model, tensor_fqn)
|   113  self.assertEqual(tensor, check_tensor)
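The sparsity test's fqn_to_module is an internal helper that resolves a fully-qualified name to the tensor it names so the test can compare it with the original. A rough equivalent using only the public nn.Module API (the model and FQN below are made up for illustration):

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
    tensor_fqn = "0.weight"  # hypothetical FQN into the model above

    check_tensor = model.get_parameter(tensor_fqn)  # nn.Module resolves dotted paths
    assert torch.equal(check_tensor, model[0].weight)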
|
| /external/pytorch/torch/distributed/_shard/sharding_spec/ |
| D | api.py |
|    14  check_tensor,
|   174  check_tensor(self.shards, tensor_sizes)
|
| D | _internals.py | 114 def check_tensor(shards_metadata, tensor_dims) -> None: function
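check_tensor in _internals.py is the validation helper imported by the sharding-spec and sharded-tensor entries elsewhere in this listing: given per-shard metadata and the overall tensor dimensions, it rejects layouts whose shards have the wrong rank, fall outside the tensor, or fail to cover it. A self-contained sketch of that kind of check, using a simplified stand-in for the real ShardMetadata class:

    from dataclasses import dataclass
    from math import prod

    @dataclass
    class ShardMeta:  # simplified stand-in for torch.distributed ShardMetadata
        shard_offsets: list
        shard_sizes: list

    def check_tensor(shards_metadata, tensor_dims) -> None:
        rank = len(tensor_dims)
        total_shard_volume = 0
        for shard in shards_metadata:
            if len(shard.shard_offsets) != rank or len(shard.shard_sizes) != rank:
                raise ValueError("shard rank does not match tensor rank")
            for offset, size, dim in zip(shard.shard_offsets, shard.shard_sizes, tensor_dims):
                if offset < 0 or size < 0 or offset + size > dim:
                    raise ValueError("shard does not fit inside the tensor")
            total_shard_volume += prod(shard.shard_sizes)
        # Assuming shards were already validated as non-overlapping, matching
        # volume means they tile the tensor exactly.
        if total_shard_volume != prod(tensor_dims):
            raise ValueError("shards do not cover the entire tensor")

    check_tensor([ShardMeta([0, 0], [5, 5]), ShardMeta([5, 0], [5, 5])], [10, 5])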
|
| /external/pytorch/torch/distributed/_shard/sharded_tensor/ |
| D | utils.py |
|     9  check_tensor,
|   260  check_tensor(
|
| D | api.py |
|    20  check_tensor,
|   177  check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
|  1005  check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
|
| /external/pytorch/test/ |
| D | test_bundled_inputs.py |
|   103  def check_tensor(sample):  function
|   116  check_tensor(torch.randn(1 << 16))
|   122  check_tensor(small_sparse)
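In test_bundled_inputs.py the local check_tensor(sample) bundles a single tensor with a scripted module, reads it back, and checks the round trip (including that the large torch.randn(1 << 16) case is stored compactly). A minimal sketch of that round trip with torch.utils.bundled_inputs, assuming a recent PyTorch; the tiny module and sample tensor are placeholders:

    import torch
    import torch.utils.bundled_inputs

    class Echo(torch.nn.Module):
        def forward(self, x):
            return x

    scripted = torch.jit.script(Echo())
    sample = torch.arange(6, dtype=torch.float32).reshape(2, 3)

    # Attach the sample as a bundled input; it travels with the serialized model.
    torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
        scripted, inputs=[(sample,)]
    )

    recovered = scripted.get_all_bundled_inputs()[0][0]
    assert torch.equal(recovered, sample)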
|
| /external/pytorch/torch/csrc/distributed/c10d/ |
| D | ProcessGroupUCC.cpp |
|   193  void check_tensor(const std::vector<at::Tensor>& tensors) {  in check_tensor() function
|   961  check_tensor({outputTensor});  in _allgather_base()
|   962  check_tensor({inputTensor});  in _allgather_base()
|  1000  check_tensor(tensors);  in allreduce()
|  1230  check_tensor(tensors);  in broadcast()
|  1346  check_tensor(tensors);  in reduce()
|  1385  check_tensor(outputTensors);  in reduce_scatter()
|  1397  check_tensor(flat_input);  in reduce_scatter()
|  1531  check_tensor(tensors);  in send()
|  1568  check_tensor(tensors);  in recv()
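ProcessGroupUCC's check_tensor runs ahead of each collective listed above and enforces the backend's preconditions on the tensor list. An approximate Python rendering of those preconditions (illustrative only, not a torch.distributed API; the exact checks live in the C++ function at line 193):

    import torch

    def check_tensor(tensors):
        # Preconditions a UCC-style backend typically enforces per call.
        if len(tensors) != 1:
            raise ValueError("expected exactly one tensor per collective")
        t = tensors[0]
        if t.is_sparse:
            raise ValueError("sparse tensors are not supported")
        if not t.is_contiguous():
            raise ValueError("tensor must be contiguous")

    check_tensor([torch.ones(8)])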
|
| /external/pytorch/docs/source/ |
| D | torch.compiler_dynamo_overview.rst |
|    85  …check_tensor(L['a'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), tor…
|    86  …check_tensor(L['b'], Tensor, DispatchKeySet(CPU, BackendSelect, ADInplaceOrView, AutogradCPU), tor…
|    89  recompiled. The interesting guard there is ``check_tensor``, which
|
| D | torch.compiler_dynamo_deepdive.rst |
|   573  check_tensor(L['a'], torch.float32, device=None, requires_grad=False, size=[4, 3], stride=[3, 1])
|   574  check_tensor(L['b'], torch.float32, device=None, requires_grad=False, size=[4, 3], stride=[3, 1])
|   577  …check_tensor(L['a'], torch.float32, device=None, requires_grad=False, size=[None, 3], stride=[3, 1…
|   578  …check_tensor(L['b'], torch.float32, device=None, requires_grad=False, size=[None, 3], stride=[3, 1…
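Both docs describe the check_tensor guards TorchDynamo installs on a compiled function's inputs: the first compilation pins the exact shape (size=[4, 3]), and after a recompile with a different first dimension the guard relaxes to size=[None, 3], i.e. dim 0 becomes dynamic. A small way to reproduce this, assuming a recent PyTorch; running under TORCH_LOGS=guards prints the generated check_tensor guards:

    import torch

    @torch.compile
    def add(a, b):
        return a + b

    # First call compiles and guards on the exact input shape, size=[4, 3].
    add(torch.randn(4, 3), torch.randn(4, 3))

    # A different first dimension triggers a recompile; dim 0 becomes
    # dynamic and the new guard reports size=[None, 3].
    add(torch.randn(8, 3), torch.randn(8, 3))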
|
| /external/pytorch/torch/csrc/cuda/ |
| D | nccl.cpp |
|   292  static inline void check_tensor(  in check_tensor() function
|   365  check_tensor(  in check_inputs()
|   396  check_tensor(  in check_inputs()
|