Searched refs:logical_tensor (Results 1 – 11 of 11) sorted by relevance
/external/pytorch/aten/src/ATen/functorch/

  LegacyVmapTransforms.cpp
     37  VmapPhysicalView MultiBatchVmapTransform::logicalToPhysical(const Tensor& logical_tensor) {  [in logicalToPhysical(), argument]
     38  auto* batched = maybeGetBatchedImpl(logical_tensor);  [in logicalToPhysical()]
    125  for (const auto& logical_tensor : logical_tensors) {  [in logicalToPhysical(), local]
    126  auto* batched = maybeGetBatchedImpl(logical_tensor);  [in logicalToPhysical()]
    141  for (const auto& logical_tensor : logical_tensors) {  [in logicalToPhysical(), local]
    142  auto* batched = maybeGetBatchedImpl(logical_tensor);  [in logicalToPhysical()]
    146  auto value = moveDimToFrontAndExpand(logical_tensor, {}, bdim_size);  [in logicalToPhysical()]
    177  for (const auto& logical_tensor : logical_tensors) {  [in logicalToPhysical(), local]
    178  auto* batched = maybeGetBatchedImpl(logical_tensor);  [in logicalToPhysical()]
    191  for (const auto& logical_tensor : logical_tensors) {  [in logicalToPhysical(), local]
    [all …]

  LegacyVmapTransforms.h
     66  static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);

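Note: the functorch hits above (and the legacy ATen copies below) are the vmap logical-to-physical transform. The "logical" tensor is what the vmapped function sees; logicalToPhysical() unwraps the BatchedTensorImpl obtained via maybeGetBatchedImpl(), moves the hidden batch dim to the front, and broadcasts unbatched inputs to the common batch size, returning a VmapPhysicalView that can map logical dim indices back to physical ones. A minimal sketch of the per-tensor step, written against public ATen ops instead of the internal helpers (moveDimToFrontAndExpand and friends); `bdim` stands in for the value the batched wrapper would report and is not part of the real signature:

  #include <ATen/ATen.h>
  #include <optional>

  // Illustrative only: roughly what the moveDimToFrontAndExpand() hit at
  // line 146 does for a single input tensor.
  at::Tensor move_bdim_to_front_and_expand(
      const at::Tensor& logical_tensor,
      std::optional<int64_t> bdim,   // batch dim reported by the BatchedTensorImpl, if any
      int64_t bdim_size) {           // common batch size across all inputs
    if (bdim.has_value()) {
      // Batched input: physical layout becomes [bdim_size, ...example dims...].
      return logical_tensor.movedim(*bdim, 0);
    }
    // Unbatched input (the `{}` argument in the line-146 hit): add a leading
    // batch dim of size 1 and expand it to the common batch size.
    auto sizes = logical_tensor.sizes().vec();
    sizes.insert(sizes.begin(), bdim_size);
    return logical_tensor.unsqueeze(0).expand(sizes);
  }
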
/external/pytorch/aten/src/ATen/

  LegacyVmapTransforms.cpp
     43  VmapPhysicalView MultiBatchVmapTransform::logicalToPhysical(const Tensor& logical_tensor) {  [in logicalToPhysical(), argument]
     44  auto* batched = maybeGetBatchedImpl(logical_tensor);  [in logicalToPhysical()]
    193  for (const auto& logical_tensor : logical_tensors) {  [in logicalToPhysical(), local]
    194  auto* batched = maybeGetBatchedImpl(logical_tensor);  [in logicalToPhysical()]
    206  for (const auto& logical_tensor : logical_tensors) {  [in logicalToPhysical(), local]
    207  auto requested_example_dim = /*logical_dim*/logical_tensor.dim();  [in logicalToPhysical()]
    209  logical_tensor, collective_levels, requested_example_dim);  [in logicalToPhysical()]

  LegacyVmapTransforms.h
     59  static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);

  LegacyBatchingRegistrations.cpp
     84  bool isPhysicalScalarTensor(const Tensor& logical_tensor) {  [in isPhysicalScalarTensor(), argument]
     85  if (logical_tensor.dim() > 0) {  [in isPhysicalScalarTensor()]
     88  auto* batched = maybeGetBatchedImpl(logical_tensor);  [in isPhysicalScalarTensor()]

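Note: the LegacyBatchingRegistrations.cpp hit is a small predicate whose return statements the snippet truncates. From the three visible lines (84, 85, 88) its shape is roughly the following; this is a reconstruction, not a verbatim copy, and the exact header declaring maybeGetBatchedImpl varies by PyTorch version:

  #include <ATen/ATen.h>
  // maybeGetBatchedImpl() is declared next to (Legacy)BatchedTensorImpl in ATen;
  // the header name differs across versions, so it is omitted here.

  // A tensor counts as a "physical scalar" only if it is 0-dim AND is not
  // secretly batched, i.e. carries no hidden vmap batch dims.
  bool isPhysicalScalarTensor(const at::Tensor& logical_tensor) {
    if (logical_tensor.dim() > 0) {
      return false;                 // has visible dims -> not a scalar
    }
    auto* batched = at::maybeGetBatchedImpl(logical_tensor);
    if (batched) {
      return false;                 // 0-dim logically, but batched physically
    }
    return true;
  }
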
/external/pytorch/torch/csrc/jit/codegen/onednn/

  kernel.h
     63  static dnnl::graph::logical_tensor toLogicalTensor(const ArgSpec& s) {  [in toLogicalTensor()]
     64  return s.logical_tensor();  [in toLogicalTensor()]
     86  std::vector<dnnl::graph::logical_tensor> constantLogicalTensors_;

  kernel.cpp
     13  using data_type = dnnl::graph::logical_tensor::data_type;
    107  constantLogicalTensors_.emplace_back(constantInputSpec.logical_tensor());  [in initializeInputSpecs()]
    134  {spec.logical_tensor(), Engine::getEngine(), input.data_ptr()});  [in prepareRunArgs()]
    182  {spec.logical_tensor(),  [in prepareRunArgs()]
    205  {spec.logical_tensor(), Engine::getEngine(), tensor.data_ptr()});  [in prepareRunArgs()]

  LlgaTensorImpl.h
     39  using desc = dnnl::graph::logical_tensor;
    158  return logical_tensor().get_mem_size();  [in storage_size()]
    161  desc logical_tensor() const {  [in logical_tensor(), function]

  LlgaTensorImpl.cpp
     95  get_llga_desc(tensor).logical_tensor(),  [in llga_from_aten_tensor()]
    100  using data_type = dnnl::graph::logical_tensor::data_type;

  operator.h
    140  dnnl::graph::logical_tensor createLogicalTensor(Value* value) const {  [in createLogicalTensor()]
    141  return LlgaTensorDesc(value).logical_tensor();  [in createLogicalTensor()]

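Note: all of the oneDNN-codegen hits above follow the same split. dnnl::graph::logical_tensor is metadata only (id, data type, shape/strides); LlgaTensorDesc::logical_tensor() exposes that metadata for partition compilation, and prepareRunArgs() attaches the real buffer at execution time via the aggregate {spec.logical_tensor(), Engine::getEngine(), input.data_ptr()}. A minimal sketch, assuming the oneDNN Graph C++ API vendored here; the header path, constructor shapes, and engine type may differ slightly between oneDNN releases:

  #include <oneapi/dnnl/dnnl_graph.hpp>   // assumed header for the Graph API
  #include <cstdint>
  #include <iostream>
  #include <vector>

  using logical_tensor = dnnl::graph::logical_tensor;
  using data_type = logical_tensor::data_type;   // same alias as kernel.cpp line 13

  int main() {
    // Hypothetical descriptor: id 0, f32, shape [2, 3] with contiguous strides.
    logical_tensor lt{/*tid=*/0,
                      data_type::f32,
                      /*shape=*/std::vector<int64_t>{2, 3},
                      /*strides=*/std::vector<int64_t>{3, 1}};
    // LlgaTensorImpl.h line 158 sizes its backing storage with this same call.
    std::cout << "bytes needed: " << lt.get_mem_size() << '\n';   // 24 for a dense 2x3 f32 tensor
    return 0;
  }

Deferring the data pointer to execution time is what lets a compiled partition be reused across calls: as long as the logical_tensor metadata matches, only the buffer bound in prepareRunArgs() changes.
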
/external/pytorch/test/distributed/_tensor/ |
  test_dtensor.py
    689  logical_tensor = torch.randn(tensor_shape)
    691  dtensor = distribute_tensor(logical_tensor, device_mesh, placements)