Searched full:expand_as (Results 1 – 25 of 102) sorted by relevance
| /external/pytorch/aten/src/ATen/native/vulkan/ops/ |
| D | Expand.cpp |
|   | 70 Tensor expand_as(const at::Tensor& self, const at::Tensor& other) { in expand_as() function
|   | 78 m.impl(TORCH_SELECTIVE_NAME("aten::expand_as"), TORCH_FN(expand_as)); in TORCH_LIBRARY_IMPL()
|
| /external/pytorch/aten/src/ATen/native/mps/operations/ |
| D | Copy.mm |
|   | 103 src = src_.expand_as(dst).contiguous();
|   | 197 Tensor src = (src_.dtype() != dst_.dtype() ? src_.to(dst_.dtype()) : src_).expand_as(dst_);
|   | 254 src = src_.expand_as(dst_).contiguous();
|   | 340 return copy_from_mps_(dst, needs_broadcasting ? src.expand_as(dst) : src, non_blocking);
|   | 343 return copy_to_mps_(dst, needs_broadcasting ? src.expand_as(dst) : src, non_blocking);
|   | 347 return copy_kernel_mps(dst, needs_broadcasting ? src.expand_as(dst) : src, non_blocking);
|
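The Copy.mm hits above use expand_as to broadcast a smaller source tensor up to the destination's shape before copying, with .contiguous() materializing the broadcast. A minimal Python sketch of the same idiom (tensor names and shapes are illustrative, not from the file):

    import torch

    dst = torch.empty(4, 3)
    src = torch.tensor([1.0, 2.0, 3.0])  # shape (3,), broadcastable to (4, 3)

    # expand_as returns a zero-copy view with the destination's shape;
    # .contiguous() materializes it so it can be copied element for element.
    src_expanded = src.expand_as(dst).contiguous()
    dst.copy_(src_expanded)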
| /external/pytorch/torch/csrc/api/include/torch/nn/functional/ |
| D | normalization.h |
|   | 21 auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); in normalize()
|   | 24 auto denom = input.norm(p, dim, true).clamp_min(eps).expand_as(input); in normalize()
|
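The normalization.h hit is the C++ functional counterpart of torch.nn.functional.normalize: the p-norm is taken with keepdim, clamped below by eps, and expanded back to the input's shape before dividing. A rough Python sketch of that pattern (not the library's actual implementation):

    import torch

    def normalize_sketch(input, p=2.0, dim=1, eps=1e-12):
        # norm(..., keepdim=True) keeps a size-1 dim so expand_as can broadcast it back.
        denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)
        return input / denom

    x = torch.randn(2, 5)
    print(normalize_sketch(x).norm(dim=1))  # ~1.0 per row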
| /external/pytorch/torch/onnx/ |
| D | symbolic_opset7.py | 26 "expand_as",
|
| /external/pytorch/aten/src/ATen/native/ |
| D | SobolEngineOpsUtils.h | 39 inter = at::pow(2, inter).expand_as(bmat); in cdot_pow2()
|
| D | MathBitFallThroughLists.h | 11 m.impl("expand_as", torch::CppFunction::makeFallthrough()); \
|
| D | SparseTensorUtils.cpp | 123 at::zeros({1}, t._values().options()).expand_as(t._values()), in zeros_like_with_indices()
|
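The SparseTensorUtils.cpp hit expands a single-element zeros tensor to the shape of the values tensor; since expand_as only creates a stride-0 view, this yields an all-zero tensor of the right shape without allocating a full buffer. A small sketch of that trick (tensor names are made up):

    import torch

    values = torch.randn(6, 3)

    # One element of storage, viewed as shape (6, 3) with zero strides.
    zeros_view = torch.zeros(1, dtype=values.dtype).expand_as(values)
    print(zeros_view.shape)    # torch.Size([6, 3])
    print(zeros_view.stride()) # (0, 0): every position aliases the single stored element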
| /external/pytorch/torch/distributions/ |
| D | dirichlet.py | 15 total = concentration.sum(-1, True).expand_as(concentration)
|
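The dirichlet.py hit sums the concentration parameters along the last dim with keepdim and expands the total back to the concentration's shape, the broadcasting step used, e.g., when forming the Dirichlet mean concentration / total. A quick sketch of the shape mechanics (values are illustrative):

    import torch

    concentration = torch.tensor([[1.0, 2.0, 3.0],
                                  [4.0, 5.0, 6.0]])

    # sum(-1, True) keeps a trailing size-1 dim: shape (2, 1) -> expanded to (2, 3).
    total = concentration.sum(-1, True).expand_as(concentration)
    mean = concentration / total
    print(mean.sum(-1))  # each row sums to 1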
| /external/pytorch/test/quantization/core/experimental/ |
| D | quantization_util.py | 63 correct = pred.eq(target.view(1, -1).expand_as(pred))
|
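The quantization_util.py hit is the common top-k accuracy idiom: the target is reshaped to a row and expanded to the shape of the top-k predictions so eq can compare every candidate against the label. A hedged sketch of that pattern (names and sizes are illustrative):

    import torch

    output = torch.randn(8, 10)          # logits for 8 samples, 10 classes
    target = torch.randint(0, 10, (8,))  # ground-truth class per sample
    k = 5

    _, pred = output.topk(k, dim=1, largest=True, sorted=True)
    pred = pred.t()                                        # (k, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))  # (k, batch) bool
    top1 = correct[:1].reshape(-1).float().sum()
    topk = correct[:k].reshape(-1).float().sum()
    print(top1.item(), topk.item())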
| /external/pytorch/aten/src/ATen/native/cuda/ |
| D | Copy.cu |
|   | 311 src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); in copy_kernel_cuda()
|   | 315 src_contig = iter.tensor(1).expand_as(dst).contiguous(); in copy_kernel_cuda()
|
| /external/pytorch/aten/src/ATen/native/sparse/ |
| D | ValidateCompressedIndicesCommon.h |
|   | 269 .add_owned_output(dummy.expand_as(idx)) in _validate_compressed_sparse_indices_kernel()
|   | 303 .add_owned_output(dummy.expand_as(cidx_curr)) in _validate_compressed_sparse_indices_kernel()
|
| /external/pytorch/torch/masked/maskedtensor/ |
| D | binary.py | 156 result_mask = result_mask.expand_as(result_data)
|
| /external/pytorch/functorch/op_analysis/ |
| D | public_api | 178 expand_as
|
| D | annotated_ops | 117 expand_as, view/reshape
|
| /external/pytorch/docs/source/ |
| D | tensor_view.rst | 56 - :meth:`~torch.Tensor.expand_as`
|
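tensor_view.rst lists expand_as among the view ops: the result shares the original storage and reports stride 0 along the broadcast dimensions, so no data is copied. A small sketch to observe that (variable names are made up):

    import torch

    base = torch.arange(3.0).reshape(1, 3)
    other = torch.empty(4, 3)

    view = base.expand_as(other)
    print(view.shape)                          # torch.Size([4, 3])
    print(view.stride())                       # (0, 1): rows reuse the same memory
    print(view.data_ptr() == base.data_ptr())  # True: it is a view, no copy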
| D | masked.rst | 296 Tensor.expand_as
|
| /external/pytorch/aten/src/ATen/functorch/ |
| D | BatchRulesLoss.cpp | 137 reduction == Reduction::None ? grad : grad.expand_as(input), cur_level); in binary_cross_entropy_backward_plumbing()
|
| D | BatchRulesReduceOps.cpp |
|   | 255 a_has_bdim ? a : a.expand_as(flagpole), in expand_bdims()
|   | 256 b_has_bdim ? b : b.expand_as(flagpole)); in expand_bdims()
|
| D | BatchRulesDecompositions.cpp | 107 OP_DECOMPOSE(expand_as); in TORCH_LIBRARY_IMPL()
|
| /external/pytorch/torch/csrc/jit/passes/ |
| D | device_type_analysis.cpp | 234 {"aten::expand_as(Tensor self, Tensor other) -> Tensor", in buildRuleRegistry()
|
| /external/pytorch/test/jit/ |
| D | test_device_analysis.py | 143 return x.expand_as(y)
|
| /external/pytorch/test/mobile/model_test/ |
| D | tensor_ops.py | 258 x.expand_as(y),
|
| /external/pytorch/torch/_decomp/ |
| D | __init__.py | 304 aten.expand_as,
|
| /external/pytorch/torch/_inductor/fx_passes/ |
| D | fuse_attention.py |
|   | 376 attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
|   | 441 attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
|
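The fuse_attention.py hits turn a (bs, k_len) padding mask into a boolean mask shaped like the attention scores: zeros mark padding, the mask is viewed as (bs, 1, 1, k_len) and expanded to the (bs, n_head, q_len, k_len) score tensor, typically followed by masked_fill. A sketch of that shape dance (dimensions are illustrative):

    import torch

    bs, n_head, q_len, k_len = 2, 4, 6, 6
    scores = torch.randn(bs, n_head, q_len, k_len)
    attn_mask = torch.ones(bs, k_len, dtype=torch.long)
    attn_mask[:, -2:] = 0  # last two key positions are padding

    # 0 means "masked"; broadcast the per-key mask across heads and query positions.
    bool_mask = (attn_mask == 0).view(bs, 1, 1, k_len).expand_as(scores)
    scores = scores.masked_fill(bool_mask, float("-inf"))
    weights = scores.softmax(dim=-1)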
| /external/pytorch/torch/csrc/autograd/ |
| D | TraceTypeManual.cpp | 24 jit::Node* node = graph->create(jit::aten::expand_as, /*num_outputs=*/1); in copy_()
|