/external/pytorch/torch/csrc/autograd/
  FunctionsManual.h
    34: TORCH_API Tensor toNonOptFwGrad(const std::optional<Tensor>& t);
    35: TORCH_API Tensor toNonOptPrimal(const std::optional<Tensor>& t);
    36: TORCH_API Tensor toNonOptTensor(const std::optional<Tensor>& t);
    38: TORCH_API inline std::optional<Tensor> wrap_opt_if(
    39:     const Tensor& t,
    41:   using OptTensor = std::optional<Tensor>;
    45: TORCH_API Tensor
    46: apply_loss_reduction(const Tensor& unreduced, int64_t reduction);
    51:     const at::Tensor& t);
    55:     at::ArrayRef<at::Tensor> t);
    [all …]

/external/pytorch/torch/csrc/api/include/torch/
  special.h
    17: inline Tensor gammaln(const Tensor& self) {
    21: inline Tensor& gammaln_out(Tensor& result, const Tensor& self) {
    34: inline Tensor gammainc(const Tensor& self, const Tensor& other) {
    38: inline Tensor& gammainc_out(
    39:     Tensor& result,
    40:     const Tensor& self,
    41:     const Tensor& other) {
    54: inline Tensor gammaincc(const Tensor& self, const Tensor& other) {
    58: inline Tensor& gammaincc_out(
    59:     Tensor& result,
    [all …]

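These are the C++ frontend wrappers for the torch.special namespace. A minimal usage sketch, assuming a standard libtorch build (the values are illustrative only):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto x = torch::tensor({0.5, 1.0, 2.0});
      auto a = torch::tensor({1.0, 2.0, 3.0});

      auto lg = torch::special::gammaln(x);      // log|Gamma(x)|, elementwise
      auto p  = torch::special::gammainc(a, x);  // regularized lower incomplete gamma P(a, x)
      auto q  = torch::special::gammaincc(a, x); // regularized upper incomplete gamma Q(a, x)

      std::cout << lg << "\n" << p + q << "\n";  // P + Q == 1, up to rounding
    }
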
  linalg.h
    11: inline Tensor cholesky(const Tensor& self) {
    15: inline Tensor cholesky_out(Tensor& result, const Tensor& self) {
    19: inline Tensor det(const Tensor& self) {
    23: inline std::tuple<Tensor, Tensor> slogdet(const Tensor& input) {
    27: inline std::tuple<Tensor&, Tensor&> slogdet_out(
    28:     Tensor& sign,
    29:     Tensor& logabsdet,
    30:     const Tensor& input) {
    34: inline std::tuple<Tensor, Tensor> eig(const Tensor& self) {
    38: inline std::tuple<Tensor&, Tensor&> eig_out(
    [all …]

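torch::linalg mirrors Python's torch.linalg module. A hedged sketch of the cholesky/det/slogdet calls listed above, assuming a recent libtorch:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // A symmetric positive-definite matrix, so cholesky() is well defined.
      auto a   = torch::randn({4, 4});
      auto spd = a.matmul(a.t()) + 4 * torch::eye(4);

      auto l = torch::linalg::cholesky(spd);                 // lower-triangular factor
      auto d = torch::linalg::det(spd);                      // determinant
      auto [sign, logabsdet] = torch::linalg::slogdet(spd);  // numerically stable log|det|

      // det == sign * exp(log|det|), up to floating-point error
      std::cout << (d - sign * logabsdet.exp()).abs().item<double>() << "\n";
    }
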
/external/pytorch/docs/source/
  tensors.rst
      5: torch.Tensor
      8: A :class:`torch.Tensor` is a multi-dimensional matrix containing elements of
     85: :class:`torch.Tensor` constructor is an alias for the default tensor type
    105: :func:`torch.tensor` always copies :attr:`data`. If you have a Tensor
    107: :meth:`~torch.Tensor.requires_grad_` or
    108: :meth:`~torch.Tensor.detach` to avoid a copy.
    142: Use :meth:`torch.Tensor.item` to get a Python number from a tensor containing a
    181: :class:`torch.layout` attributes of a :class:`torch.Tensor`, see
    192: :meth:`~torch.Tensor.to` method on the tensor.
    195: Current implementation of :class:`torch.Tensor` introduces memory overhead,
    [all …]

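The copy and extraction semantics documented here carry over to the C++ frontend. A small sketch of the same points in libtorch (C++ is used for consistency with the rest of this listing; the .rst itself documents the Python API):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // torch::tensor always copies its input data, mirroring torch.tensor in Python.
      std::vector<double> data = {1.0, 2.0, 3.0};
      auto t = torch::tensor(data);
      data[0] = 99.0;                    // does not affect t

      // detach() shares storage but drops autograd history, avoiding a copy.
      auto w = torch::randn({3}, torch::requires_grad());
      auto frozen = w.detach();

      // item<T>() extracts a number from a one-element tensor, like Tensor.item().
      double s = t.sum().item<double>();
      std::cout << s << "\n";  // 6
    }
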
/external/pytorch/aten/src/ATen/native/
  BinaryOps.cpp
    151: TORCH_META_FUNC2(add, Tensor) (
    152:     const Tensor& self, const Tensor& other, const Scalar& alpha
    158: TORCH_META_FUNC2(sub, Tensor) (
    159:     const Tensor& self, const Tensor& other, const Scalar& alpha
    166: TORCH_META_FUNC2(mul, Tensor) (
    167:     const Tensor& self, const Tensor& other
    172: TORCH_META_FUNC2(div, Tensor) (const Tensor& self, const Tensor& other) {
    176: TORCH_META_FUNC2(div, Tensor_mode) (const Tensor& self, const Tensor& other, std::optional<c10::str…
    191: TORCH_META_FUNC(special_xlog1py) (const Tensor& self, const Tensor& other) {
    195: TORCH_META_FUNC(special_zeta) (const Tensor& self, const Tensor& other) {
    [all …]

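TORCH_META_FUNC2 declares the shape- and dtype-inference half of a structured kernel, with the second token naming the overload (add.Tensor, div.Tensor_mode, ...). A hedged sketch of the public ops these meta functions back:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto a = torch::ones({2, 2});
      auto b = torch::full({2, 2}, 3.0);

      auto c = torch::add(a, b, /*alpha=*/2);                // add.Tensor: a + alpha * b
      auto d = torch::div(a, b);                             // div.Tensor: true division
      auto e = torch::div(a, b, /*rounding_mode=*/"floor");  // div.Tensor_mode
      std::cout << c << d << e;
    }
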
  UnaryOps.cpp
    179: TORCH_META_FUNC(func) (const Tensor& self) { \
    233: TORCH_META_FUNC(polygamma)(int64_t n, const Tensor& self) {
    240: TORCH_META_FUNC(func) (const Tensor& self) { \
    248: TORCH_META_FUNC2(round, decimals)(const Tensor& self, int64_t decimals){
    252: TORCH_META_FUNC(neg)(const Tensor& self) {
    259: TORCH_META_FUNC(trunc) (const Tensor& self) {
    266: TORCH_META_FUNC(floor) (const Tensor& self) {
    273: TORCH_META_FUNC(sign) (const Tensor& self) {
    279: TORCH_META_FUNC(signbit) (const Tensor& self) {
    286: TORCH_META_FUNC(ceil) (const Tensor& self) {
    [all …]

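Per the scope annotations in the search hits, lines 179 and 240 sit inside the CREATE_UNARY_FLOAT_META_FUNC / CREATE_UNARY_META_FUNC macros that stamp out one meta function per elementwise op (hence the trailing backslashes). A hedged sketch of the corresponding public calls:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto x = torch::tensor({-1.75, -0.5, 0.5, 1.75});

      std::cout << torch::neg(x)                    // elementwise negation
                << torch::trunc(x)                  // round toward zero
                << torch::floor(x) << torch::ceil(x)
                << torch::round(x, /*decimals=*/1)  // the round.decimals overload above
                << torch::signbit(x);               // true where the sign bit is set
    }
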
  Activation.cpp
     86: TORCH_META_FUNC(threshold)(const Tensor& self, const Scalar& threshold, const Scalar& value) {
     87:   const Tensor& result = maybe_get_output();
    100: TORCH_META_FUNC(threshold_backward)(const Tensor& grad, const Tensor& self, const Scalar& threshold…
    101:   const Tensor& gradInput = maybe_get_output();
    114:     const Tensor& self, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale
    120:     const Tensor& grad_output,
    125:     const Tensor& self_or_result
    136: TORCH_META_FUNC(silu) (const Tensor& self) {
    141:     const Tensor& grad_output, const Tensor& input
    146: TORCH_META_FUNC(mish) (const Tensor& self) {
    [all …]

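A hedged sketch of the activation ops whose meta functions appear above, assuming a standard libtorch build:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto x = torch::linspace(-3, 3, 7);

      // threshold(x, t, v): keep x where x > t, otherwise replace with v.
      auto th = torch::threshold(x, /*threshold=*/0.0, /*value=*/-1.0);

      auto s = torch::silu(x);  // x * sigmoid(x)
      auto m = torch::mish(x);  // x * tanh(softplus(x))
      std::cout << th << s << m;
    }
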
  Normalization.cpp
     69: TORCH_META_FUNC(renorm)(const Tensor& self, const Scalar& p, int64_t dim, const Scalar& maxnorm) {
     95: static inline Tensor repeat_if_defined(const Tensor& t, const SymInt& repeat) {
    121: static inline bool is_contiguous(const Tensor& t) {
    128: static inline MemoryFormat suggest_memory_format_contig(const Tensor& t) {
    135: std::tuple<Tensor,Tensor,Tensor> batch_norm_cpu_transform_input_template(
    136:     const Tensor& input, const Tensor& weight, const Tensor& bias,
    137:     const Tensor& save_mean /* optional */, const Tensor& save_invstd /* optional */,
    138:     const Tensor& running_mean /* optional */, const Tensor& running_var /* optional */,
    139:     bool train, double eps, Tensor& output) {
    161:   auto as_nd = [&](const Tensor& t) {
    [all …]

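batch_norm_cpu_transform_input_template is the CPU core that at::batch_norm eventually reaches. A hedged sketch of that public entry point:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // NCHW input; per-channel affine parameters and running statistics.
      auto input        = torch::randn({8, 3, 16, 16});
      auto weight       = torch::ones({3});
      auto bias         = torch::zeros({3});
      auto running_mean = torch::zeros({3});
      auto running_var  = torch::ones({3});

      auto out = torch::batch_norm(
          input, weight, bias, running_mean, running_var,
          /*training=*/true, /*momentum=*/0.1, /*eps=*/1e-5,
          /*cudnn_enabled=*/false);

      std::cout << out.sizes() << "\n";  // [8, 3, 16, 16]
    }
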
/external/executorch/exir/dialects/edge/op/
  sample_input.py
    18:     InArg(ArgType.Tensor),
    23:     Return(ArgType.Tensor),
    28:     InArg(ArgType.Tensor, size=[2, 3, 4, 5]),
    31:     InArg(ArgType.Tensor, size=[3]),
    32:     InArg(ArgType.Tensor, size=[3]),
    37:     Return(ArgType.Tensor, argname="__ret0", size=[2, 3, 4, 5]),
    38:     Return(ArgType.Tensor, argname="__ret1", size=[0]),
    39:     Return(ArgType.Tensor, argname="__ret2", size=[0]),
    44:     InArg(ArgType.Tensor),
    49:     Return(ArgType.Tensor),
    [all …]

/external/pytorch/aten/src/ATen/
  ExpandUtils.h
     77:     std::initializer_list<std::reference_wrapper<const Tensor>> tensors,
     99: inline c10::MaybeOwned<Tensor> expand_inplace(
    100:     const Tensor& tensor,
    101:     const Tensor& to_expand) {
    103:     return c10::MaybeOwned<Tensor>::borrowed(to_expand);
    105:   return c10::MaybeOwned<Tensor>::owned(
    109: inline c10::MaybeOwned<Tensor> expand_inplace(
    110:     const Tensor& tensor,
    111:     Tensor&& to_expand) = delete;
    113: inline c10::MaybeOwned<Tensor> expand_inplace(
    [all …]

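expand_inplace returns a c10::MaybeOwned<Tensor>: it borrows to_expand when its sizes already match tensor, and otherwise returns an owned, broadcast-expanded view; the deleted rvalue overload prevents borrowing from a temporary. A hedged sketch of kernel-style usage:

    #include <ATen/ExpandUtils.h>
    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto dst = torch::zeros({4, 3});
      auto src = torch::ones({3});  // needs broadcasting to [4, 3]

      // Borrowed if src already matches dst's sizes, owned (expanded) otherwise.
      c10::MaybeOwned<at::Tensor> src_expanded = at::expand_inplace(dst, src);
      dst.add_(*src_expanded);

      std::cout << dst.sizes() << "\n";  // [4, 3]
    }
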
  FunctionalInverses.cpp
     15: static Tensor permute_inverse(const Tensor& self, IntArrayRef dims, InverseReturnMode inverse_retur…
     29: static Tensor unsqueeze_copy_to(const Tensor & self, c10::SymIntArrayRef sizes, InverseReturnMode i…
     48: static Tensor unsqueeze_copy_to(const Tensor & self, IntArrayRef dim, c10::SymIntArrayRef sizes, In…
     51:   Tensor result = self;
    106: Tensor FunctionalInverses::_fw_primal_inverse(const at::Tensor& base, const at::Tensor& mutated_vie…
    108:   return Tensor();
    111: Tensor FunctionalInverses::_make_dual_inverse(const at::Tensor& base, const at::Tensor& mutated_vie…
    113:   return Tensor();
    116: Tensor FunctionalInverses::view_as_real_inverse(const Tensor& base, const Tensor& mutated_view, Inv…
    124: Tensor FunctionalInverses::view_as_complex_inverse(const Tensor& base, const Tensor& mutated_view, …
    [all …]

/external/tensorflow/tensorflow/java/src/main/java/org/tensorflow/
  Tensors.java
    29:   public static Tensor<String> create(String data) {
    30:     return Tensor.create(data.getBytes(UTF_8), String.class);
    39:   public static Tensor<String> create(String data, java.nio.charset.Charset charset) {
    40:     return Tensor.create(data.getBytes(charset), String.class);
    48:   public static Tensor<Float> create(float data) {
    49:     return Tensor.create(data, Float.class);
    58:   public static Tensor<Float> create(float[] data) {
    59:     return Tensor.create(data, Float.class);
    68:   public static Tensor<Float> create(float[][] data) {
    69:     return Tensor.create(data, Float.class);
    [all …]

/external/pytorch/torch/nn/
  functional.pyi.in
    17: from torch import Tensor
    53:     input: Tensor,
    58:     _random_samples: Optional[Tensor] = ...,
    59: ) -> Tuple[Tensor, Tensor]: ...
    61:     input: Tensor,
    66:     _random_samples: Optional[Tensor] = ...,
    67: ) -> Tuple[Tensor, Tensor]: ...
    69:     input: Tensor,
    76: ) -> Tuple[Tensor, Tensor]: ...
    78:     input: Tensor,
    [all …]

/external/pytorch/aten/src/ATen/native/transformers/cuda/flash_attn/
  flash_api.h
    10: std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::…
    11: mha_fwd(const at::Tensor &q,  // batch_size x seqlen_q x num_heads x head_size
    12:         const at::Tensor &k,  // batch_size x seqlen_k x num_heads_k x head_size
    13:         const at::Tensor &v,  // batch_size x seqlen_k x num_heads_k x head_size
    14:         … std::optional<at::Tensor> &out_,          // batch_size x seqlen_q x num_heads x head_size
    15:         std::optional<at::Tensor> &alibi_slopes_,   // num_heads or batch_size x num_heads
    24: std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::…
    25: mha_varlen_fwd(const at::Tensor &q,  // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} …
    26:                const at::Tensor &k,  // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
    27:                const at::Tensor &v,  // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
    [all …]

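mha_fwd and mha_varlen_fwd are the raw flash-attention entry points; most code reaches them through the public at::scaled_dot_product_attention, which selects a backend automatically. A hedged sketch of that public path (note SDPA takes [batch, heads, seq, head_dim], while mha_fwd above documents [batch, seq, heads, head_dim]; the CUDA flash path additionally requires half/bfloat16 inputs, skipped here for portability):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto q = torch::randn({2, 8, 128, 64});  // [batch, heads, seq_q, head_dim]
      auto k = torch::randn({2, 8, 128, 64});
      auto v = torch::randn({2, 8, 128, 64});

      auto out = at::scaled_dot_product_attention(
          q, k, v, /*attn_mask=*/{}, /*dropout_p=*/0.0, /*is_causal=*/true);

      std::cout << out.sizes() << "\n";  // [2, 8, 128, 64]
    }
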
/external/pytorch/aten/src/ATen/native/mkldnn/
  Normalization.cpp
    24: std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm(
    25:     …Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, cons…
    32: std::tuple<Tensor, Tensor, Tensor> mkldnn_batch_norm_backward(
    33:     const Tensor& grad_output,
    34:     …Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& running_mean_…
    41: std::tuple<Tensor, Tensor, Tensor> mkldnn_layer_norm_last_index_weight_bias_f32(
    42:     const Tensor& input,
    43:     IntArrayRef normalized_shape, const Tensor& weight, const Tensor& bias,
    48: std::tuple<Tensor, Tensor, Tensor> _mkldnn_batch_norm_legit(
    49:     …const Tensor& input, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_op…
    [all …]

/external/pytorch/aten/src/ATen/native/cuda/linalg/
  BatchLinearAlgebraLib.h
    42: void geqrf_batched_cublas(const Tensor& input, const Tensor& tau);
    43: void triangular_solve_cublas(const Tensor& A, const Tensor& B, bool left, bool upper, TransposeType…
    44: void triangular_solve_batched_cublas(const Tensor& A, const Tensor& B, bool left, bool upper, Trans…
    45: void gels_batched_cublas(const Tensor& a, Tensor& b, Tensor& infos);
    47:     const Tensor& LD,
    48:     const Tensor& pivots,
    49:     const Tensor& info,
    53:     const Tensor& LD,
    54:     const Tensor& pivots,
    55:     const Tensor& B,
    [all …]

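These declarations are the cuBLAS/cuSOLVER backends behind the linalg ops; the same public API falls back to LAPACK on CPU. A hedged sketch using the public triangular solve (assuming the torch::linalg::solve_triangular wrapper of recent libtorch):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // A well-conditioned upper-triangular system A X = B.
      auto a = torch::randn({3, 3}).triu() + 3 * torch::eye(3);
      auto b = torch::randn({3, 2});

      auto x = torch::linalg::solve_triangular(
          a, b, /*upper=*/true, /*left=*/true, /*unitriangular=*/false);

      std::cout << (a.matmul(x) - b).abs().max().item<double>() << "\n";  // ~0
    }
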
/external/pytorch/aten/src/ATen/native/quantized/
  AffineQuantizer.h
    11: Tensor& quantize_tensor_per_tensor_affine(
    12:     const Tensor& rtensor,
    13:     Tensor& qtensor,
    16: Tensor& quantize_tensor_per_channel_affine(
    17:     const Tensor& rtensor,
    18:     Tensor& qtensor,
    19:     const Tensor& scales,
    20:     Tensor zero_points,
    23: Tensor& quantize_tensor_per_channel_float_qparams(
    24:     const Tensor& rtensor,
    [all …]

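These helpers implement the affine mapping q = round(r / scale) + zero_point behind the public quantization ops. A hedged sketch of the per-tensor path:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto r = torch::randn({2, 3});

      // Affine quantization to qint8, then back to float.
      auto q    = torch::quantize_per_tensor(r, /*scale=*/0.1, /*zero_point=*/0, torch::kQInt8);
      auto back = q.dequantize();

      // Round-trip error is small: about scale/2, barring saturation at the qint8 range.
      std::cout << (r - back).abs().max().item<double>() << "\n";
    }
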
/external/pytorch/functorch/dim/
  op_properties.py
    62:     *(getattr(torch.Tensor, m) for m in pointwise_methods),
    65:     torch.Tensor.abs,
    67:     torch.Tensor.acos,
    69:     torch.Tensor.acosh,
    71:     torch.Tensor.add,
    73:     torch.Tensor.addcdiv,
    75:     torch.Tensor.addcmul,
    77:     torch.Tensor.addr,
    79:     torch.Tensor.angle,
    81:     torch.Tensor.asin,
    [all …]

/external/pytorch/torch/csrc/api/include/torch/nn/functional/
  loss.h
    13: inline Tensor l1_loss(
    14:     const Tensor& input,
    15:     const Tensor& target,
    34: inline Tensor l1_loss(
    35:     const Tensor& input,
    36:     const Tensor& target,
    45: inline Tensor kl_div(
    46:     const Tensor& input,
    47:     const Tensor& target,
    92: inline Tensor kl_div(
    [all …]

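Each loss appears in two overloads: one taking a ...FuncOptions struct and one taking the expanded arguments. A hedged sketch, keeping in mind that kl_div expects log-probabilities as input and probabilities as target:

    #include <torch/torch.h>
    #include <iostream>

    namespace F = torch::nn::functional;

    int main() {
      auto input  = torch::randn({4, 5});
      auto target = torch::randn({4, 5});

      // Mean absolute error with the default mean reduction.
      auto l1 = F::l1_loss(input, target);

      // kl_div: log-probabilities in, probabilities as target.
      auto logp = torch::log_softmax(input, /*dim=*/1);
      auto p    = torch::softmax(target, /*dim=*/1);
      auto kl   = F::kl_div(logp, p, F::KLDivFuncOptions().reduction(torch::kBatchMean));

      std::cout << l1.item<double>() << " " << kl.item<double>() << "\n";
    }
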
/external/executorch/kernels/test/
  op_native_batch_norm_test.cpp
    22: ::std::tuple<exec_aten::Tensor&, exec_aten::Tensor&, exec_aten::Tensor&>
    24:     const exec_aten::Tensor& input,
    25:     const exec_aten::optional<exec_aten::Tensor>& weight,
    26:     const exec_aten::optional<exec_aten::Tensor>& bias,
    27:     const exec_aten::Tensor& running_mean,
    28:     const exec_aten::Tensor& running_var,
    31:     exec_aten::Tensor& out0,
    32:     exec_aten::Tensor& out1,
    33:     exec_aten::Tensor& out2) {
    51: ::std::tuple<exec_aten::Tensor&, exec_aten::Tensor&, exec_aten::Tensor&>
    [all …]

  op_index_test.cpp
    24: using exec_aten::Tensor;
    27: using OptTensorArrayRef = ArrayRef<optional<Tensor>>;
    31: Tensor& op_index_tensor_out(
    32:     const Tensor& input,
    34:     Tensor& out) {
    36:   c10::List<std::optional<at::Tensor>> indices_list(indices);
    55:   Tensor x = tf.make(
    75:   optional<Tensor> indices[] = {
    76:       optional<Tensor>(tfl.make({2}, {0, 1})),
    77:       optional<Tensor>(tfl.make({2}, {1, 0})),
    [all …]

  op_index_put_test.cpp
    23: using exec_aten::Tensor;
    26: using OptTensorArrayRef = ArrayRef<optional<Tensor>>;
    30: Tensor& op_index_put_out(
    31:     const Tensor& input,
    33:     const Tensor& values,
    35:     Tensor& out) {
    37:   c10::List<std::optional<at::Tensor>> indices_list(indices);
    55:   Tensor x = tf.make(
    75:   optional<Tensor> indices[] = {
    76:       optional<Tensor>(tfl.make({1, 3}, {0, 1, 2})),
    [all …]

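Both index tests exercise advanced indexing through a list of optional index tensors. A hedged sketch of the same semantics in the ATen C++ API (the executorch out-variant signatures above differ):

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      auto t = torch::zeros({3, 3}, torch::kDouble);

      // Tensor indices, as the tests above build via tfl.make(...).
      auto rows = torch::tensor({0, 1});
      auto cols = torch::tensor({1, 0});

      t.index_put_({rows, cols}, torch::tensor({5.0, 7.0}));  // t[0,1]=5, t[1,0]=7
      auto picked = t.index({rows, cols});                    // gathers t[0,1], t[1,0]

      std::cout << t << picked;
    }
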
/external/pytorch/torch/csrc/api/include/torch/nn/modules/
  rnn.h
     54:   std::vector<Tensor> all_weights() const;
     66:   void check_input(const Tensor& input, const Tensor& batch_sizes) const;
     69:       const Tensor& input,
     70:       const Tensor& batch_sizes) const;
     73:       const Tensor& hx,
     77:   void check_forward_args(Tensor input, Tensor hidden, Tensor batch_sizes)
     80:   Tensor permute_hidden(Tensor hx, const Tensor& permutation) const;
     87:   std::vector<Tensor> flat_weights_;
    111:   std::tuple<Tensor, Tensor> forward(const Tensor& input, Tensor hx = {});
    114:   FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())})
    [all …]

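forward() takes the input sequence plus an optional initial hidden state, defaulted through FORWARD_HAS_DEFAULT_ARGS. A hedged usage sketch with the plain RNN module:

    #include <torch/torch.h>
    #include <iostream>

    int main() {
      // An RNN over sequences of 10-dim inputs with a 20-dim hidden state.
      torch::nn::RNN rnn(torch::nn::RNNOptions(/*input_size=*/10, /*hidden_size=*/20));

      auto input = torch::randn({5, 3, 10});    // [seq_len, batch, input_size]
      auto [output, hn] = rnn->forward(input);  // hx defaults to zeros

      std::cout << output.sizes() << " " << hn.sizes() << "\n";  // [5,3,20] [1,3,20]
    }
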
/external/pytorch/aten/src/ATen/native/transformers/
  attention.h
    11: using fused_sdp_choice_fn = int64_t (*)(const Tensor& query_, const Tensor& key, const Tensor& valu…
    12:     const std::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, std::optional<double> s…
    16: TORCH_API Tensor bmm_nt(const Tensor& a, const Tensor& b);
    17: TORCH_API Tensor masked_softmax(
    18:     Tensor& attn_scores,
    19:     std::optional<Tensor> attn_mask,
    20:     const Tensor& query,
    35: TORCH_API Tensor transform0213_gemm_nt_bias(
    36:     const Tensor& a,
    37:     const Tensor& b,
    [all …]

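fused_sdp_choice_fn is the hook that picks among the math, flash, and memory-efficient SDPA backends. A sketch of steering that choice from C++; the at::globalContext() setSDPUse* setters are an assumption about recent libtorch versions and may differ in older releases:

    #include <torch/torch.h>

    int main() {
      auto q = torch::randn({1, 4, 32, 64});
      auto k = torch::randn({1, 4, 32, 64});
      auto v = torch::randn({1, 4, 32, 64});

      // Disable the fused backends so dispatch falls back to the portable math path
      // (assumed Context API; verify against your libtorch version).
      at::globalContext().setSDPUseFlash(false);
      at::globalContext().setSDPUseMemEfficient(false);
      at::globalContext().setSDPUseMath(true);

      auto out = at::scaled_dot_product_attention(q, k, v);
      return out.defined() ? 0 : 1;
    }
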
/external/pytorch/aten/src/ATen/native/cuda/
  Normalization.cu
     46: ScalarType first_type(const Tensor& arg, const Args&... parameters) {
     52: bool is_mixed_type(const Tensor& input, const Args&... parameters) {
     58: inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) {
     72: inline Impl batch_norm_choose_impl(const Tensor& self) {
     88: inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) {
     98:     const Tensor& out, const Tensor& self, const std::optional<Tensor>& weight_opt,
     99:     const std::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) {
    102:   c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
    103:   c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt);
    142:   auto as_nd = [&](const Tensor& t) {
    [all …]