Lines Matching full:dim

193 TORCH_META_FUNC2(all, dim)(const Tensor& self, int64_t dim, bool keepdim) {  in TORCH_META_FUNC2()  argument
194 allany_meta(*this, "all", self, dim, keepdim); in TORCH_META_FUNC2()
197 TORCH_META_FUNC2(all, dims)(const Tensor& self, OptionalIntArrayRef dim, bool keepdim) { in TORCH_META_FUNC2()
198 allany_meta(*this, "all", self, dim, keepdim); in TORCH_META_FUNC2()
205 TORCH_META_FUNC2(any, dim)(const Tensor& self, int64_t dim, bool keepdim) { in TORCH_META_FUNC2() argument
206 allany_meta(*this, "any", self, dim, keepdim); in TORCH_META_FUNC2()
209 TORCH_META_FUNC2(any, dims)(const Tensor& self, OptionalIntArrayRef dim, bool keepdim) { in TORCH_META_FUNC2()
210 allany_meta(*this, "any", self, dim, keepdim); in TORCH_META_FUNC2()
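
The hits above are the structured-kernel meta functions for all/any with a single dim and with a dim list; allany_meta only validates dim and sizes the boolean output. Below is a minimal standalone sketch of the reduction semantics, not the ATen kernel; the helper names and the row-major 2-D layout are assumptions made for illustration:

```cpp
// Illustrative sketch only: logical all/any reduction along dim = 1 of a
// row-major rows x cols array, mirroring what the all.dim/any.dim overloads
// compute. Not the ATen implementation.
#include <vector>
#include <cstddef>

std::vector<bool> all_dim1(const std::vector<double>& data,
                           std::size_t rows, std::size_t cols) {
  std::vector<bool> out(rows, true);
  for (std::size_t r = 0; r < rows; ++r)
    for (std::size_t c = 0; c < cols; ++c)
      out[r] = out[r] && (data[r * cols + c] != 0.0);  // AND-reduce the row
  return out;
}

std::vector<bool> any_dim1(const std::vector<double>& data,
                           std::size_t rows, std::size_t cols) {
  std::vector<bool> out(rows, false);
  for (std::size_t r = 0; r < rows; ++r)
    for (std::size_t c = 0; c < cols; ++c)
      out[r] = out[r] || (data[r * cols + c] != 0.0);  // OR-reduce the row
  return out;
}
```
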
220 const std::optional<int64_t>& dim) { in check_argmax_argmin() argument
221 if (dim.has_value()) { in check_argmax_argmin()
222 auto dim_ = maybe_wrap_dim(dim.value(), self.dim()); in check_argmax_argmin()
227 name, ": Expected reduction dim to be specified for input.numel() == 0."); in check_argmax_argmin()
232 (const Tensor& self, std::optional<int64_t> dim, bool keepdim) { in TORCH_META_FUNC()
233 check_argmax_argmin("argmax()", self, dim); in TORCH_META_FUNC()
234 resize_reduction(*this, self, optional_to_arrayref(dim), keepdim, kLong); in TORCH_META_FUNC()
238 (const Tensor& self, std::optional<int64_t> dim, bool keepdim) { in TORCH_META_FUNC()
239 check_argmax_argmin("argmin()", self, dim); in TORCH_META_FUNC()
240 resize_reduction(*this, self, optional_to_arrayref(dim), keepdim, kLong); in TORCH_META_FUNC()
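
check_argmax_argmin wraps the optional dim and rejects an unspecified dim on empty inputs, after which the meta functions size a kLong result. A hedged sketch of the reduction itself follows, for a row-major 2-D array reduced along dim = 1; the function name and the tie-breaking shown are illustrative only, not the ATen stub:

```cpp
// Sketch of argmax along dim = 1 of a row-major rows x cols array: for each
// row, return the index of a maximal element as int64_t (the kLong result
// dtype requested by the meta function). ATen raises on an empty reduction
// dim; this sketch simply returns an empty result.
#include <vector>
#include <cstdint>
#include <cstddef>

std::vector<int64_t> argmax_dim1(const std::vector<float>& data,
                                 std::size_t rows, std::size_t cols) {
  if (rows == 0 || cols == 0) return {};
  std::vector<int64_t> out(rows, 0);
  for (std::size_t r = 0; r < rows; ++r) {
    float best = data[r * cols];
    for (std::size_t c = 1; c < cols; ++c) {
      if (data[r * cols + c] > best) {     // '>' keeps the first maximum here
        best = data[r * cols + c];
        out[r] = static_cast<int64_t>(c);
      }
    }
  }
  return out;
}
```
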
247 int64_t dim, in meta_func_cum_ops() argument
249 // Checking whether 'dim' is valid. in meta_func_cum_ops()
250 maybe_wrap_dim(dim, self.dim()); in meta_func_cum_ops()
267 (const Tensor& self, int64_t dim, std::optional<ScalarType> dtype) { in TORCH_META_FUNC()
268 meta_func_cum_ops(*this, "cumsum", self, dim, dtype); in TORCH_META_FUNC()
272 (const Tensor& self, int64_t dim, std::optional<ScalarType> dtype) { in TORCH_META_FUNC()
273 meta_func_cum_ops(*this, "cumprod", self, dim, dtype); in TORCH_META_FUNC()
284 int64_t dim, in TORCH_META_FUNC2()
288 resize_reduction(*this, self, dim, keepdim, out_dtype); in TORCH_META_FUNC2()
291 TORCH_META_FUNC2(mean, dim) in TORCH_META_FUNC2() argument
327 (const Tensor& self, const OptionalScalarRef p, IntArrayRef dim, bool keepdim) { in TORCH_META_FUNC2()
334 resize_reduction(*this, self, dim, keepdim, out_dtype); in TORCH_META_FUNC2()
340 IntArrayRef dim, in TORCH_META_FUNC2()
349 resize_reduction(*this, self, dim, keepdim, out_dtype); in TORCH_META_FUNC2()
356 auto dim = maybe_wrap_dim(dim_opt.value(), self.ndimension()); in TORCH_META_FUNC() local
357 native::zero_numel_check_dims(self, dim, "aminmax"); in TORCH_META_FUNC()
358 shape = get_reduction_shape(self, dim, keepdim); in TORCH_META_FUNC()
374 (const Tensor& self, IntArrayRef dim, bool keepdim) { in TORCH_META_FUNC()
381 at::native::zero_numel_check_dims(self, dim, "amax()"); in TORCH_META_FUNC()
384 resize_reduction(*this, self, dim, keepdim, out_dtype); in TORCH_META_FUNC()
388 (const Tensor& self, IntArrayRef dim, bool keepdim) { in TORCH_META_FUNC()
395 at::native::zero_numel_check_dims(self, dim, "amin()"); in TORCH_META_FUNC()
398 resize_reduction(*this, self, dim, keepdim, out_dtype); in TORCH_META_FUNC()
445 Tensor _logcumsumexp_cpu(const Tensor& self, int64_t dim) { in _logcumsumexp_cpu() argument
447 return _logcumsumexp_out_cpu(self, dim, result); in _logcumsumexp_cpu()
450 Tensor& _logcumsumexp_out_cpu(const Tensor& self, int64_t dim, Tensor& result) { in _logcumsumexp_out_cpu() argument
451 logcumsumexp_stub(self.device().type(), result, self, dim); in _logcumsumexp_out_cpu()
455 Tensor logcumsumexp(const Tensor& self, int64_t dim) { in logcumsumexp() argument
458 return at::_logcumsumexp(self, dim); in logcumsumexp()
464 Tensor& logcumsumexp_out(const Tensor& self, int64_t dim, Tensor& result) { in logcumsumexp_out() argument
468 at::_logcumsumexp_out(result, self.toType(result.scalar_type()), dim); in logcumsumexp_out()
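
_logcumsumexp_cpu dispatches to logcumsumexp_stub, and the public logcumsumexp casts the input to the result dtype first. The operation is a running log-sum-exp, which has to be accumulated stably. A minimal 1-D sketch follows (double precision, per-step logaddexp with the max-shift trick; the function name is made up and this is not the ATen stub):

```cpp
// Running log(sum(exp(x[0..i]))) over a 1-D sequence, accumulated stably in
// log space via logaddexp(a, b) = max(a, b) + log1p(exp(-|a - b|)).
#include <vector>
#include <cmath>
#include <algorithm>
#include <limits>
#include <cstddef>

std::vector<double> logcumsumexp_1d(const std::vector<double>& x) {
  std::vector<double> out(x.size());
  double acc = -std::numeric_limits<double>::infinity();
  for (std::size_t i = 0; i < x.size(); ++i) {
    double a = std::max(acc, x[i]);
    double b = std::min(acc, x[i]);
    // If both operands are -inf, the result stays -inf; otherwise shift by
    // the larger operand so exp() never overflows.
    acc = (std::isinf(a) && a < 0) ? b : a + std::log1p(std::exp(b - a));
    out[i] = acc;
  }
  return out;
}
```
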
477 int64_t dim, in impl_func_cum_ops() argument
481 if (self.dim() == 0) { in impl_func_cum_ops()
486 dim = maybe_wrap_dim(dim, self.dim()); in impl_func_cum_ops()
487 stub(self.device().type(), result, self.to(result.scalar_type()), dim); in impl_func_cum_ops()
493 int64_t dim, in TORCH_IMPL_FUNC()
496 impl_func_cum_ops(self, dim, result, cumsum_stub); in TORCH_IMPL_FUNC()
501 int64_t dim, in TORCH_IMPL_FUNC()
504 impl_func_cum_ops(self, dim, result, cumprod_stub); in TORCH_IMPL_FUNC()
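
impl_func_cum_ops wraps dim, casts the input to the result dtype, and dispatches to the cumsum/cumprod stubs. At its core each is an inclusive prefix scan; a trivial 1-D sketch using only the standard library (illustrative, not the ATen dispatch path):

```cpp
// Inclusive prefix scans, the core of cumsum/cumprod along a contiguous dim.
#include <vector>
#include <numeric>
#include <functional>

std::vector<long long> cumsum_1d(const std::vector<long long>& x) {
  std::vector<long long> out(x.size());
  std::partial_sum(x.begin(), x.end(), out.begin());        // running sum
  return out;
}

std::vector<double> cumprod_1d(const std::vector<double>& x) {
  std::vector<double> out(x.size());
  std::partial_sum(x.begin(), x.end(), out.begin(),
                   std::multiplies<double>());               // running product
  return out;
}
```
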
507 static Tensor reversed_cumsum(const Tensor& w, int64_t dim) { in reversed_cumsum() argument
508 return w.flip(dim).cumsum(dim).flip(dim); in reversed_cumsum()
511 Tensor cumprod_backward(const Tensor& grad, const Tensor& input, int64_t dim, const Tensor& output)… in cumprod_backward() argument
602 dim = at::maybe_wrap_dim(dim, input.dim()); in cumprod_backward()
603 const int64_t dim_size = input.sym_sizes()[dim].guard_int(__FILE__, __LINE__); in cumprod_backward()
621 return reversed_cumsum(w, dim).div(input_conj); in cumprod_backward()
642 // indices = (cumsum == 1).max(dim, keepdim=True).indices in cumprod_backward()
644 // zeros_like(indices).scatter_(dim, indices, 1.) & cumsum == 1 in cumprod_backward()
648 const auto cumsum = is_zero.cumsum(dim); in cumprod_backward()
655 reversed_cumsum(w.masked_fill(~mask, 0.), dim).div_(input_conj).masked_select(mask)); in cumprod_backward()
666 const auto first_zero_index = std::get<1>(mask.max(dim, /*keepdim*/ true)); in cumprod_backward()
668 .scatter_(dim, first_zero_index, /*src*/ 1) in cumprod_backward()
678 input_conj.masked_fill(~mask, 1.).cumprod(dim) in cumprod_backward()
680 .sum(dim, /*keepdim*/true) in cumprod_backward()
681 … .mul_(at::gather(output_conj, dim, (first_zero_index - 1).relu_()) in cumprod_backward()
717 ones_size[dim] = 1; in cumprod_backward()
723 prods_from_k_plus_1 = at::cumprod(input_conj.slice(dim, k + 1), dim); in cumprod_backward()
724 omitted_products = at::cat({ones, std::move(prods_from_k_plus_1)}, dim); in cumprod_backward()
726 const Tensor prods_until_k = at::prod(input_conj.slice(dim, 0, k), dim, true); in cumprod_backward()
729 const Tensor prods_until_k = at::prod(input_conj.slice(dim, 0, k), dim, true); in cumprod_backward()
730 prods_from_k_plus_1 = at::cumprod(input_conj.slice(dim, k+1), dim); in cumprod_backward()
732 omitted_products = at::cat({prods_until_k, omitted_products}, dim); in cumprod_backward()
736 // as input, except on the dimension dim where it's in cumprod_backward()
738 TORCH_CHECK(omitted_products.sym_size(dim) == dim_size - k); in cumprod_backward()
740 auto grad_slice = at::sum(grad.slice(dim, k) * omitted_products, dim); in cumprod_backward()
744 grad_input.select(dim, k).copy_(grad_slice); in cumprod_backward()
748 return are_inputs_tensors_sublcass ? at::stack(grad_inputs, dim) : std::move(grad_input); in cumprod_backward()
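
Source lines 507–748 above are reversed_cumsum and cumprod_backward. The fast path visible in the comments is: when no input element is zero, grad_input = reversed_cumsum(grad * output, dim) / input, where reversed_cumsum is a suffix sum implemented as flip·cumsum·flip. Here is a 1-D sketch of that path only; the zero-handling branches and the tensor-subclass loop are not reproduced, and the names are illustrative:

```cpp
// cumprod backward, no-zeros path: for output[j] = prod_{i<=j} input[i],
// grad_input[k] = (sum_{j>=k} grad[j] * output[j]) / input[k],
// i.e. reversed_cumsum(grad * output) divided elementwise by input.
#include <vector>
#include <cstddef>

std::vector<double> cumprod_backward_no_zeros(const std::vector<double>& grad,
                                              const std::vector<double>& input) {
  const std::size_t n = input.size();
  std::vector<double> output(n);
  double p = 1.0;
  for (std::size_t j = 0; j < n; ++j) output[j] = (p *= input[j]);  // forward cumprod

  std::vector<double> grad_input(n);
  double suffix = 0.0;                       // suffix sum of grad * output
  for (std::size_t k = n; k-- > 0;) {
    suffix += grad[k] * output[k];
    grad_input[k] = suffix / input[k];       // valid because input[k] != 0
  }
  return grad_input;
}
```
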
788 void cummax_helper_cpu(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) { in cummax_helper_cpu() argument
792 …at::native::tensor_dim_apply3<scalar_t, int64_t>(self, values, indices, dim, cummax_cummin_helper<… in cummax_helper_cpu()
796 std::tuple<Tensor&, Tensor&> cummax_out(const Tensor& self, int64_t dim, Tensor& values, Tensor& in… in cummax_out() argument
803 if(self.dim() == 0) { in cummax_out()
807 dim = maybe_wrap_dim(dim, self.dim()); in cummax_out()
808 at::_cummax_helper(self, values, indices, dim); in cummax_out()
816 std::tuple<Tensor, Tensor> cummax(const Tensor& self, int64_t dim) { in cummax() argument
819 at::cummax_out(values, indices, self, dim); in cummax()
823 void cummin_helper_cpu(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) { in cummin_helper_cpu() argument
827 …at::native::tensor_dim_apply3<scalar_t, int64_t>(self, values, indices, dim, cummax_cummin_helper<… in cummin_helper_cpu()
831 std::tuple<Tensor&, Tensor&> cummin_out(const Tensor& self, int64_t dim, Tensor& values, Tensor& in… in cummin_out() argument
838 if(self.dim() == 0) { in cummin_out()
842 dim = maybe_wrap_dim(dim, self.dim()); in cummin_out()
843 at::_cummin_helper(self, values, indices, dim); in cummin_out()
851 std::tuple<Tensor, Tensor> cummin(const Tensor& self, int64_t dim) { in cummin() argument
854 at::cummin_out(values, indices, self, dim); in cummin()
858 …r cummaxmin_backward(const Tensor& grad, const Tensor& input, const Tensor& indices, int64_t dim) { in cummaxmin_backward() argument
867 return result.scatter_add(dim, indices, grad); in cummaxmin_backward()
869 return result.scatter_add_(dim, indices, grad); in cummaxmin_backward()
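
cummax/cummin record both the running extremum and the index where it was attained; cummaxmin_backward then routes each grad element back to that recorded index with a scatter_add. A 1-D sketch of both follows (illustrative names; the tie-breaking here keeps the most recent index, which may differ from the ATen helper):

```cpp
// Running maximum with argmax indices, plus the scatter-add backward that
// cummaxmin_backward performs along dim.
#include <vector>
#include <cstdint>
#include <cstddef>
#include <utility>

std::pair<std::vector<double>, std::vector<int64_t>>
cummax_1d(const std::vector<double>& x) {
  if (x.empty()) return {};
  std::vector<double> values(x.size());
  std::vector<int64_t> indices(x.size());
  double best = x[0];
  int64_t best_i = 0;
  for (std::size_t i = 0; i < x.size(); ++i) {
    if (x[i] >= best) { best = x[i]; best_i = static_cast<int64_t>(i); }
    values[i] = best;
    indices[i] = best_i;
  }
  return {values, indices};
}

std::vector<double> cummaxmin_backward_1d(const std::vector<double>& grad,
                                          const std::vector<int64_t>& indices,
                                          std::size_t input_size) {
  std::vector<double> grad_input(input_size, 0.0);
  for (std::size_t i = 0; i < grad.size(); ++i)
    grad_input[static_cast<std::size_t>(indices[i])] += grad[i];  // scatter_add
  return grad_input;
}
```
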
872 …or& self, const std::optional<Tensor>& prepend, const std::optional<Tensor>& append, int64_t dim) { in prepend_append_on_dim() argument
876 return at::cat({self, append.value()}, dim); in prepend_append_on_dim()
878 return at::cat({prepend.value(), self}, dim); in prepend_append_on_dim()
880 return at::cat({prepend.value(), self, append.value()}, dim); in prepend_append_on_dim()
884 …d diff_check_compatible_shape(const Tensor& self, const std::optional<Tensor>&other, int64_t dim) { in diff_check_compatible_shape() argument
888 int64_t wrapped_dim = maybe_wrap_dim(dim, self.dim(), false); in diff_check_compatible_shape()
891 other.value().dim() == self.dim(), in diff_check_compatible_shape()
894 for (const auto i : c10::irange(other.value().dim())) { in diff_check_compatible_shape()
908 static inline void diff_check(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tenso… in diff_check() argument
911 self.dim() >= 1, in diff_check()
918 diff_check_compatible_shape(self, prepend, dim); in diff_check()
919 diff_check_compatible_shape(self, append, dim); in diff_check()
922 static inline Tensor diff_helper(const Tensor& self, int64_t n, int64_t dim) { in diff_helper() argument
929 auto out_len = self.sym_size(dim) - 1; in diff_helper()
932 n = n > self.sym_size(dim) ? self.sym_size(dim).guard_int(__FILE__, __LINE__) : n; in diff_helper()
937 at::narrow_symint(result, dim, 1, out_len), in diff_helper()
938 at::narrow_symint(result, dim, 0, out_len) in diff_helper()
941 … result = at::narrow_symint(result, dim, 1, out_len) - at::narrow_symint(result, dim, 0, out_len); in diff_helper()
949 Tensor diff(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>& prepend, const… in diff() argument
950 diff_check(self, n, dim, prepend, append); in diff()
952 return diff_helper(self, n, dim); in diff()
954 auto a = prepend_append_on_dim(self, prepend, append, dim); in diff()
955 return diff_helper(a, n, dim); in diff()
959 static inline Tensor& diff_out_helper(const Tensor& self, int64_t n, int64_t dim, Tensor& result) { in diff_out_helper() argument
968 n = n > self.sym_size(dim) ? self.sym_size(dim).guard_int(__FILE__, __LINE__) : n; in diff_out_helper()
969 const auto out_len = self.sym_size(dim) - n; in diff_out_helper()
973 prev_result = diff_helper(self, n - 1, dim); in diff_out_helper()
979 at::narrow_symint(prev_result, dim, 1, out_len), in diff_out_helper()
980 at::narrow_symint(prev_result, dim, 0, out_len) in diff_out_helper()
985 at::narrow_symint(prev_result, dim, 1, out_len), in diff_out_helper()
986 at::narrow_symint(prev_result, dim, 0, out_len) in diff_out_helper()
993 Tensor& diff_out(const Tensor& self, int64_t n, int64_t dim, const std::optional<Tensor>& prepend, … in diff_out() argument
994 diff_check(self, n, dim, prepend, append); in diff_out()
996 return diff_out_helper(self, n, dim, result); in diff_out()
998 auto a = prepend_append_on_dim(self, prepend, append, dim); in diff_out()
999 return diff_out_helper(a, n, dim, result); in diff_out()
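
diff_check validates n, dim, and the prepend/append shapes; prepend_append_on_dim concatenates them; diff_helper then computes the n-th discrete difference by repeatedly subtracting adjacent narrows, clamping n to the size along dim. A 1-D sketch of that loop (illustrative only, not the symbolic-int version above):

```cpp
// n-th discrete difference along a 1-D sequence: each pass replaces x[i]
// with x[i+1] - x[i] (narrow(1, len) - narrow(0, len)) and shortens the
// sequence by one, exactly n times, with n clamped to the length.
#include <vector>
#include <cstddef>
#include <algorithm>

std::vector<double> diff_1d(std::vector<double> x, std::size_t n) {
  n = std::min(n, x.size());            // mirrors the clamp on self.sym_size(dim)
  for (std::size_t pass = 0; pass < n; ++pass) {
    for (std::size_t i = 0; i + 1 < x.size(); ++i)
      x[i] = x[i + 1] - x[i];
    x.pop_back();                        // each pass drops one element
  }
  return x;
}
```
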
1003 …Tensor& self, std::optional<int64_t> spacing_size, at::OptionalIntArrayRef dim, int64_t edge_orde… in pre_check_gradient() argument
1006 if (spacing_size.has_value() && !dim.has_value()) { in pre_check_gradient()
1009 TORCH_CHECK(spacing_size.value() == self.dim(), in pre_check_gradient()
1011 "of length equal to 'self.dim() = ", self.dim(), "', since dim argument ", in pre_check_gradient()
1014 if (spacing_size.has_value() && dim.has_value()) { in pre_check_gradient()
1015 TORCH_CHECK(spacing_size.value() == static_cast<int64_t>(dim.value().size()), in pre_check_gradient()
1016 …ng and dim arguments to have the same length, but got a spacing argument of length ", spacing_size… in pre_check_gradient()
1019 if (dim.has_value()) { in pre_check_gradient()
1020 // The following function get called to check whether dim argument satisfies prerequisites. in pre_check_gradient()
1022 dim_list_to_bitset(dim.value(), self.dim()); in pre_check_gradient()
1023 for (const auto i : c10::irange(dim.value().size())) { in pre_check_gradient()
1024 …TORCH_CHECK(self.size(dim.value()[i]) >= edge_order + 1, "torch.gradient expected each dimension s… in pre_check_gradient()
1027 for (const auto i : c10::irange(self.dim())) { in pre_check_gradient()
1033 …r> gradient_helper(const Tensor& self, TensorList coordinates, IntArrayRef dim, int64_t edge_order… in gradient_helper() argument
1039 for (const auto i : c10::irange(dim.size())) { in gradient_helper()
1040 …rdinates[i].dim() == 1, "torch.gradient expected each element of spacing to have one dimension, bu… in gradient_helper()
1041 int64_t direction = maybe_wrap_dim(dim[i], self.dim()); in gradient_helper()
1043 std::vector<int64_t> shape(self.dim(),1); in gradient_helper()
1074 …ent_helper_float(const Tensor& self, ArrayRef<Scalar> spacing, IntArrayRef dim, int64_t edge_order… in gradient_helper_float() argument
1076 for (const auto i : c10::irange(dim.size())) { in gradient_helper_float()
1077 int64_t direction = maybe_wrap_dim(dim[i], self.dim()); in gradient_helper_float()
1094 static std::vector<int64_t> gradient_dim_preprocess(const Tensor& self, std::optional<int64_t> dim)… in gradient_dim_preprocess() argument
1095 …// if gradient dim is provided as an integer, then we need to compute gradient only on this direct… in gradient_dim_preprocess()
1097 …// Finally, if dim is provided as vector of ints, then it is not expected to be called by this fun… in gradient_dim_preprocess()
1098 if (dim.has_value()) { in gradient_dim_preprocess()
1099 return std::vector<int64_t>{dim.value()}; in gradient_dim_preprocess()
1102 std::vector<int64_t> axis(self.dim()); in gradient_dim_preprocess()
1107 std::vector<Tensor> gradient(const Tensor& self, TensorList coordinates, IntArrayRef dim, int64_t e… in gradient() argument
1110 at::OptionalIntArrayRef(dim), in gradient()
1112 return gradient_helper(self, coordinates, dim, edge_order); in gradient()
1115 std::vector<Tensor> gradient(const Tensor& self, TensorList coordinates, std::optional<int64_t> dim in gradient() argument
1116 const auto processed_dim = gradient_dim_preprocess(self, dim); in gradient()
1119 dim.has_value() ? at::OptionalIntArrayRef(processed_dim) : std::nullopt, in gradient()
1124 std::vector<Tensor> gradient(const Tensor& self, c10::ArrayRef<Scalar> spacing, IntArrayRef dim, in… in gradient() argument
1127 at::OptionalIntArrayRef(dim), in gradient()
1129 return gradient_helper_float(self, spacing, dim, edge_order); in gradient()
1132 …dient(const Tensor& self, ArrayRef<Scalar> spacing, std::optional<int64_t> dim, int64_t edge_order… in gradient() argument
1133 const auto processed_dim = gradient_dim_preprocess(self, dim); in gradient()
1136 dim.has_value() ? at::OptionalIntArrayRef(processed_dim) : std::nullopt, in gradient()
1141 std::vector<Tensor> gradient(const Tensor& self, const Scalar& unit_size, IntArrayRef dim, int64_t … in gradient() argument
1142 // When spacing is given as scalar, while dim is given as IntArrayRef, scalar value need to in gradient()
1143 // be taken as unit size at every given dimension element of - dim. in gradient()
1144 std::vector<Scalar> spacing(dim.size(), unit_size); in gradient()
1147 at::OptionalIntArrayRef(dim), in gradient()
1149 return gradient_helper_float(self, spacing, dim, edge_order); in gradient()
1152 …nsor& self, const std::optional<Scalar>& unit_size, std::optional<int64_t> dim, int64_t edge_order… in gradient() argument
1153 const auto processed_dim = gradient_dim_preprocess(self, dim); in gradient()
1155 …// When dim has integer value it implies we are looking for gradient in the specific direction, ho… in gradient()
1157 std::vector<Scalar> spacing(dim.has_value() ? 1 : self.dim(), in gradient()
1161 dim.has_value() ? at::OptionalIntArrayRef(processed_dim) : std::nullopt, in gradient()
1166 std::vector<Tensor> gradient(const Tensor& self, IntArrayRef dim, int64_t edge_order) { in gradient() argument
1167 std::vector<Scalar> spacing(dim.size(), 1.0) ; in gradient()
1170 at::OptionalIntArrayRef(dim), in gradient()
1172 return gradient_helper_float(self, spacing, dim, edge_order); in gradient()
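
The gradient overloads above normalize spacing and dim before calling gradient_helper / gradient_helper_float. For uniform spacing and edge_order == 1 the numeric scheme is second-order central differences in the interior with one-sided differences at the two edges; the sketch below assumes exactly that case (coordinate tensors for non-uniform spacing and edge_order == 2 are not reproduced):

```cpp
// 1-D numeric gradient for uniform spacing h, edge_order == 1.
#include <vector>
#include <cstddef>

std::vector<double> gradient_1d(const std::vector<double>& f, double h) {
  const std::size_t n = f.size();
  std::vector<double> g(n, 0.0);
  if (n < 2) return g;                        // needs at least edge_order + 1 points
  g[0]     = (f[1] - f[0]) / h;               // forward difference at the left edge
  g[n - 1] = (f[n - 1] - f[n - 2]) / h;       // backward difference at the right edge
  for (std::size_t i = 1; i + 1 < n; ++i)
    g[i] = (f[i + 1] - f[i - 1]) / (2.0 * h); // central difference in the interior
  return g;
}
```
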
1189 for (const auto dim : c10::irange(0, 2)) { in should_use_acc_buffer() local
1190 if (out_strides[dim] != 0) { in should_use_acc_buffer()
1211 …// the intermediate sums is forced to do accumulation in the second reduced dim with lower precisi… in TORCH_IMPL_FUNC()
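
The truncated comment at source line 1211 is about accumulation precision: writing partial sums straight into a low-precision output forces the accumulation itself to happen at that precision, which is what should_use_acc_buffer guards against by using a wider accumulator. A small self-contained illustration of the effect (not the ATen heuristic; the constants are arbitrary):

```cpp
// Summing many small float terms directly into a float loses low-order bits,
// while accumulating in double and casting once at the end does not.
#include <cstdio>

int main() {
  const int n = 10'000'000;
  float  direct = 0.0f;   // accumulate at output precision
  double acc    = 0.0;    // accumulate in a wider buffer
  for (int i = 0; i < n; ++i) {
    direct += 1e-4f;      // rounding error builds up as the sum grows
    acc    += 1e-4f;
  }
  std::printf("float acc: %.6f  double acc: %.6f  reference: %.6f\n",
              direct, acc, n * 1e-4);
  return 0;
}
```
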
1227 Tensor sum(const Tensor& self, DimnameList dim, bool keepdim, std::optional<ScalarType> dtype) { in sum() argument
1228 return at::sum(self, dimnames_to_positions(self, dim), keepdim, dtype); in sum()
1231 Tensor& sum_out(const Tensor& self, DimnameList dim, in sum_out() argument
1233 return at::sum_out(result, self, dimnames_to_positions(self, dim), keepdim, opt_dtype); in sum_out()
1236 Tensor& nansum_out(const Tensor& self, at::OptionalIntArrayRef dim, in nansum_out() argument
1245 return at::sum_out(result, self, dim, keepdim, opt_dtype); in nansum_out()
1249 auto iter = make_reduction("nansum", result, self, dim, keepdim, dtype); in nansum_out()
1258 Tensor nansum(const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, std::optional<ScalarTy… in nansum() argument
1260 Tensor result = create_reduction_result(self, dim, keepdim, dtype); in nansum()
1261 return at::native::nansum_out(self, dim, keepdim, dtype, result); in nansum()
1292 TORCH_CHECK(self.dim() == 2, "trace: expected a matrix, but got tensor with dim ", self.dim()); in trace_cpu()
1324 int64_t dim, in TORCH_IMPL_FUNC()
1328 impl_func_prod(self, dim, keepdim, dtype, result); in TORCH_IMPL_FUNC()
1339 Tensor prod(const Tensor& self, Dimname dim, bool keepdim, std::optional<ScalarType> dtype) { in prod() argument
1340 return at::prod(self, dimname_to_position(self, dim), keepdim, dtype); in prod()
1343 Tensor& prod_out(const Tensor& self, Dimname dim, in prod_out() argument
1345 return at::prod_out(result, self, dimname_to_position(self, dim), keepdim, opt_dtype); in prod_out()
1364 auto dim = opt_dim.value(); in TORCH_IMPL_FUNC() local
1365 for (auto d : dim) { in TORCH_IMPL_FUNC()
1408 Tensor mean(const Tensor& self, DimnameList dim, bool keepdim, std::optional<ScalarType> dtype) { in mean() argument
1409 return at::mean(self, dimnames_to_positions(self, dim), keepdim, dtype); in mean()
1412 Tensor& mean_out(const Tensor& self, DimnameList dim, in mean_out() argument
1414 return at::mean_out(result, self, dimnames_to_positions(self, dim), keepdim, opt_dtype); in mean_out()
1429 at::OptionalIntArrayRef dim, in nanmean_out() argument
1437 const auto factor = at::native::isnan(self).logical_not_().sum(dim, keepdim); in nanmean_out()
1438 at::native::nansum_out(self, dim, keepdim, opt_dtype, result).div_(factor); in nanmean_out()
1444 at::OptionalIntArrayRef dim, in nanmean() argument
1452 at::native::isnan(self.detach()).logical_not_().sum(dim, keepdim); in nanmean()
1453 return at::nansum(self, dim, keepdim, opt_dtype).div(factor); in nanmean()
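
nansum treats NaN as zero, and nanmean_out divides the nansum by the count of non-NaN elements (the factor built from isnan().logical_not_().sum(dim, keepdim) above). A 1-D sketch of both (illustrative names, not the ATen reduction kernels):

```cpp
// nansum: NaNs contribute zero.  nanmean: nansum divided by the non-NaN count.
#include <vector>
#include <cmath>
#include <cstddef>

double nansum_1d(const std::vector<double>& x) {
  double s = 0.0;
  for (double v : x) if (!std::isnan(v)) s += v;
  return s;
}

double nanmean_1d(const std::vector<double>& x) {
  double s = 0.0;
  std::size_t count = 0;
  for (double v : x) if (!std::isnan(v)) { s += v; ++count; }
  return count ? s / count : std::nan("");   // all-NaN input yields NaN
}
```
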
1524 IntArrayRef dim, in impl_func_norm() argument
1531 at::linalg_vector_norm_out(const_cast<Tensor&>(result), self, p, dim, keepdim, opt_dtype); in impl_func_norm()
1537 IntArrayRef dim, in TORCH_IMPL_FUNC()
1540 impl_func_norm(self, p, dim, keepdim, std::nullopt, result); in TORCH_IMPL_FUNC()
1546 IntArrayRef dim, in TORCH_IMPL_FUNC()
1550 impl_func_norm(self, p, dim, keepdim, dtype, result); in TORCH_IMPL_FUNC()
1556 IntArrayRef dim, in sparse_norm() argument
1558 return at::native_norm(self, p, dim, keepdim, std::nullopt); in sparse_norm()
1564 IntArrayRef dim, in sparse_dtype_norm() argument
1567 return at::native_norm(self, p, dim, keepdim, dtype); in sparse_dtype_norm()
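
impl_func_norm forwards to linalg_vector_norm and the sparse overloads go through native_norm. The underlying quantity is the vector p-norm (sum over |x_i|^p)^(1/p) with the usual special cases; a 1-D sketch follows (illustrative only, with no dim/keepdim/dtype handling):

```cpp
// Vector p-norm with the conventional special cases:
// p = 0 counts non-zeros, p = +/-infinity takes max/min of |x_i|.
#include <vector>
#include <cmath>
#include <limits>
#include <algorithm>

double vector_norm_1d(const std::vector<double>& x, double p) {
  const double inf = std::numeric_limits<double>::infinity();
  if (x.empty()) return 0.0;
  if (p == inf)  { double m = 0.0; for (double v : x) m = std::max(m, std::fabs(v)); return m; }
  if (p == -inf) { double m = inf; for (double v : x) m = std::min(m, std::fabs(v)); return m; }
  if (p == 0.0)  { double c = 0.0; for (double v : x) c += (v != 0.0); return c; }
  double s = 0.0;
  for (double v : x) s += std::pow(std::fabs(v), p);
  return std::pow(s, 1.0 / p);
}
```
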
1612 (const Tensor& self, int64_t dim, bool keepdim, const Tensor& result) { in TORCH_IMPL_FUNC()
1613 allany_impl<1>(self, result, dim, keepdim, and_stub); in TORCH_IMPL_FUNC()
1617 (const Tensor& self, OptionalIntArrayRef dim, bool keepdim, const Tensor& result) { in TORCH_IMPL_FUNC()
1618 allany_impl<1>(self, result, dim, keepdim, and_stub); in TORCH_IMPL_FUNC()
1626 (const Tensor& self, int64_t dim, bool keepdim, const Tensor& result) { in TORCH_IMPL_FUNC()
1627 allany_impl<0>(self, result, dim, keepdim, or_stub); in TORCH_IMPL_FUNC()
1631 (const Tensor& self, OptionalIntArrayRef dim, bool keepdim, const Tensor& result) { in TORCH_IMPL_FUNC()
1632 allany_impl<0>(self, result, dim, keepdim, or_stub); in TORCH_IMPL_FUNC()
1640 Tensor allany_dims_default(const Tensor &self, OptionalIntArrayRef dim, bool keepdim) { in allany_dims_default() argument
1641 // Default implementation in terms of all-reduce or single dim reduce in allany_dims_default()
1642 if (!dim) { in allany_dims_default()
1651 DimVector out_shape(self.dim(), 1); in allany_dims_default()
1657 if (dim->empty()) { in allany_dims_default()
1668 for (auto d : *dim) { in allany_dims_default()
1675 return keepdim ? out : out.squeeze(*dim); in allany_dims_default()
1678 Tensor all_dims_default(const Tensor &self, OptionalIntArrayRef dim, bool keepdim) { in all_dims_default() argument
1679 return allany_dims_default<true>(self, dim, keepdim); in all_dims_default()
1682 Tensor any_dims_default(const Tensor &self, OptionalIntArrayRef dim, bool keepdim) { in any_dims_default() argument
1683 return allany_dims_default<false>(self, dim, keepdim); in any_dims_default()
1687 const Tensor &self, OptionalIntArrayRef dim, bool keepdim, Tensor &result) { in all_dims_out_default() argument
1689 auto tmp = all_dims_default(self, dim, keepdim); in all_dims_out_default()
1695 const Tensor &self, OptionalIntArrayRef dim, bool keepdim, Tensor &result) { in any_dims_out_default() argument
1697 auto tmp = any_dims_default(self, dim, keepdim); in any_dims_out_default()
1702 TORCH_IMPL_FUNC(amin_out) (const Tensor& self, IntArrayRef dim, bool keepdim, const Tensor& result)… in TORCH_IMPL_FUNC()
1704 meta::make_reduction(self, result, dim, keepdim, self.scalar_type()); in TORCH_IMPL_FUNC()
1710 TORCH_IMPL_FUNC(amax_out) (const Tensor& self, IntArrayRef dim, bool keepdim, const Tensor& result)… in TORCH_IMPL_FUNC()
1712 meta::make_reduction(self, result, dim, keepdim, self.scalar_type()); in TORCH_IMPL_FUNC()
1721 std::optional<int64_t> dim, in argmax_argmin_impl() argument
1729 if (dim.has_value()) { in argmax_argmin_impl()
1730 _dim = maybe_wrap_dim(dim.value(), self.dim()); in argmax_argmin_impl()
1755 std::optional<int64_t> dim, in TORCH_IMPL_FUNC()
1758 argmax_argmin_impl(self, dim, keepdim, result, argmax_stub); in TORCH_IMPL_FUNC()
1763 std::optional<int64_t> dim, in TORCH_IMPL_FUNC()
1766 argmax_argmin_impl(self, dim, keepdim, result, argmin_stub); in TORCH_IMPL_FUNC()
1830 at::OptionalIntArrayRef dim, const std::optional<Scalar>& correction_opt, in std_var_out() argument
1850 dim, in std_var_out()
1861 dim, in std_var_out()
1876 auto iter = make_reduction(fname, result, self, dim, keepdim, dtype); in std_var_out()
1902 at::OptionalIntArrayRef dim, const std::optional<Scalar>& correction_opt, in std_var_mean_out() argument
1929 dim, in std_var_mean_out()
1942 dim, in std_var_mean_out()
1959 make_reduction(fname, result1, result2, self, dim, keepdim, dtype); in std_var_mean_out()
1973 const Tensor& self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { in var_mean() argument
1975 self, /*dim=*/at::OptionalIntArrayRef(dim), in var_mean()
1981 const Tensor& self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { in std_mean() argument
1983 self, /*dim=*/at::OptionalIntArrayRef(dim), in std_mean()
1990 self, /*dim=*/std::nullopt, in std_mean()
1996 self, /*dim=*/std::nullopt, in var_mean()
2000 Tensor& result1, Tensor& result2, const Tensor& self, IntArrayRef dim, in var_mean_out() argument
2003 "var_mean", result1, result2, self, dim, correction, keepdim, false); in var_mean_out()
2012 const Tensor& self, at::OptionalIntArrayRef dim, in var_mean() argument
2017 "var_mean", result1, result2, self, dim, correction, keepdim, false); in var_mean()
2021 const Tensor& self, at::OptionalIntArrayRef dim, in std_mean() argument
2026 "std_mean", result1, result2, self, dim, correction, keepdim, true); in std_mean()
2031 self, /*dim=*/std::nullopt, in var()
2035 Tensor var(const Tensor& self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { in var() argument
2037 self, /*dim=*/at::OptionalIntArrayRef(dim), in var()
2042 Tensor& var_out(const Tensor& self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, Tenso… in var_out() argument
2044 result, self, /*dim=*/at::OptionalIntArrayRef(dim), in var_out()
2051 self, /*dim=*/std::nullopt, /*correction=*/std::make_optional<Scalar>(unbiased ? 1 : 0)); in std()
2054 Tensor std(const Tensor& self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) { in std() argument
2055 return at::std(self, dim, in std()
2064 Tensor std(const Tensor& self, at::OptionalIntArrayRef dim, in std() argument
2067 return std_var_out("std", result, self, dim, correction, keepdim, true); in std()
2071 const Tensor& self, at::OptionalIntArrayRef dim, in std_out() argument
2073 return std_var_out("std", result, self, dim, correction, keepdim, true); in std_out()
2077 const Tensor& self, at::OptionalIntArrayRef dim, in var_out() argument
2079 return std_var_out("var", result, self, dim, correction, keepdim, false); in var_out()
2083 const Tensor& self, at::OptionalIntArrayRef dim, in var() argument
2086 return std_var_out("var", result, self, dim, correction, keepdim, false); in var()
2089 Tensor std(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) { in std() argument
2090 return at::std(self, dimnames_to_positions(self, dim), unbiased, keepdim); in std()
2093 Tensor& std_out(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim, Tensor& result) { in std_out() argument
2094 return at::std_out(result, self, dimnames_to_positions(self, dim), unbiased, keepdim); in std_out()
2097 Tensor var(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim) { in var() argument
2098 return at::var(self, dimnames_to_positions(self, dim), unbiased, keepdim); in var()
2101 Tensor& var_out(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim, Tensor& result) { in var_out() argument
2103 result, self, dimnames_to_positions(self, dim), unbiased, keepdim); in var_out()
2106 std::tuple<Tensor,Tensor> var_mean(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim… in var_mean() argument
2107 return at::var_mean(self, dimnames_to_positions(self, dim), unbiased, keepdim); in var_mean()
2110 std::tuple<Tensor,Tensor> std_mean(const Tensor& self, DimnameList dim, bool unbiased, bool keepdim… in std_mean() argument
2111 return at::std_mean(self, dimnames_to_positions(self, dim), unbiased, keepdim); in std_mean()
2114 Tensor std(const Tensor& self, DimnameList dim, const std::optional<Scalar>& correction, bool keepd… in std() argument
2115 return at::std(self, dimnames_to_positions(self, dim), correction, keepdim); in std()
2118 Tensor& std_out(const Tensor& self, DimnameList dim, const std::optional<Scalar>& correction, in std_out() argument
2120 return at::std_out(result, self, dimnames_to_positions(self, dim), correction, keepdim); in std_out()
2123 Tensor var(const Tensor& self, DimnameList dim, const std::optional<Scalar>& correction, bool keepd… in var() argument
2124 return at::var(self, dimnames_to_positions(self, dim), correction, keepdim); in var()
2127 Tensor& var_out(const Tensor& self, DimnameList dim, const std::optional<Scalar>& correction, in var_out() argument
2130 result, self, dimnames_to_positions(self, dim), correction, keepdim); in var_out()
2133 std::tuple<Tensor,Tensor> var_mean(const Tensor& self, DimnameList dim, in var_mean() argument
2135 return at::var_mean(self, dimnames_to_positions(self, dim), correction, keepdim); in var_mean()
2138 std::tuple<Tensor,Tensor> std_mean(const Tensor& self, DimnameList dim, in std_mean() argument
2140 return at::std_mean(self, dimnames_to_positions(self, dim), correction, keepdim); in std_mean()
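
All of the std/var/std_mean/var_mean overloads above funnel into std_var_out / std_var_mean_out, which take an optional correction (the old unbiased flag maps to correction 1 or 0). A 1-D sketch of the correction handling (illustrative; the real code reduces over dim with keepdim and supports complex inputs):

```cpp
// var = sum((x - mean)^2) / (N - correction); std = sqrt(var).
// correction = 1 is Bessel's correction (unbiased=true); correction = 0 is
// the population variance. Degenerate (NaN/inf) when N <= correction.
#include <vector>
#include <cmath>
#include <cstddef>

double var_1d(const std::vector<double>& x, double correction = 1.0) {
  const double n = static_cast<double>(x.size());
  double mean = 0.0;
  for (double v : x) mean += v;
  mean /= n;
  double ss = 0.0;
  for (double v : x) ss += (v - mean) * (v - mean);
  return ss / (n - correction);
}

double std_1d(const std::vector<double>& x, double correction = 1.0) {
  return std::sqrt(var_1d(x, correction));
}
```
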
2143 Tensor& norm_out(const Tensor& self, const std::optional<Scalar>& p, DimnameList dim, bool keepdim,… in norm_out() argument
2144 return at::norm_out(result, self, p, dimnames_to_positions(self, dim), keepdim, dtype); in norm_out()
2147 Tensor& norm_out(const Tensor& self, const std::optional<Scalar>& p, DimnameList dim, bool keepdim,… in norm_out() argument
2148 return at::norm_out(result, self, p, dimnames_to_positions(self, dim), keepdim); in norm_out()
2151 Tensor norm(const Tensor& self, const std::optional<Scalar>& p, DimnameList dim, bool keepdim, Scal… in norm() argument
2152 return at::norm(self, p, dimnames_to_positions(self, dim), keepdim, dtype); in norm()
2155 Tensor norm(const Tensor& self, const std::optional<Scalar>& p, DimnameList dim, bool keepdim) { in norm() argument
2156 return at::norm(self, p, dimnames_to_positions(self, dim), keepdim); in norm()
2159 Tensor any(const Tensor& self, Dimname dim, bool keepdim) { in any() argument
2162 Tensor& any_out(const Tensor &self, Dimname dim, bool keepdim, Tensor& result) { in any_out() argument
2165 Tensor all(const Tensor& self, Dimname dim, bool keepdim) { in all() argument
2168 Tensor& all_out(const Tensor &self, Dimname dim, bool keepdim, Tensor& result) { in all_out() argument
2179 Tensor logcumsumexp(const Tensor& self, Dimname dim) { in logcumsumexp() argument
2180 return at::logcumsumexp(self, dimname_to_position(self, dim)); in logcumsumexp()
2182 Tensor& logcumsumexp_out(const Tensor& self, Dimname dim, Tensor& result) { in logcumsumexp_out() argument
2183 return at::logcumsumexp_out(result, self, dimname_to_position(self, dim)); in logcumsumexp_out()
2185 Tensor cumsum(const Tensor& self, Dimname dim, std::optional<ScalarType> dtype) { in cumsum() argument
2186 return at::cumsum(self, dimname_to_position(self, dim), dtype); in cumsum()
2188 Tensor& cumsum_(Tensor& self, Dimname dim, std::optional<ScalarType> dtype) { in cumsum_() argument
2189 return at::cumsum_out(self, self, dimname_to_position(self, dim), dtype); in cumsum_()
2191 Tensor& cumsum_out(const Tensor& self, Dimname dim, std::optional<ScalarType> dtype, Tensor& result… in cumsum_out() argument
2192 return at::cumsum_out(result, self, dimname_to_position(self, dim), dtype); in cumsum_out()
2194 Tensor cumprod(const Tensor& self, Dimname dim, std::optional<ScalarType> dtype) { in cumprod() argument
2195 return at::cumprod(self, dimname_to_position(self, dim), dtype); in cumprod()
2197 Tensor& cumprod_(Tensor& self, Dimname dim, std::optional<ScalarType> dtype) { in cumprod_() argument
2198 return at::cumprod_out(self, self, dimname_to_position(self, dim), dtype); in cumprod_()
2200 Tensor& cumprod_out(const Tensor& self, Dimname dim, std::optional<ScalarType> dtype, Tensor& resul… in cumprod_out() argument
2201 return at::cumprod_out(result, self, dimname_to_position(self, dim), dtype); in cumprod_out()
2203 std::tuple<Tensor, Tensor> cummax(const Tensor& self, Dimname dim) { in cummax() argument
2204 return at::cummax(self, dimname_to_position(self, dim)); in cummax()
2206 std::tuple<Tensor&, Tensor&> cummax_out(const Tensor& self, Dimname dim, Tensor& values, Tensor& in… in cummax_out() argument
2207 return at::cummax_out(values, indices, self, dimname_to_position(self, dim)); in cummax_out()
2209 std::tuple<Tensor, Tensor> cummin(const Tensor& self, Dimname dim) { in cummin() argument
2210 return at::cummin(self, dimname_to_position(self, dim)); in cummin()
2212 std::tuple<Tensor&, Tensor&> cummin_out(const Tensor& self, Dimname dim, Tensor& values, Tensor& in… in cummin_out() argument
2213 return at::cummin_out(values, indices, self, dimname_to_position(self, dim)); in cummin_out()
2294 // max(dim), min(dim), topk(dim), mode(dim), are examples of reduction
2298 Tensor value_selecting_reduction_backward_symint(const Tensor& grad, int64_t dim, const Tensor& ind… in value_selecting_reduction_backward_symint() argument
2303 return grad_in.scatter(dim, indices_, grad_out); in value_selecting_reduction_backward_symint()
2305 return grad_in.scatter_(dim, indices_, grad_out); in value_selecting_reduction_backward_symint()
2309 auto grad_ = grad.unsqueeze(dim); in value_selecting_reduction_backward_symint()
2310 auto indices_ = indices.unsqueeze(dim); in value_selecting_reduction_backward_symint()
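
The comment at source line 2294 notes that max(dim), min(dim), topk(dim), and mode(dim) return values plus indices; value_selecting_reduction_backward scatters the incoming grad back to those indices (unsqueezing first when keepdim was false). A sketch for a dim = 1 reduction over a row-major 2-D input (illustrative names, plain scatter rather than the in-place/out-of-place split above):

```cpp
// Backward of a value-selecting reduction along dim = 1: each row's grad goes
// to the input position recorded in `indices`; all other positions get zero.
#include <vector>
#include <cstdint>
#include <cstddef>

std::vector<double> value_selecting_backward_dim1(
    const std::vector<double>& grad,        // one grad element per row
    const std::vector<int64_t>& indices,    // selected index per row
    std::size_t rows, std::size_t cols) {
  std::vector<double> grad_input(rows * cols, 0.0);
  for (std::size_t r = 0; r < rows; ++r)
    grad_input[r * cols + static_cast<std::size_t>(indices[r])] = grad[r];  // scatter
  return grad_input;
}
```
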
2324 Tensor sum_sparse_coo(const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, std::optional<… in sum_sparse_coo() argument
2326 if (dim.has_value()) { in sum_sparse_coo()
2328 result = at::_sparse_sum(self, *dim, *dtype); in sum_sparse_coo()
2331 result = at::_sparse_sum(self, *dim, at::kLong); in sum_sparse_coo()
2333 result = at::_sparse_sum(self, *dim); in sum_sparse_coo()
2340 auto dim_mask = make_dim_mask(dim, self.dim()); in sum_sparse_coo()
2341 for (int dim = 0; dim < self.dim(); dim++) { in sum_sparse_coo() local
2342 if (dim_mask[dim]) { in sum_sparse_coo()
2343 result = result.unsqueeze(dim); in sum_sparse_coo()
2352 at::OptionalIntArrayRef dim, in sum_sparse_compressed() argument
2356 // bit different in the second parameters `dim`, which causes the conversion of `dim` in sum_sparse_compressed()
2359 dim.has_value(), "dim has no value, cannot be used in sum.dim_IntList"); in sum_sparse_compressed()
2365 return at::_sparse_csr_sum(self, *dim, keepdim, dtype); in sum_sparse_compressed()