/external/tensorflow/tensorflow/compiler/xla/ |
D | window_util.cc |
    65  /* static */ std::string ToString(const WindowDimension& dim) {  in ToString()
   109  [](const WindowDimension& dim) { return StrCat(dim.size()); });  in ToString()
   113  [](const WindowDimension& dim) { return StrCat(dim.stride()); });  in ToString()
   116  add_field(" pad", [](const WindowDimension& dim) {  in ToString()
   121  add_field(" lhs_dilate", [](const WindowDimension& dim) {  in ToString()
   126  add_field(" rhs_dilate", [](const WindowDimension& dim) {  in ToString()
   131  add_field(" rhs_reversal", [](const WindowDimension& dim) {  in ToString()
   139  for (const auto& dim : window.dimensions()) {  in HasStride()  local
   148  for (const auto& dim : window.dimensions()) {  in HasPadding()  local
   157  return absl::c_all_of(window.dimensions(), [](const WindowDimension& dim) {  in HasSymmetricPadding()
   [all …]
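The predicates surfaced here (HasStride, HasPadding, HasSymmetricPadding) are simple per-dimension scans. A minimal stand-alone sketch of that pattern, using a stand-in struct rather than the real xla::WindowDimension proto (which is generated from xla_data.proto):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Stand-in for xla::WindowDimension (assumption for illustration).
struct WindowDim {
  int64_t size = 1;
  int64_t stride = 1;
  int64_t padding_low = 0;
  int64_t padding_high = 0;
};

// A window "has stride" if any dimension steps by more than one element.
bool HasStride(const std::vector<WindowDim>& dims) {
  return std::any_of(dims.begin(), dims.end(),
                     [](const WindowDim& d) { return d.stride != 1; });
}

// Padding is symmetric when low and high padding agree in every dimension.
bool HasSymmetricPadding(const std::vector<WindowDim>& dims) {
  return std::all_of(dims.begin(), dims.end(), [](const WindowDim& d) {
    return d.padding_low == d.padding_high;
  });
}
```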
|
/external/pytorch/aten/src/ATen/ |
D | WrapDimUtils.h |
    17  inline int64_t maybe_wrap_dim(int64_t dim, TensorImpl* tensor) {  in maybe_wrap_dim()
    21  inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) {  in maybe_wrap_dim()
    31  int64_t dim,  in maybe_wrap_dim()
    68  auto& dim = dims[i];  variable
   108  int64_t dim,  in legacy_cat_wrap_dim()
   120  int64_t dim,  in legacy_cat_wrap_dim_symint()
   134  int64_t dim,  in legacy_cat_wrap_dim()
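All of these overloads funnel into one wrapping rule: a negative dim indexes from the end, so for rank r the valid range is [-r, r-1] and negative values map to dim + r. A simplified sketch (ATen's real version raises a detailed IndexError instead):

```cpp
#include <cstdint>
#include <stdexcept>

int64_t wrap_dim(int64_t dim, int64_t rank, bool wrap_scalar = true) {
  if (rank <= 0) {
    if (!wrap_scalar) throw std::out_of_range("dim on a 0-d tensor");
    rank = 1;  // treat a scalar as 1-d so dim 0 and -1 both resolve
  }
  if (dim < -rank || dim >= rank) throw std::out_of_range("dim out of range");
  return dim < 0 ? dim + rank : dim;
}
// wrap_dim(-1, 4) == 3; wrap_dim(2, 4) == 2
```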
|
/external/pytorch/aten/src/ATen/native/ |
D | Integration.cpp |
    32  Tensor do_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) {  in do_trapezoid()
    42  Tensor do_trapezoid(const Tensor& y, double dx, int64_t dim) {  in do_trapezoid()
    46  Tensor zeros_like_except(const Tensor& y, int64_t dim) {  in zeros_like_except()
    53  Tensor do_cumulative_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) {  in do_cumulative_trapezoid()
    60  Tensor do_cumulative_trapezoid(const Tensor& y, double dx, int64_t dim) {  in do_cumulative_trapezoid()
    85  Tensor trapezoid(const Tensor& y, const Tensor& x, int64_t dim) {  in trapezoid()
   122  Tensor trapezoid(const Tensor& y, const Scalar& dx, int64_t dim) {  in trapezoid()
   132  Tensor trapz(const Tensor& y, const Tensor& x, int64_t dim) {  in trapz()
   136  Tensor trapz(const Tensor& y, double dx, int64_t dim) {  in trapz()
   140  Tensor cumulative_trapezoid(const Tensor& y, const Tensor& x, int64_t dim) {  in cumulative_trapezoid()
   [all …]
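The rule trapezoid()/trapz() apply along `dim` is the composite trapezoidal rule. A self-contained 1-d sketch:

```cpp
#include <cstddef>
#include <vector>

// Integral estimate: sum of 0.5 * (x[i+1] - x[i]) * (y[i] + y[i+1]).
double trapezoid_1d(const std::vector<double>& y, const std::vector<double>& x) {
  double acc = 0.0;
  for (std::size_t i = 0; i + 1 < y.size(); ++i) {
    acc += 0.5 * (x[i + 1] - x[i]) * (y[i] + y[i + 1]);
  }
  return acc;
}
```

With uniform spacing dx the sum collapses to dx * (y[0]/2 + y[1] + ... + y[n-2] + y[n-1]/2), which is what the double-dx overload of do_trapezoid() can exploit.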
|
D | ReduceOps.cpp |
   193  TORCH_META_FUNC2(all, dim)(const Tensor& self, int64_t dim, bool keepdim) {  in TORCH_META_FUNC2()  argument
   205  TORCH_META_FUNC2(any, dim)(const Tensor& self, int64_t dim, bool keepdim) {  in TORCH_META_FUNC2()  argument
   220  const std::optional<int64_t>& dim) {  in check_argmax_argmin()
   247  int64_t dim,  in meta_func_cum_ops()
   291  TORCH_META_FUNC2(mean, dim)  in TORCH_META_FUNC2()  argument
   356  auto dim = maybe_wrap_dim(dim_opt.value(), self.ndimension());  in TORCH_META_FUNC()  local
   445  Tensor _logcumsumexp_cpu(const Tensor& self, int64_t dim) {  in _logcumsumexp_cpu()
   450  Tensor& _logcumsumexp_out_cpu(const Tensor& self, int64_t dim, Tensor& result) {  in _logcumsumexp_out_cpu()
   455  Tensor logcumsumexp(const Tensor& self, int64_t dim) {  in logcumsumexp()
   464  Tensor& logcumsumexp_out(const Tensor& self, int64_t dim, Tensor& result) {  in logcumsumexp_out()
   [all …]
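logcumsumexp computes out[i] = log(exp(out[i-1]) + exp(x[i])) along `dim`; the interesting part is doing it without overflow. A 1-d sketch of the standard stable recurrence (factor out the running maximum before exponentiating):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<double> logcumsumexp_1d(const std::vector<double>& x) {
  std::vector<double> out(x.size());
  double run = -INFINITY;  // log of an empty sum
  for (std::size_t i = 0; i < x.size(); ++i) {
    double m = std::max(run, x[i]);  // shift so neither exp can overflow
    run = m + std::log(std::exp(run - m) + std::exp(x[i] - m));
    out[i] = run;
  }
  return out;
}
```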
|
D | SpectralOps.cpp |
   162  IntArrayRef dim, int64_t norm, bool onesided) {  in fft_r2c_maybe_out()
   174  IntArrayRef dim, int64_t norm, SymInt last_dim_size) {  in fft_c2r_maybe_out()
   188  IntArrayRef dim, int64_t norm, bool forward) {  in fft_c2c_maybe_out()
   207  const auto dim = maybe_wrap_dim(unwrapped_dim, input_dim, /*wrap_scalar=*/false);  in fft_c2r()  local
   233  const auto dim = maybe_wrap_dim(unwrapped_dim, input_dim, /*wrap_scalar=*/false);  in fft_r2c()  local
   265  const auto dim = maybe_wrap_dim(unwrapped_dim, input_dim, /*wrap_scalar=*/false);  in fft_c2c()  local
   278  DimVector dim;  member
   285  Tensor input, at::OptionalSymIntArrayRef shape, at::OptionalIntArrayRef dim) {  in canonicalize_fft_shape_and_dim_args()
   349  IntArrayRef dim, std::optional<c10::string_view> norm_str, bool forward) {  in fftn_c2c()
   360  Tensor fft_fft_symint(const Tensor& self, std::optional<SymInt> n, int64_t dim,  in fft_fft_symint()
   [all …]
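The int64_t `norm` these helpers carry selects a scale factor applied to the transform result. A sketch of the mapping, assuming an enum in the spirit of ATen's fft_norm_mode (names here are illustrative, not the exact spelling):

```cpp
#include <cmath>
#include <cstdint>

enum class NormMode : int64_t { none = 0, by_root_n = 1, by_n = 2 };

// n is the product of the signal sizes over the transformed dims.
double norm_scale(NormMode mode, int64_t n) {
  switch (mode) {
    case NormMode::by_root_n: return 1.0 / std::sqrt(double(n));  // "ortho"
    case NormMode::by_n:      return 1.0 / double(n);
    default:                  return 1.0;  // unnormalized
  }
}
```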
|
D | NamedTensor.cpp |
    87  ptrdiff_t dim = (ptrdiff_t)tensor_sizes.size() - 1;  in aligned_size()  local
   290  const auto& dim = tensor_names[idx];  in align_to()  local
   335  Tensor gather(const Tensor& self, Dimname dim, const Tensor& index, bool sparse_grad) {  in gather()
   338  Tensor& gather_out(const Tensor& self, Dimname dim, const Tensor& index, bool sparse_grad, Tensor& …  in gather_out()
   341  Tensor index_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const …  in index_add()
   344  Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {  in index_fill()
   347  Tensor& index_fill_(Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {  in index_fill_()
   350  Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {  in index_fill()
   353  Tensor& index_fill_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {  in index_fill_()
   356  Tensor index_copy(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {  in index_copy()
   [all …]
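These Dimname overloads are thin wrappers: resolve the name to a positional index against the tensor's name list, then forward to the int64_t variant. A sketch of that lookup, with std::string standing in for at::Dimname:

```cpp
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

int64_t dimname_to_position(const std::vector<std::string>& names,
                            const std::string& dim) {
  for (std::size_t i = 0; i < names.size(); ++i) {
    if (names[i] == dim) return static_cast<int64_t>(i);  // first match wins
  }
  throw std::invalid_argument("name '" + dim + "' not found in tensor names");
}
```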
|
D | Sorting.cpp |
    56  int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true);  in TORCH_META_FUNC()  local
    95  void _fill_indices(const TensorBase &indices, int64_t dim) {  in _fill_indices()
   421  int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true);  in kthvalue_out_impl_cpu()  local
   505  int64_t dim,  in median_with_indices_impl()
   656  std::optional<int64_t> dim,  in quantile_out()
   674  std::optional<int64_t> dim,  in quantile_out()
   692  std::optional<int64_t> dim,  in quantile()
   707  std::optional<int64_t> dim,  in quantile()
   719  std::optional<int64_t> dim,  in nanquantile_out()
   737  std::optional<int64_t> dim,  in nanquantile_out()
   [all …]
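For the quantile family, the default "linear" interpolation rule condenses to: fractional rank q * (n - 1) over the sorted values, interpolating between the two bracketing entries. A 1-d sketch (ignoring the NaN handling that nanquantile adds):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <stdexcept>
#include <vector>

double quantile_linear(std::vector<double> v, double q) {
  if (v.empty() || q < 0.0 || q > 1.0) throw std::invalid_argument("bad input");
  std::sort(v.begin(), v.end());
  double rank = q * (v.size() - 1);                        // fractional rank
  std::size_t lo = static_cast<std::size_t>(std::floor(rank));
  std::size_t hi = static_cast<std::size_t>(std::ceil(rank));
  double frac = rank - lo;
  return v[lo] + frac * (v[hi] - v[lo]);                   // interpolate
}
// quantile_linear({1, 2, 3, 4}, 0.5) == 2.5
```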
|
D | TensorProperties.cpp |
    47  int64_t size(const Tensor& self, int64_t dim) {  in size()
    51  int64_t stride(const Tensor& self, int64_t dim) {  in stride()
    55  c10::SymInt sym_size(const Tensor& self, int64_t dim) {  in sym_size()
    59  c10::SymInt sym_stride(const Tensor& self, int64_t dim) {  in sym_stride()
    71  int64_t size(const Tensor& self, Dimname dim) {  in size()
    76  int64_t stride(const Tensor& self, Dimname dim) {  in stride()
|
D | NonEmptyUtils.h |
     7  inline int64_t ensure_nonempty_dim(int64_t dim) {  in ensure_nonempty_dim()
    11  inline int64_t ensure_nonempty_size(const TensorBase &t, int64_t dim) {  in ensure_nonempty_size()
    15  inline int64_t ensure_nonempty_stride(const TensorBase &t, int64_t dim) {  in ensure_nonempty_stride()
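What these three helpers normalize: kernels that loop over dimensions want a 0-d tensor to behave like a 1-element 1-d tensor, so rank and size are clamped to look "non-empty". A sketch with a plain size vector standing in for TensorBase:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

inline int64_t ensure_nonempty_dim(int64_t dim) {
  return std::max<int64_t>(dim, 1);  // rank 0 behaves like rank 1
}

inline int64_t ensure_nonempty_size(const std::vector<int64_t>& sizes,
                                    int64_t d) {
  return sizes.empty() ? 1 : sizes[d];  // a scalar has one element
}
```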
|
/external/executorch/kernels/test/ |
D | op_squeeze_copy_test.cpp |
    27  op_squeeze_copy_dim_out(const Tensor& self, int64_t dim, Tensor& out) {  in op_squeeze_copy_dim_out()
    49  int64_t dim = 0;  in TEST_F()  local
    59  int64_t dim = 0;  variable
    70  int64_t dim = 1;  variable
    80  int64_t dim = 0;  variable
    92  int64_t dim = 0;  variable
   104  int64_t dim = 1;  variable
   120  int64_t dim = 0;  variable
   135  int64_t dim = 0;  variable
   148  int64_t dim = 0;  variable
   [all …]
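In shape terms, what these tests exercise: squeeze_copy(self, dim) removes `dim` from the output shape only when its extent is exactly 1; otherwise the shape passes through unchanged. A sketch of the shape rule:

```cpp
#include <cstdint>
#include <vector>

std::vector<int64_t> squeeze_dim_shape(std::vector<int64_t> sizes, int64_t dim) {
  if (dim >= 0 && dim < int64_t(sizes.size()) && sizes[dim] == 1) {
    sizes.erase(sizes.begin() + dim);  // only size-1 dims are removable
  }
  return sizes;
}
// squeeze_dim_shape({2, 1, 3}, 1) -> {2, 3}
// squeeze_dim_shape({2, 1, 3}, 0) -> {2, 1, 3}  (dim 0 has size 2, kept)
```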
|
/external/pytorch/torch/csrc/api/src/nn/options/ |
D | activation.cpp |
     8  GLUOptions::GLUOptions(int64_t dim) : dim_(dim) {}  in GLUOptions()
    12  SoftmaxOptions::SoftmaxOptions(int64_t dim) : dim_(dim) {}  in SoftmaxOptions()
    14  SoftminOptions::SoftminOptions(int64_t dim) : dim_(dim) {}  in SoftminOptions()
    16  LogSoftmaxOptions::LogSoftmaxOptions(int64_t dim) : dim_(dim) {}  in LogSoftmaxOptions()
    34  SoftmaxFuncOptions::SoftmaxFuncOptions(int64_t dim) : dim_(dim) {}  in SoftmaxFuncOptions()
    36  SoftminFuncOptions::SoftminFuncOptions(int64_t dim) : dim_(dim) {}  in SoftminFuncOptions()
    38  LogSoftmaxFuncOptions::LogSoftmaxFuncOptions(int64_t dim) : dim_(dim) {}  in LogSoftmaxFuncOptions()
|
/external/pytorch/aten/src/ATen/templates/ |
D | Functions.h |
    86  TORCH_API inline Tensor var(const Tensor& self, int dim) {  in var()
    89  TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {  in var_mean()
    92  TORCH_API inline Tensor std(const Tensor& self, int dim) {  in std()
    95  TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {  in std_mean()
   103  inline int64_t size(const Tensor& tensor, int64_t dim) {  in size()
   107  inline int64_t stride(const Tensor& tensor, int64_t dim) {  in stride()
|
/external/mesa3d/src/amd/common/nir/ |
D | ac_nir_lower_image_opcodes_cdna.c |
    31  static unsigned get_coord_components(enum glsl_sampler_dim dim, bool is_array)  in get_coord_components()
    50  enum glsl_sampler_dim dim, bool is_array,  in lower_image_coords()
   112  enum gl_access_qualifier access, enum glsl_sampler_dim dim,  in emulated_image_load()
   128  enum glsl_sampler_dim dim, bool is_array)  in emulated_image_store()
   140  static nir_def *get_dim(nir_builder *b, nir_def *desc, unsigned dim)  in get_dim()
   172  for (unsigned dim = 0; dim < num_dim_coords; dim++)  in emulated_tex_level_zero()  local
   198  for (unsigned dim = 0; dim < num_dim_coords; dim++) {  in emulated_tex_level_zero()  local
   220  for (unsigned dim = 0; dim < num_dim_coords; dim++) {  in emulated_tex_level_zero()  local
   248  for (unsigned dim = 0; dim < num_dim_coords; dim++)  in emulated_tex_level_zero()  local
   257  for (unsigned dim = 0; dim < num_dim_coords; dim++) {  in emulated_tex_level_zero()  local
   [all …]
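The mapping get_coord_components() encodes is the number of coordinate components a sampler dimensionality needs, with arrays appending one layer index. A sketch under simplified enumerator names (stand-ins for Mesa's GLSL_SAMPLER_DIM_* values):

```cpp
enum glsl_dim { DIM_1D, DIM_2D, DIM_3D, DIM_CUBE, DIM_BUF };

static unsigned coord_components(enum glsl_dim dim, bool is_array) {
  unsigned n;
  switch (dim) {
    case DIM_1D:
    case DIM_BUF:  n = 1; break;
    case DIM_2D:   n = 2; break;
    case DIM_3D:
    case DIM_CUBE: n = 3; break;  // cubes address with a 3-component vector
    default:       n = 2; break;
  }
  return n + (is_array ? 1u : 0u);  // arrays append a layer coordinate
}
```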
|
/external/executorch/kernels/portable/cpu/util/ |
D | index_util.cpp |
    17  int64_t dim,  in check_gather_args()
    62  int64_t dim,  in check_index_select_args()
   112  int64_t dim,  in get_index_select_out_target_size()
   144  int64_t dim,  in check_scatter_add_args()
   196  int64_t dim,  in check_scatter_src_args()
   205  int64_t dim,  in check_scatter_value_args()
   215  int64_t dim,  in check_select_scatter_args()
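The contract check_gather_args() enforces, sketched: index must have the same rank as the input, each non-gathered dimension of index must fit inside the input, and every index value must lie in [0, in_size(dim)), because out[i][j] = in[index[i][j]][j] when dim == 0 (and symmetrically for other dims). A shape-only sketch:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

bool check_gather_shapes(const std::vector<int64_t>& in_sizes,
                         const std::vector<int64_t>& index_sizes,
                         int64_t dim) {
  if (in_sizes.size() != index_sizes.size()) return false;  // ranks must agree
  for (std::size_t d = 0; d < in_sizes.size(); ++d) {
    // every non-gathered dim of index must fit inside the input
    if (int64_t(d) != dim && index_sizes[d] > in_sizes[d]) return false;
  }
  return true;
}
```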
|
D | copy_ops_util.cpp |
    76  int64_t dim,  in check_cat_args()
   127  int64_t dim,  in get_cat_out_target_size()
   238  size_t dim = dims[i] >= 0 ? dims[i] : in.dim() + dims[i];  in check_permute_copy_args()  local
   253  bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) {  in check_unbind_copy_args()
   390  int64_t dim,  in check_select_copy_out_args()
   402  int64_t dim,  in get_select_copy_out_target_size()
   419  int64_t dim,  in check_split_with_sizes_copy_args()
   446  int64_t dim,  in get_split_with_sizes_copy_out_target_size()
   459  int64_t dim,  in check_squeeze_copy_dim_args()
   469  int64_t dim,  in get_squeeze_copy_dim_out_target_size()
   [all …]
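The shape rule behind get_cat_out_target_size(): all inputs must agree on every dimension except `dim`, and the output's extent along `dim` is the sum of the inputs' extents there. A sketch, assuming the caller has already validated agreement:

```cpp
#include <cstdint>
#include <vector>

std::vector<int64_t> cat_out_shape(
    const std::vector<std::vector<int64_t>>& ins, int64_t dim) {
  std::vector<int64_t> out = ins.front();  // non-cat dims copied through
  out[dim] = 0;
  for (const auto& s : ins) out[dim] += s[dim];  // concatenated extent
  return out;
}
// cat_out_shape({{2, 3}, {4, 3}}, 0) -> {6, 3}
```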
|
D | slice_util.cpp |
    20  int64_t dim,  in check_narrow_copy_args()
    39  int64_t dim,  in get_narrow_copy_out_target_size()
    53  int64_t dim,  in check_slice_copy_args()
    66  int64_t dim,  in get_slice_copy_out_target_size()
    76  int64_t dim,  in check_slice_scatter_args()
   152  int64_t dim,  in compute_slice()
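The arithmetic compute_slice() and the size helpers rely on: after clamping start/end into [0, len], a slice [start, end) with positive step has ceil((end - start) / step) elements. A sketch (positive step assumed):

```cpp
#include <algorithm>
#include <cstdint>

int64_t slice_length(int64_t len, int64_t start, int64_t end, int64_t step) {
  start = std::clamp<int64_t>(start < 0 ? start + len : start, 0, len);
  end = std::clamp<int64_t>(end < 0 ? end + len : end, 0, len);
  if (end <= start) return 0;
  return (end - start + step - 1) / step;  // ceiling division
}
// slice_length(10, 1, 8, 3) == 3  (elements 1, 4, 7)
```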
|
/external/tensorflow/tensorflow/core/kernels/ |
D | ragged_tensor_to_sparse_kernel.cc |
    89  for (int dim = rt_nested_splits_len - 2; dim >= 0; --dim) {  in Compute()  local
    96  for (int dim = 0; dim < index_prefix.size(); ++dim) {  in Compute()  local
   108  int dim = 0;  in Compute()  local
   141  for (int dim = 0; dim < rt_nested_splits_len; ++dim) {  in Compute()  local
   149  for (int dim = 1; dim < rt_dense_values_in.dims(); ++dim) {  in Compute()  local
   204  for (int dim = 1; dim < values_shape.dims(); ++dim) {  in MakeIndexSuffixes()  local
   221  const std::vector<int64_t>& pos, int dim,  in IsCompleted()
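The core conversion this kernel performs, sketched in one dimension: a ragged row_splits vector [0, 2, 5] says row 0 owns values [0, 2) and row 1 owns values [2, 5), so each value's sparse row index is the split bucket it falls in:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int64_t> row_splits_to_row_ids(const std::vector<int64_t>& splits) {
  std::vector<int64_t> ids;
  for (std::size_t row = 0; row + 1 < splits.size(); ++row) {
    for (int64_t v = splits[row]; v < splits[row + 1]; ++v) {
      ids.push_back(static_cast<int64_t>(row));  // one row index per value
    }
  }
  return ids;
}
// row_splits_to_row_ids({0, 2, 5}) -> {0, 0, 1, 1, 1}
```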
|
D | unique_op_test.cc |
    40  TensorProto GetRandomInt32TensorProto(int dim, int max_int) {  in GetRandomInt32TensorProto()
    52  TensorProto GetRandomInt32TensorProtoWithRepeat(int dim, int repeat,  in GetRandomInt32TensorProtoWithRepeat()
    68  const int dim = state.range(0);  in BM_Unique_INT32()  local
    91  const int dim = state.range(0);  in BM_Unique_INT32_Repeat()  local
   114  TensorProto GetRandomStringsTensorProto(int dim, int max_str_len) {  in GetRandomStringsTensorProto()
   132  const int dim = state.range(0);  in BM_Unique_STRING()  local
|
/external/tensorflow/tensorflow/core/grappler/utils/ |
D | symbolic_shapes.cc |
    36  bool IsKnown(const TensorShapeProto::Dim& dim) { return dim.size() >= 0; }  in IsKnown()
    38  bool IsKnownSymbolically(const TensorShapeProto::Dim& dim) {  in IsKnownSymbolically()
    42  bool IsUnknown(const TensorShapeProto::Dim& dim) { return dim.size() == -1; }  in IsUnknown()
    48  [](const TensorShapeProto::Dim& dim) { return !IsUnknown(dim); });  in ShapeIsSymbolicallyDefined()
    67  for (const auto& dim : shape.dim()) {  in NumCoefficients()  local
   125  for (const auto& dim : bcast.output_shape()) {  in ShapeAfterBroadcast()  local
   157  const auto& dim = shape.dim(i);  in CompareSymbolicallyShapedTensorSizes()  local
   226  for (const auto& dim : numerator.dim()) {  in ComputeSizeRatio()  local
   236  for (const auto& dim : denominator.dim()) {  in ComputeSizeRatio()  local
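The convention the three predicates encode for TensorShapeProto dimension sizes: a size >= 0 is concretely known, -1 is fully unknown, and anything <= -2 is an "unknown but consistent" symbolic dimension (two occurrences of the same negative id must match). Reduced to plain integers:

```cpp
#include <cstdint>

bool IsKnown(int64_t size) { return size >= 0; }          // concrete extent
bool IsUnknown(int64_t size) { return size == -1; }        // fully unknown
bool IsKnownSymbolically(int64_t size) { return size <= -2; }  // symbolic id
```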
|
/external/pytorch/aten/src/ATen/native/cuda/ |
D | ScanKernels.cpp |
    28  void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {  in cummax_helper_cuda()
    45  void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {  in cummin_helper_cuda()
    62  Tensor& _logcumsumexp_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {  in _logcumsumexp_out_cuda()
    86  Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) {  in _logcumsumexp_cuda()
    91  void cumsum_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim) {  in cumsum_cuda_kernel()
   104  void cumprod_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim) {  in cumprod_cuda_kernel()
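cummax/cummin produce a values/indices pair along `dim`: the running extremum plus the position where it was attained. A sequential 1-d sketch of the cummax contract (the CUDA kernel parallelizes the same scan):

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

std::pair<std::vector<double>, std::vector<int64_t>>
cummax_1d(const std::vector<double>& x) {
  std::vector<double> values(x.size());
  std::vector<int64_t> indices(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    if (i == 0 || x[i] > values[i - 1]) {
      values[i] = x[i];
      indices[i] = static_cast<int64_t>(i);
    } else {
      values[i] = values[i - 1];    // carry the running max forward
      indices[i] = indices[i - 1];  // and remember where it came from
    }
  }
  return {values, indices};
}
```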
|
/external/pytorch/aten/src/ATen/native/quantized/cpu/ |
D | TensorShape.cpp |
    35  bool is_cat_nhwc_fast_path(const MaterializedITensorListRef& qxs, int64_t dim) {  in is_cat_nhwc_fast_path()
    80  int64_t dim,  in quantized_cat_impl()
   120  int64_t dim,  in quantized_cat_impl()
   129  int64_t dim,  in qcat()
   141  Tensor qcat_out(const c10::List<Tensor>& qxs, int64_t dim, Tensor out) {  in qcat_out()
   161  Tensor cat_quantized_cpu(const ITensorListRef& qxs, int64_t dim) {  in cat_quantized_cpu()
   177  Tensor& cat_out_quantized_cpu(const ITensorListRef& qxs, int64_t dim, Tensor& out) {  in cat_out_quantized_cpu()
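Why quantized cat is more than a memcpy: inputs may carry different (scale, zero_point) pairs, so off the fast path each value is dequantized and requantized into the output's parameters. A sketch for one int8 value:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t requantize(int8_t q_in, double s_in, int32_t zp_in,
                  double s_out, int32_t zp_out) {
  double real = s_in * (q_in - zp_in);                       // dequantize
  int32_t q = zp_out + int32_t(std::lround(real / s_out));   // requantize
  return int8_t(std::clamp<int32_t>(q, -128, 127));          // saturate
}
```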
|
/external/pytorch/torch/_higher_order_ops/ |
D | associative_scan.py |
    41  def _interleave(a, b, dim):  argument
    76  def __call__(self, combine_fn, input, dim):  argument
   185  def generic_associative_scan(operator, elems_flat, dim=0):  argument
   337  def associative_scan_op_dense(combine_fn, input, dim):  argument
   347  def associative_scan_proxy_mode(mode, combine_fn, input, dim):  argument
   352  def assoiciative_scan_fake_tensor_mode(mode, combine_fn, input, dim):  argument
   358  def associative_scan_functionalize(ctx, combine_fn, input, dim):  argument
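associative_scan computes an inclusive scan under a user-supplied associative combine_fn; the even/odd recursion in generic_associative_scan only reorders the combines for parallelism, so for an associative operator the dense semantics reduce to this sequential loop (sketched in C++ for consistency with the rest of this listing):

```cpp
#include <cstddef>
#include <functional>
#include <vector>

template <typename T, typename F>
std::vector<T> inclusive_scan(const std::vector<T>& x, F combine) {
  std::vector<T> out(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    out[i] = (i == 0) ? x[i] : combine(out[i - 1], x[i]);  // fold left
  }
  return out;
}
// inclusive_scan<int>({1, 2, 3, 4}, std::plus<int>{}) -> {1, 3, 6, 10}
```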
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | sharding_util_ops.cc |
    80  for (int dim = 0; dim < expected_rank; ++dim) {  in GetAndValidateAttributes()  local
   108  auto divisor = [&](const int dim) {  in GetSliceIndices()
   116  for (int dim = num_partitions.size() - 1; dim > 0; --dim) {  in GetSliceIndices()  local
   153  for (int dim = 0; dim < rank; ++dim) {  in CompileInternal()  local
   166  for (int dim = 0; dim < rank; ++dim) {  in CompileInternal()  local
   185  for (int dim = 0; dim < rank; ++dim) {  in CompileInternal()  local
   199  for (int dim = 0; dim < rank; ++dim) {  in CompileInternal()  local
   208  for (int dim = 0; dim < rank; ++dim) {  in CompileInternal()  local
   349  for (int dim = 0; dim < rank; ++dim) {  in CompileInternal()  local
   373  for (int dim = 0; dim < rank; ++dim) {  in CompileInternal()  local
   [all …]
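A sketch of the index arithmetic GetSliceIndices() suggests: a flat partition id is decomposed into one coordinate per dimension (row-major div/mod over num_partitions, last dimension fastest), and each coordinate times the slice extent gives that dimension's start offset. Names and ordering here are assumptions for illustration:

```cpp
#include <cstdint>
#include <vector>

std::vector<int64_t> slice_starts(int64_t flat_id,
                                  const std::vector<int64_t>& num_partitions,
                                  const std::vector<int64_t>& slice_shape) {
  std::vector<int64_t> starts(num_partitions.size());
  for (int64_t d = int64_t(num_partitions.size()) - 1; d >= 0; --d) {
    starts[d] = (flat_id % num_partitions[d]) * slice_shape[d];
    flat_id /= num_partitions[d];  // peel off this dimension's coordinate
  }
  return starts;
}
// 2x2 partitions of a 4x4 tensor (2x2 slices): id 3 -> starts {2, 2}
```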
|
/external/pytorch/aten/src/ATen/native/mkl/ |
D | SpectralOps.cpp |
   146  for (const auto& dim : mirror_dims) {  in _fft_fill_with_conjugate_symmetry_cpu_()  local
   170  Tensor& _fft_r2c_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization,  in _fft_r2c_mkl_out()
   188  Tensor& _fft_c2r_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization,  in _fft_c2r_mkl_out()
   195  Tensor& _fft_c2c_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization,  in _fft_c2c_mkl_out()
   247  T compute_fct(const Tensor& t, IntArrayRef dim, int64_t normalization) {  in compute_fct()
   261  Tensor _fft_c2r_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, int64_t last_dim_si…  in _fft_c2r_mkl()
   280  Tensor _fft_r2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, bool onesided) {  in _fft_r2c_mkl()
   308  Tensor _fft_c2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, bool forward) {  in _fft_c2c_mkl()
   431  IntArrayRef dim, int64_t normalization, bool forward) {  in _exec_fft()
   508  static DimVector _sort_dims(const Tensor& self, IntArrayRef dim, bool exclude_last=false) {  in _sort_dims()
   [all …]
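What _fft_fill_with_conjugate_symmetry_cpu_() restores: for a real input the full spectrum is redundant, X[n - k] == conj(X[k]), so only the first n/2 + 1 bins need computing and the mirrored half is filled in afterwards. A 1-d sketch:

```cpp
#include <complex>
#include <cstddef>
#include <vector>

void fill_conjugate_symmetry(std::vector<std::complex<double>>& X) {
  const std::size_t n = X.size();
  for (std::size_t k = 1; k < (n + 1) / 2; ++k) {
    X[n - k] = std::conj(X[k]);  // mirror bin k onto bin n - k
  }
}
// n = 8: bins 0..4 computed, bins 5..7 filled as conj of bins 3..1.
```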
|
/external/pytorch/torch/csrc/lazy/core/ |
D | shape.h |
    33  int64_t dim() const {  in dim()  function
    39  int64_t size(int64_t dim) const {  in size()
    42  void set_size(int64_t dim, int64_t size) {  in set_size()
|