
Searched full:dim (Results 1 – 25 of 5075) sorted by relevance


/external/sdv/vsomeip/third_party/boost/numeric/odeint/include/boost/numeric/odeint/algebra/
array_algebra.hpp
    36: //template< typename T , size_t dim , class Op >
    38: size_t dim, class Op >
    39: static void for_each1( Array< T, dim > &s1, Op op )
    41: for( size_t i=0 ; i<dim ; ++i )
    46: size_t dim, class Op >
    47: static void for_each2( Array< T, dim > &s1, const Array< T, dim > &s2,
    50: for( size_t i=0 ; i<dim ; ++i )
    55: size_t dim, class Op >
    56: static void for_each3( Array< T , dim > &s1 ,
    57: const Array< T , dim > &s2 ,
[all …]
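These for_eachN helpers are the elementwise hooks of odeint's algebra layer: each applies one operation across N same-sized arrays. A minimal Python sketch of the pattern (the function name and the Euler-step op are illustrative, not odeint API):

    # Sketch of the for_eachN pattern: apply `op` elementwise across
    # same-length containers, writing the result back into s1.
    def for_each2(s1, s2, op):
        for i in range(len(s1)):
            s1[i] = op(s1[i], s2[i])

    # e.g. an explicit Euler update x <- x + dt * dxdt as an elementwise op
    x, dxdt, dt = [1.0, 2.0], [2.0, -2.0], 0.5
    for_each2(x, dxdt, lambda xi, di: xi + dt * di)
    assert x == [2.0, 1.0]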
/external/pytorch/torch/_refs/
fft.py
    120: dim: int,
    126: dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    127: last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
    139: output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
    147: dim: int,
    158: dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    159: dim_size = n if n is not None else input.shape[dim]
    167: ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
    176: dim: int,
    185: dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
[all …]
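These _refs mirror the real-to-complex / complex-to-real FFT pairing: a one-sided transform of a length-n signal keeps n // 2 + 1 bins, so the c2r path defaults to last_dim_size = 2 * (bins - 1). A quick check with the public torch.fft API:

    import torch

    x = torch.randn(8)
    X = torch.fft.rfft(x)    # one-sided spectrum: 8 // 2 + 1 == 5 bins
    assert X.shape[0] == 5
    y = torch.fft.irfft(X)   # default output length: 2 * (5 - 1) == 8
    torch.testing.assert_close(y, x)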
/external/executorch/kernels/portable/cpu/util/
index_util.cpp
    17: int64_t dim,
    22: ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
    33: // Normalize dim to non-negative value
    34: if (dim < 0) {
    35: dim += nonzero_dim(in);
    39: if (d != dim) {
    42: …d of index should be smaller than the size of that dimension of input if dimension %zd != dim %zd",
    45: (size_t)dim);
    51: index_data[i] >= 0 && index_data[i] < nonempty_size(in, dim),
    53: (size_t)dim,
[all …]
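check_gather_args enforces the usual gather contract: index values must lie in [0, in.size(dim)), and along every dimension other than dim the index tensor may not be larger than the input. Illustrated with the public torch.gather API:

    import torch

    x = torch.arange(12).reshape(3, 4)
    idx = torch.tensor([[0, 3], [2, 1], [1, 0]])  # values < x.size(1) == 4
    out = torch.gather(x, dim=1, index=idx)       # out[i][j] = x[i][idx[i][j]]
    assert out.tolist() == [[0, 3], [6, 5], [9, 8]]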
copy_ops_util.cpp
    76: int64_t dim,
    107: tensor_is_rank(tensors[ref_i], tensors[i].dim()));
    109: for (size_t d = 0; d < tensors[i].dim(); ++d) {
    110: if (d != dim) {
    117: // Ensure dim is in range.
    119: tensors[ref_i].numel() == 0 || tensors[ref_i].dim() > dim);
    120: ET_LOG_AND_RETURN_IF_FALSE(dim >= 0);
    127: int64_t dim,
    132: // calculate out dim
    137: cat_dim_size += tensors[i].size(dim);
[all …]
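check_cat_args requires every non-cat dimension to agree across inputs, and get_cat_out_target_size sums the inputs' sizes along dim. With the public API:

    import torch

    a, b = torch.zeros(2, 3), torch.zeros(4, 3)
    out = torch.cat([a, b], dim=0)   # non-cat dims must match (3 == 3)
    assert out.shape == (2 + 4, 3)   # cat_dim_size is the sum: 6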
slice_util.cpp
    20: int64_t dim,
    24: ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0);
    26: ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
    28: ET_LOG_AND_RETURN_IF_FALSE(start >= -in.size(dim));
    29: ET_LOG_AND_RETURN_IF_FALSE(start <= in.size(dim));
    31: start += in.size(dim);
    33: ET_LOG_AND_RETURN_IF_FALSE(start + length <= in.size(dim));
    39: int64_t dim,
    43: *out_ndim = in.dim();
    45: for (size_t d = 0; d < in.dim(); ++d) {
[all …]
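check_narrow_copy_args mirrors torch.narrow's contract: start may be negative (it wraps by adding in.size(dim)), and start + length must stay within the dimension. For example (negative-start wrapping assumed to match the in-tree torch.narrow):

    import torch

    x = torch.arange(10)
    y = torch.narrow(x, dim=0, start=-4, length=3)  # start wraps to 10 - 4 == 6
    assert y.tolist() == [6, 7, 8]                  # 6 + 3 <= 10, in bounds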
/external/pytorch/torch/_refs/linalg/
__init__.py
    14: Dim,
    73: def cross(a: Tensor, b: Tensor, dim: int = -1):
    79: a.size(dim) == 3 and b.size(dim) == 3,
    80: … lambda: f"linalg.cross: inputs dim {dim} must have length 3, got {a.size(dim)} and {b.size(dim)}",
    83: dim = utils.canonicalize_dim(a.ndim, dim)
    85: return a.index_select(dim, (idx + 1) % 3) * b.index_select(
    86: dim, (idx + 2) % 3
    87: ) - a.index_select(dim, (idx + 2) % 3) * b.index_select(dim, (idx + 1) % 3)
    105: dim: Optional[DimsType] = None,
    115: if isinstance(dim, Dim):
[all …]
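The index_select arithmetic is the cyclic cross-product formula c_i = a_{i+1} * b_{i+2} - a_{i+2} * b_{i+1}, indices taken mod 3. A quick sanity check against the public API:

    import torch

    a = torch.tensor([1.0, 0.0, 0.0])
    b = torch.tensor([0.0, 1.0, 0.0])
    c = torch.linalg.cross(a, b, dim=-1)  # x cross y == z
    assert c.tolist() == [0.0, 0.0, 1.0]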
/external/pytorch/torch/export/
dynamic_shapes.py
    34: "Dim",
    56: Metaclass for :func:`Dim` types.
    68: return f"Dim('{name}')"
    70: return f"Dim('{name}', max={max_})"
    72: return f"Dim('{name}', min={min_})"
    73: return f"Dim('{name}', min={min_}, max={max_})"
    76: # e.g., dim + 1
    88: # e.g., dim - 1
    103: # e.g., dim * 2
    125: Meta class for static :func:`Dim` types.
[all …]
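Dim is the symbolic dimension used by torch.export to declare dynamic shapes; the derived forms (dim + 1, dim * 2, ...) express sizes linearly related to a base Dim. A minimal usage sketch (exact accepted bounds can vary across PyTorch versions):

    import torch
    from torch.export import Dim

    class M(torch.nn.Module):
        def forward(self, x):
            return x * 2

    batch = Dim("batch", min=2, max=64)  # repr: Dim('batch', min=2, max=64)
    ep = torch.export.export(
        M(), (torch.randn(4, 8),),
        dynamic_shapes={"x": {0: batch}},  # dim 0 of x may range over [2, 64]
    )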
/external/pytorch/aten/src/ATen/native/
Integration.cpp
    32: Tensor do_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) {
    33: Tensor left = y.slice(dim, 0, -1);
    34: Tensor right = y.slice(dim, 1);
    37: return ((left + right) * dx).sum(dim) / 2.;
    42: Tensor do_trapezoid(const Tensor& y, double dx, int64_t dim) {
    43: return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * (0.5)) * dx;
    46: Tensor zeros_like_except(const Tensor& y, int64_t dim) {
    48: dim = maybe_wrap_dim(dim, y.dim());
    49: sizes.erase(sizes.begin() + dim);
    53: Tensor do_cumulative_trapezoid(const Tensor& y, const Tensor& dx, int64_t dim) {
[all …]
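The scalar-dx overload of do_trapezoid is the trapezoid rule rearranged: summing (y_i + y_{i+1}) / 2 * dx over segments equals (sum(y) - (y_0 + y_{n-1}) / 2) * dx, because every interior sample appears in exactly two segments. Checking both forms against torch.trapezoid:

    import torch

    y, dx = torch.tensor([1.0, 4.0, 9.0, 16.0]), 0.5
    left, right = y[:-1], y[1:]
    segment_form = ((left + right) * dx).sum() / 2
    rearranged = (y.sum() - (y[0] + y[-1]) * 0.5) * dx
    torch.testing.assert_close(segment_form, rearranged)
    torch.testing.assert_close(torch.trapezoid(y, dx=dx), segment_form)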
ReduceOps.cpp
    193: TORCH_META_FUNC2(all, dim)(const Tensor& self, int64_t dim, bool keepdim) {
    194: allany_meta(*this, "all", self, dim, keepdim);
    197: TORCH_META_FUNC2(all, dims)(const Tensor& self, OptionalIntArrayRef dim, bool keepdim) {
    198: allany_meta(*this, "all", self, dim, keepdim);
    205: TORCH_META_FUNC2(any, dim)(const Tensor& self, int64_t dim, bool keepdim) {
    206: allany_meta(*this, "any", self, dim, keepdim);
    209: TORCH_META_FUNC2(any, dims)(const Tensor& self, OptionalIntArrayRef dim, bool keepdim) {
    210: allany_meta(*this, "any", self, dim, keepdim);
    220: const std::optional<int64_t>& dim) {
    221: if (dim.has_value()) {
[all …]
DilatedConvolutionUtils.h
    10: #define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \
    12: T.dim() == DIM && T.size(DIM_SIZE) == SIZE, \
    14: DIM, \
    37: template <int64_t dim>
    45: for (const auto index : c10::irange(dim)) {
    48: input.size(index + input.dim() - dim) + 2 * pad_size[index] -
    57: template <int64_t dim>
    65: auto output_size = get_output_size<dim>(
    68: if (input.dim() == dim + 2) {
[all …]
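Line 48 is the numerator of the standard dilated-convolution output-size formula (the division and trailing + 1 are cut off by the snippet); written out for spatial dimension i:

    out_i = \left\lfloor \frac{in_i + 2 p_i - \big(d_i (k_i - 1) + 1\big)}{s_i} \right\rfloor + 1

with p, d, k, s the per-dimension padding, dilation, kernel size, and stride; get_output_size applies this to the last dim spatial dimensions of the input.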
SpectralOps.cpp
    162: IntArrayRef dim, int64_t norm, bool onesided) {
    167: return at::_fft_r2c_outf(input, dim, norm, onesided, out_mut);
    169: return at::_fft_r2c(input, dim, norm, onesided);
    174: IntArrayRef dim, int64_t norm, SymInt last_dim_size) {
    181: return at::_fft_c2r_symint_outf(input, dim, norm, last_dim_size, out_mut);
    183: return at::_fft_c2r_symint(input, dim, norm, last_dim_size);
    188: IntArrayRef dim, int64_t norm, bool forward) {
    193: return at::_fft_c2c_outf(input, dim, norm, forward, out_mut);
    195: return at::_fft_c2c(input, dim, norm, forward);
    206: const auto input_dim = input.dim();
[all …]
Sorting.cpp
    56: int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true);
    58: k >= 0 && k <= (self.dim() > 0 ? self.size(dim) : 1),
    60: int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim);
    63: // Build the output size, which is the dim being selected set to
    67: topKSize[dim] = k;
    74: (const Tensor& self, std::optional<bool> stable, int64_t dim, bool descending) {
    75: maybe_wrap_dim(dim, self.dim());
    95: void _fill_indices(const TensorBase &indices, int64_t dim) {
    96: auto ndim = indices.dim();
    97: assert(0 <= dim && dim < ndim);
[all …]
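The meta function shapes topk's outputs by copying the input sizes and setting the chosen dim to k (line 67). With the public API:

    import torch

    x = torch.randn(2, 5, 3)
    values, indices = torch.topk(x, k=2, dim=1)
    assert values.shape == (2, 2, 3)   # input shape with dim 1 set to k
    assert indices.shape == (2, 2, 3)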
/external/eigen/Eigen/src/Geometry/
Transform.h
    24: Dim = Transform::Dim,
    42: int Dim,
    58: int Dim,
    95: * - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
    97: * - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
    98: * - #Projective: the transformation is stored as a (Dim+1)^2 matrix
    129: * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
    136: * \b Translation t (Dim)x(1):
    142: * \b Rotation R (Dim)x(Dim):
    148: * \b Linear \b Matrix L (Dim)x(Dim):
[all …]
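All the storage modes above encode the same homogeneous form: an affine transform with linear part L (Dim x Dim) and translation t (Dim x 1) acts through

    T = \begin{pmatrix} L & t \\ 0 & 1 \end{pmatrix},
    \qquad
    T \begin{pmatrix} x \\ 1 \end{pmatrix} = \begin{pmatrix} L x + t \\ 1 \end{pmatrix}

#AffineCompact stores only the top (Dim)x(Dim+1) block, since the last row is constant.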
RotationBase.h
    32: enum { Dim = _Dim };
    37: typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
    38: typedef Matrix<Scalar,Dim,1> VectorType;
    56: …EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t…
    57: { return Transform<Scalar,Dim,Isometry>(*this) * t; }
    67: * - a vector of size Dim
    80: …EIGEN_DEVICE_FUNC friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar…
    82: Transform<Scalar,Dim,Affine> res(r);
    89: …EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Opti…
    103: enum { Dim = RotationDerived::Dim };
[all …]
/external/tensorflow/tensorflow/core/profiler/internal/testdata/
graph.pbtxt
    9: dim {
    12: dim {
    15: dim {
    18: dim {
    37: dim {
    40: dim {
    43: dim {
    46: dim {
    71: dim {
    89: dim {
[all …]
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
sharding_util_ops.cc
    80: for (int dim = 0; dim < expected_rank; ++dim) {
    81: if (paddings[dim] < 0) {
    83: "'padding' must be all non-negative, but got ", paddings[dim],
    84: " at index ", dim, ".");
    86: if (paddings[dim] > 0) {
    108: auto divisor = [&](const int dim) {
    110: for (int i = num_partitions.size() - 1; i > dim; --i) {
    116: for (int dim = num_partitions.size() - 1; dim > 0; --dim) {
    117: slice_indices[dim] =
    118: ((index / divisor(dim)) % num_partitions[dim]) * slice_shape[dim];
[all …]
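GetSliceIndices decodes a flat partition index into per-dimension grid coordinates by mixed-radix arithmetic (divisor(dim) is the product of the partition counts of all later dims), then scales each coordinate by the slice shape. A small Python sketch of the same decoding (covering dim 0 with the same modulo, which is equivalent):

    from math import prod

    def slice_indices(index, num_partitions, slice_shape):
        # divisor(d) = product of partition counts after dim d
        def divisor(d):
            return prod(num_partitions[d + 1:])
        return [
            ((index // divisor(d)) % num_partitions[d]) * slice_shape[d]
            for d in range(len(num_partitions))
        ]

    # 2x3 grid of 4x5 slices: flat index 4 -> grid cell (1, 1) -> offsets (4, 5)
    assert slice_indices(4, [2, 3], [4, 5]) == [4, 5]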
/external/sdv/vsomeip/third_party/boost/numeric/odeint/examples/
point_type.hpp
    23: template< class T , size_t Dim >
    25: boost::additive1< point< T , Dim > ,
    26: boost::additive2< point< T , Dim > , T ,
    27: boost::multiplicative2< point< T , Dim > , T
    32: const static size_t dim = Dim;
    34: typedef point< value_type , dim > point_type;
    41: for( size_t i=0 ; i<dim ; ++i ) m_val[i] = 0.0;
    46: for( size_t i=0 ; i<dim ; ++i ) m_val[i] = val;
    51: if( dim > 0 ) m_val[0] = x;
    52: if( dim > 1 ) m_val[1] = y;
[all …]
/external/tensorflow/tensorflow/compiler/tests/
lstm_layer_inference.config.pbtxt
    2: feed{ id{node_name:"inputs/x_seq_0/read"} shape{dim{size:128}dim{size:1024}} }
    3: feed{ id{node_name:"inputs/x_seq_1/read"} shape{dim{size:128}dim{size:1024}} }
    4: feed{ id{node_name:"inputs/x_seq_2/read"} shape{dim{size:128}dim{size:1024}} }
    5: feed{ id{node_name:"inputs/x_seq_3/read"} shape{dim{size:128}dim{size:1024}} }
    6: feed{ id{node_name:"inputs/x_seq_4/read"} shape{dim{size:128}dim{size:1024}} }
    7: feed{ id{node_name:"inputs/pad_seq_0/read"} shape{dim{size:128}dim{size:1}} }
    8: feed{ id{node_name:"inputs/pad_seq_1/read"} shape{dim{size:128}dim{size:1}} }
    9: feed{ id{node_name:"inputs/pad_seq_2/read"} shape{dim{size:128}dim{size:1}} }
    10: feed{ id{node_name:"inputs/pad_seq_3/read"} shape{dim{size:128}dim{size:1}} }
    11: feed{ id{node_name:"inputs/pad_seq_4/read"} shape{dim{size:128}dim{size:1}} }
[all …]
/external/armnn/src/armnnOnnxParser/test/
Conv2D.cpp
    29: dim {
    32: dim {
    35: dim {
    38: dim {
    51: dim {
    54: dim {
    57: dim {
    60: dim {
    127: dim {
    130: dim {
[all …]
Addition.cpp
    29: dim {
    32: dim {
    35: dim {
    38: dim {
    51: dim {
    54: dim {
    57: dim {
    60: dim {
    82: dim {
    85: dim {
[all …]
/external/tensorflow/tensorflow/compiler/xla/
window_util.cc
    65: /* static */ std::string ToString(const WindowDimension& dim) {
    68: std::string str = StrCat("(size=", dim.size());
    69: if (dim.stride() != 1) {
    70: StrAppend(&str, ",stride=", dim.stride());
    72: if (dim.padding_low() != 0) {
    73: StrAppend(&str, ",padding_low=", dim.padding_low());
    75: if (dim.padding_high() != 0) {
    76: StrAppend(&str, ",padding_high=", dim.padding_high());
    78: if (dim.base_dilation() != 1) {
    79: StrAppend(&str, ",base_dilation=", dim.base_dilation());
[all …]
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Mean.cpp
    15: int64_t dim,
    19: self.dim() >= 2 && self.dim() <= 4,
    22: dim >= -self.dim() && dim < self.dim(),
    23: "Vulkan mean.dim dimension out of range expected to be in range of [",
    24: -self.dim(),
    26: self.dim() - 1,
    28: dim);
    37: // Normalize dim into range [0, self.dim()]
    38: dim = utils::normalize(dim, self.dim());
    42: uint32_t dim_size = output_size[dim];
[all …]
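utils::normalize maps a possibly negative dim into [0, self.dim()), the usual Python-style wrap; in Python it is just a modulo (a sketch, not the Vulkan helper itself):

    def normalize(dim: int, ndim: int) -> int:
        # wrap negative dims: -1 -> ndim - 1, matching PyTorch's convention
        return dim % ndim

    assert normalize(-1, 4) == 3 and normalize(2, 4) == 2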
/external/pytorch/torch/_higher_order_ops/
associative_scan.py
    41: def _interleave(a, b, dim):
    43: if b_trunc := (a.shape[dim] == b.shape[dim] + 1):
    45: [0] * ((b.ndim - dim - 1) * 2 + 1)
    47: + [0] * (b.ndim * 2 - ((b.ndim - dim - 1) * 2 + 2))
    51: stacked = torch.stack([a, b], dim=dim + 1)
    52: interleaved = torch.flatten(stacked, start_dim=dim, end_dim=dim + 1)
    54: # TODO: find torch alternative for slice_along dim for torch.jit.script to work
    55: interleaved = aten.slice(interleaved, dim, 0, b.shape[dim] + a.shape[dim] - 1)
    76: def __call__(self, combine_fn, input, dim):
    77: return super().__call__(combine_fn, input, dim)
[all …]
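The stack-then-flatten trick in _interleave alternates elements of a and b along dim: stacking at dim + 1 pairs a_i with b_i, and flattening the two dims yields a_0, b_0, a_1, b_1, .... For equal-length inputs:

    import torch

    a, b = torch.tensor([1, 3, 5]), torch.tensor([2, 4, 6])
    stacked = torch.stack([a, b], dim=1)  # pairs: [[1, 2], [3, 4], [5, 6]]
    interleaved = torch.flatten(stacked, start_dim=0, end_dim=1)
    assert interleaved.tolist() == [1, 2, 3, 4, 5, 6]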
/external/tensorflow/tensorflow/core/ir/importexport/tests/roundtrip/
shape-attrs.pbtxt
    90: dim {
    93: dim {
    98: dim {
    101: dim {
    104: dim {
    137: dim {
    140: dim {
    145: dim {
    148: dim {
    151: dim {
[all …]
/external/pytorch/aten/src/ATen/native/cuda/
Sorting.cpp
    39: int64_t dim = maybe_wrap_dim(dim_, self.dim());
    40: int64_t slicesize = self.dim() == 0 ? 1 : self.size(dim);
    41: zero_numel_check_dims(self, dim, "kthvalue()");
    44: "kthvalue(): selected number k out of range for dimension ", dim);
    49: values, indices, self, dim, keepdim);
    50: if (self.dim() == 0 && self.numel() == 1) {
    57: self.dim() <= MAX_TENSORINFO_DIMS,
    65: launch_kthvalue_kernel(values, indices, self, dim, k);
    69: values.squeeze_(dim);
    70: indices.squeeze_(dim);
[all …]
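kthvalue returns the k-th smallest value (1-based) along dim; with keepdim false the selected dim is squeezed away, which is what the squeeze_ calls at the end do. With the public API:

    import torch

    x = torch.tensor([[9.0, 1.0, 7.0], [3.0, 8.0, 2.0]])
    values, indices = torch.kthvalue(x, k=2, dim=1)  # 2nd smallest per row
    assert values.tolist() == [7.0, 3.0]
    assert indices.tolist() == [2, 0]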
