/external/tensorflow/tensorflow/core/kernels/ |
D | strided_slice_op_impl.h |
    39   template <typename Device, typename T, int NDIM>
    47   template <typename Device, typename T, int NDIM>
    55   template <typename Device, typename T, int NDIM>
    77   template <typename Device, typename T, int NDIM>
    88   Eigen::DSizes<Eigen::DenseIndex, NDIM> begin_di;    in HandleStridedSliceCase()
    89   Eigen::DSizes<Eigen::DenseIndex, NDIM> sizes_di;    in HandleStridedSliceCase()
    90   for (int i = 0; i < NDIM; ++i) {    in HandleStridedSliceCase()
    94   functor::Slice<Device, Proxy, NDIM>()(    in HandleStridedSliceCase()
    96   result->bit_casted_shaped<Proxy, NDIM>(processing_dims),    in HandleStridedSliceCase()
    97   context->input(0).bit_casted_tensor<Proxy, NDIM>(), begin_di, sizes_di);    in HandleStridedSliceCase()
    [all …]
|
D | betainc_op.cc |
    91   #define CASE(NDIM) \    in Compute()  argument
    92   case NDIM: { \    in Compute()
    93   functor::Betainc<Device, T, NDIM> functor; \    in Compute()
    94   auto a_value = a.shaped<T, NDIM>(a_shaper.x_reshape()); \    in Compute()
    95   auto b_value = b.shaped<T, NDIM>(b_shaper.x_reshape()); \    in Compute()
    96   auto x_value = x.shaped<T, NDIM>(x_shaper.x_reshape()); \    in Compute()
    98   BCast::ToIndexArray<NDIM>(a_shaper.x_bcast()), b_value, \    in Compute()
    99   BCast::ToIndexArray<NDIM>(b_shaper.x_bcast()), x_value, \    in Compute()
    100  BCast::ToIndexArray<NDIM>(x_shaper.x_bcast()), \    in Compute()
    101  output->shaped<T, NDIM>(a_shaper.y_reshape())); \    in Compute()
    [all …]
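The CASE(NDIM) macro above is the usual switch-on-rank dispatch: a runtime rank selects a compile-time NDIM so the functor can work with fixed-size Eigen index arrays. A minimal standalone sketch of that pattern, with an illustrative functor in place of TensorFlow's functor::Betainc (names here are ours, not the TF API):

    #include <cstdio>
    #include <stdexcept>

    // Illustrative rank-templated functor, standing in for functor::Betainc<Device, T, NDIM>.
    template <int NDIM>
    struct RankedKernel {
      void operator()() const { std::printf("running kernel specialized for rank %d\n", NDIM); }
    };

    // Turn a runtime rank into a compile-time template parameter, as CASE(NDIM) does.
    void DispatchByRank(int rank) {
    #define CASE(NDIM)          \
      case NDIM: {              \
        RankedKernel<NDIM>()(); \
        break;                  \
      }
      switch (rank) {
        CASE(1)
        CASE(2)
        CASE(3)
        default:
          throw std::runtime_error("unsupported rank");
      }
    #undef CASE
    }

    int main() { DispatchByRank(2); }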
|
D | tile_ops_gpu_impl.h |
    24   // DEFINE_TILE_OPS(NDIM)
    27   // where NDIM is an integer.
    41   #define DEFINE_DIM(T, NDIM) \    argument
    42   template struct TileGrad<Eigen::GpuDevice, T, NDIM>; \
    43   template struct ReduceAndReshape<Eigen::GpuDevice, T, NDIM, 1>;
    45   #define DEFINE_TILE_OPS(NDIM) \    argument
    48   DEFINE_DIM(int16, NDIM) \
    49   DEFINE_DIM(int32, NDIM) \
    50   DEFINE_DIM(int64, NDIM) \
    51   DEFINE_DIM(Eigen::half, NDIM) \
    [all …]
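DEFINE_DIM and DEFINE_TILE_OPS are explicit-template-instantiation macros: each expansion forces the compiler to emit the functor for one (type, rank) pair inside the GPU translation unit, so other files only need the declaration. A compilable sketch of the same technique with a hypothetical functor (TileLike is not a TensorFlow name):

    #include <cstddef>

    // Stand-in for a device functor that is declared in a header and explicitly
    // instantiated in a separate translation unit (as tile_ops_gpu_impl.h does).
    template <typename T, int NDIM>
    struct TileLike {
      void operator()(const T* in, T* out, std::size_t n) const {
        for (std::size_t i = 0; i < n; ++i) out[i] = in[i];
      }
    };

    // Each expansion forces the compiler to emit code for one (type, rank) pair.
    #define DEFINE_DIM(T, NDIM) template struct TileLike<T, NDIM>;
    #define DEFINE_TILE_OPS(NDIM) \
      DEFINE_DIM(float, NDIM)     \
      DEFINE_DIM(int, NDIM)       \
      DEFINE_DIM(double, NDIM)

    DEFINE_TILE_OPS(1)
    DEFINE_TILE_OPS(2)
    DEFINE_TILE_OPS(3)

    int main() {
      float in[4] = {1, 2, 3, 4}, out[4];
      TileLike<float, 1>()(in, out, 4);
      return out[3] == 4 ? 0 : 1;
    }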
|
D | tile_ops.cc |
    53   template <typename Device, typename T, int NDIM>
    55   void operator()(const Device& d, typename TTypes<T, NDIM>::Tensor out,
    56   typename TTypes<T, NDIM>::ConstTensor in,
    57   const Eigen::DSizes<Eigen::DenseIndex, NDIM>& indices,
    58   const Eigen::DSizes<Eigen::DenseIndex, NDIM>& sizes,
    70   template <typename Device, typename T, int NDIM, int REDUCEDNDIM>
    73   const Device& d, typename TTypes<T, NDIM>::Tensor out,
    74   typename TTypes<T, NDIM>::ConstTensor in,
    76   const Eigen::DSizes<Eigen::DenseIndex, NDIM>& reshape_dim) const;
    101  #define DECLARE_CUDA_DIM(T, NDIM) \    argument
    [all …]
|
D | betainc_op.h |
    27   template <typename Device, typename T, int NDIM>
    29   void operator()(const Device& d, typename TTypes<T, NDIM>::ConstTensor a,    in operator()
    30   typename TTypes<T, NDIM>::ConstTensor b,    in operator()
    31   typename TTypes<T, NDIM>::ConstTensor x,    in operator()
    32   typename TTypes<T, NDIM>::Tensor output) {    in operator()
    36   void BCast(const Device& d, typename TTypes<T, NDIM>::ConstTensor a,    in BCast()
    37   const typename Eigen::array<Eigen::DenseIndex, NDIM>& bcast_a,    in BCast()
    38   typename TTypes<T, NDIM>::ConstTensor b,    in BCast()
    39   const typename Eigen::array<Eigen::DenseIndex, NDIM>& bcast_b,    in BCast()
    40   typename TTypes<T, NDIM>::ConstTensor x,    in BCast()
    [all …]
|
D | slice_op.cc |
    195  #define HANDLE_DIM(NDIM) \    in Compute()  argument
    196  if (input_dims == NDIM) { \    in Compute()
    197  HandleCase<NDIM>(context, begin, size, input, result); \    in Compute()
    219  template <int NDIM>
    223  Eigen::DSizes<Eigen::DenseIndex, NDIM> indices;    in HandleCase()
    224  Eigen::DSizes<Eigen::DenseIndex, NDIM> sizes;    in HandleCase()
    225  for (int i = 0; i < NDIM; ++i) {    in HandleCase()
    230  functor::Slice<Device, T, NDIM>()(context->eigen_device<Device>(),    in HandleCase()
    231  result->tensor<T, NDIM>(),    in HandleCase()
    232  input.tensor<T, NDIM>(), indices, sizes);    in HandleCase()
    [all …]
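HandleCase copies begin/size into fixed-rank Eigen::DSizes and defers the copy to functor::Slice. The same slice can be written directly against a flat row-major buffer; a simplified, Eigen-free sketch (helper name and signature are illustrative, assuming ndim >= 1):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Copy the block [begin, begin + size) of a row-major tensor into `out`.
    // `shape`, `begin`, and `size` each have one entry per dimension.
    void SliceCopy(const std::vector<int64_t>& shape,
                   const std::vector<int64_t>& begin,
                   const std::vector<int64_t>& size,
                   const float* in, float* out) {
      const std::size_t ndim = shape.size();
      std::vector<int64_t> stride(ndim, 1);                 // row-major strides of the input
      for (std::size_t d = ndim - 1; d-- > 0;) stride[d] = stride[d + 1] * shape[d + 1];

      std::vector<int64_t> coord(ndim, 0);                  // odometer over output coordinates
      int64_t total = 1;
      for (int64_t s : size) total *= s;
      for (int64_t n = 0; n < total; ++n) {
        int64_t offset = 0;
        for (std::size_t d = 0; d < ndim; ++d) offset += (begin[d] + coord[d]) * stride[d];
        out[n] = in[offset];
        for (std::size_t d = ndim; d-- > 0;) {              // increment, rightmost dim fastest
          if (++coord[d] < size[d]) break;
          coord[d] = 0;
        }
      }
    }

    int main() {
      const float in[6] = {0, 1, 2, 3, 4, 5};               // shape {2, 3}
      float out[4];
      SliceCopy({2, 3}, {0, 1}, {2, 2}, in, out);           // columns 1..2 -> {1, 2, 4, 5}
      return (out[0] == 1 && out[3] == 5) ? 0 : 1;
    }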
|
D | where_op_gpu.cu.h |
    39   template <int NDIM, typename TIndex>
    41   const TIndex output_rows, const typename Eigen::array<TIndex, NDIM> strides,    in PropagateWhereIndicesKernel()
    47   TIndex index_value = ldg(output + NDIM * i);    in PropagateWhereIndicesKernel()
    49   for (int c = 0; c < NDIM; ++c) {    in PropagateWhereIndicesKernel()
    50   *(output + NDIM * i + c) = index_value / strides[c];    in PropagateWhereIndicesKernel()
    200  template <int NDIM>
    233  return *(ptr_ + (valid ? (NDIM * n) : 0));
    241  template <typename TIndex, typename T, int NDIM>
    242  Eigen::array<TIndex, NDIM> CalculateStrides(
    243  typename TTypes<T, NDIM>::ConstTensor input) {
    [all …]
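PropagateWhereIndicesKernel converts each flat element index back into NDIM coordinates by dividing by precomputed row-major strides. The same arithmetic on the host, as a sketch (CalculateStrides here mirrors the idea, not the exact TF signature):

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Row-major strides for a given shape: the last dimension varies fastest.
    template <int NDIM>
    std::array<int64_t, NDIM> CalculateStrides(const std::array<int64_t, NDIM>& shape) {
      std::array<int64_t, NDIM> strides;
      strides[NDIM - 1] = 1;
      for (int d = NDIM - 2; d >= 0; --d) strides[d] = strides[d + 1] * shape[d + 1];
      return strides;
    }

    // Decompose a flat index into per-dimension coordinates, as the kernel does in place.
    template <int NDIM>
    std::array<int64_t, NDIM> Unravel(int64_t flat, const std::array<int64_t, NDIM>& strides) {
      std::array<int64_t, NDIM> coords;
      for (int d = 0; d < NDIM; ++d) {
        coords[d] = flat / strides[d];        // divide by the stride, keep the remainder
        flat -= coords[d] * strides[d];
      }
      return coords;
    }

    int main() {
      auto strides = CalculateStrides<3>({2, 3, 4});   // {12, 4, 1}
      auto c = Unravel<3>(17, strides);                // 17 -> (1, 1, 1)
      std::printf("%lld %lld %lld\n", (long long)c[0], (long long)c[1], (long long)c[2]);
    }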
|
D | tile_ops_impl.h |
    27   template <typename Device, typename T, int NDIM>
    29   void operator()(const Device& d, typename TTypes<T, NDIM>::Tensor out,    in operator()
    30   typename TTypes<T, NDIM>::ConstTensor in,    in operator()
    31   const Eigen::DSizes<Eigen::DenseIndex, NDIM>& indices,    in operator()
    32   const Eigen::DSizes<Eigen::DenseIndex, NDIM>& sizes,    in operator()
    57   template <typename Device, typename T, int NDIM, int REDUCEDNDIM>
    60   const Device& d, typename TTypes<T, NDIM>::Tensor out,
    61   typename TTypes<T, NDIM>::ConstTensor in,
    63   const Eigen::DSizes<Eigen::DenseIndex, NDIM>& reshape_dim) const {
|
/external/pytorch/aten/src/ATen/native/cpu/ |
D | PaddingKernel.cpp |
    16   int ndim;    member
    30   ndim = padding.size() / 2;    in PaddingParams()
    32   bool is_batch = input.dim() == ndim + 2;    in PaddingParams()
    40   for (const auto d : c10::irange(ndim)) {    in PaddingParams()
    51   if (ndim == 1) {    in PaddingParams()
    53   } else if (ndim == 2) {    in PaddingParams()
    61   for (const auto d : c10::irange(ndim)) {    in PaddingParams()
    145  int ndim = p.ndim;    in cpu_padding()  local
    146  int64_t input_depth = ndim == 3 ? p.ishape[ndim - 3] : 1;    in cpu_padding()
    147  int64_t input_height = ndim >=2 ? p.ishape[ndim - 2] : 1;    in cpu_padding()
    [all …]
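PaddingParams infers the spatial rank from the padding list (two values per padded dimension) and then decides whether a leading batch dimension is present. A reduced sketch of that bookkeeping under the same convention (struct and field names are illustrative):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    struct PaddingParams {
      int ndim;          // number of padded (spatial) dimensions
      bool is_batch;     // whether the input carries a leading batch dimension

      PaddingParams(const std::vector<int64_t>& input_sizes,
                    const std::vector<int64_t>& padding) {
        if (padding.size() % 2 != 0)
          throw std::invalid_argument("padding needs two values per padded dim");
        ndim = static_cast<int>(padding.size() / 2);   // e.g. {left, right} -> ndim == 1
        const int input_dim = static_cast<int>(input_sizes.size());
        // Input is either (C, *spatial) or (N, C, *spatial); the latter has ndim + 2 dims.
        is_batch = input_dim == ndim + 2;
      }
    };

    int main() {
      PaddingParams p({4, 3, 8, 8}, {1, 1, 2, 2});     // NCHW input, 2-D padding
      return p.is_batch ? 0 : 1;
    }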
|
D | UpSampleMoreKernel.cpp |
    106  auto ndim = input_sizes.size();    in cpu_upsample_nearest_backward()  local
    110  int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1;    in cpu_upsample_nearest_backward()
    111  int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1;    in cpu_upsample_nearest_backward()
    112  int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1;    in cpu_upsample_nearest_backward()
    113  int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1;    in cpu_upsample_nearest_backward()
    114  int64_t input_width = input_sizes[ndim - 1];    in cpu_upsample_nearest_backward()
    115  int64_t output_width = output_sizes[ndim - 1];    in cpu_upsample_nearest_backward()
    207  if (ndim == 3) {    in cpu_upsample_nearest_backward()
    210  } else if (ndim == 4) {    in cpu_upsample_nearest_backward()
    215  TORCH_INTERNAL_ASSERT(ndim == 5);    in cpu_upsample_nearest_backward()
    [all …]
|
/external/pytorch/aten/src/ATen/native/ |
D | Pool.h |
    128  const int64_t ndim = input.ndimension();    in pool2d_shape_check()  local
    146  TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0),    in pool2d_shape_check()
    150  TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) ||    in pool2d_shape_check()
    151  (ndim == 4 && valid_dims && input.size(3) != 0),    in pool2d_shape_check()
    184  const int64_t ndim = input.ndimension();    in max_pool2d_backward_shape_check()  local
    187  check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);    in max_pool2d_backward_shape_check()
    188  check_dim_size(gradOutput, ndim, ndim-2, outputHeight);    in max_pool2d_backward_shape_check()
    189  check_dim_size(gradOutput, ndim, ndim-1, outputWidth);    in max_pool2d_backward_shape_check()
    191  check_dim_size(indices, ndim, ndim-3, nOutputPlane);    in max_pool2d_backward_shape_check()
    192  check_dim_size(indices, ndim, ndim-2, outputHeight);    in max_pool2d_backward_shape_check()
    [all …]
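pool2d_shape_check accepts 3-D (C, H, W) or 4-D (N, C, H, W) inputs and verifies that the computed output extents are positive. The per-dimension output-size arithmetic it relies on is the standard formula, sketched below (a simplified stand-in; ATen's real helper also validates the stride and kernel arguments):

    #include <cstdint>
    #include <cstdio>

    // Output extent of a 2-D pooling window along one dimension. With ceil_mode the
    // division rounds up, but a window that starts entirely in the padding is dropped.
    int64_t pooling_output_size(int64_t input, int64_t kernel, int64_t pad,
                                int64_t stride, int64_t dilation, bool ceil_mode) {
      int64_t numerator = input + 2 * pad - dilation * (kernel - 1) - 1;
      int64_t out = (ceil_mode ? (numerator + stride - 1) / stride : numerator / stride) + 1;
      if (ceil_mode && (out - 1) * stride >= input + pad) --out;  // last window must touch the input
      return out;
    }

    int main() {
      // 3x3 max pool, stride 2, padding 1 on a 224-wide input -> 112.
      std::printf("%lld\n", (long long)pooling_output_size(224, 3, 1, 2, 1, /*ceil_mode=*/false));
    }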
|
/external/tensorflow/tensorflow/python/keras/engine/ |
D | input_spec.py |
    46   ndim: Integer, expected rank of the input.
    74   ndim=None,    argument
    87   self.ndim = len(shape)
    90   self.ndim = ndim
    102  if self.axes and (self.ndim is not None or self.max_ndim is not None):
    103  max_dim = (self.ndim if self.ndim else self.max_ndim) - 1
    112  ('ndim=' + str(self.ndim)) if self.ndim else '',
    122  'ndim': self.ndim,
    135  If the InputSpec's shape or ndim is defined, this method will return a fully
    144  if spec.ndim is None and spec.shape is None:
    [all …]
|
/external/pytorch/torch/distributed/tensor/_ops/ |
D | _view_ops.py |
    171  def dim_pad_left(ndim: int, min_dims: int) -> DimMap:
    172  return (Singleton(),) * max(0, min_dims - ndim) + tuple(
    173  InputDim(i) for i in range(ndim)
    177  def dim_atleast_3d(ndim: int) -> DimMap:
    178  if ndim == 0:
    180  elif ndim == 1:
    182  elif ndim == 2:
    185  return tuple(InputDim(i) for i in range(ndim))
    221  def dim_flatten(ndim: int, start_dim=0, end_dim=-1) -> DimMap:
    222  if ndim == 0:
    [all …]
|
/external/executorch/exir/ |
D | dim_order_utils.py |
    16   def _get_contiguous_dim_order(ndim: int) -> List[int]:
    17   if ndim < 0:
    19   …pported rank for contiguous dim order. Only supports ndim greater than or equal to 0, but got {ndi…
    22   return list(range(ndim))
    25   def _get_channels_last_dim_order(ndim: int) -> List[int]:
    26   if ndim == 4:
    30   … f"Unsupported rank for channels last dim order. Only support ndim equal to 4, but got {ndim}"
    53   memory_format: Optional[torch.memory_format], ndim: int
    61   return _get_contiguous_dim_order(ndim)
    63   return _get_channels_last_dim_order(ndim)
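The two helpers map a memory format to an explicit dim order: contiguous is simply 0..ndim-1, and channels-last is only defined for rank-4 tensors as [0, 2, 3, 1] (N, H, W, C). The same logic as a standalone C++ sketch:

    #include <numeric>
    #include <stdexcept>
    #include <vector>

    std::vector<int> contiguous_dim_order(int ndim) {
      if (ndim < 0) throw std::invalid_argument("ndim must be >= 0");
      std::vector<int> order(ndim);
      std::iota(order.begin(), order.end(), 0);   // 0, 1, ..., ndim - 1
      return order;
    }

    std::vector<int> channels_last_dim_order(int ndim) {
      if (ndim != 4) throw std::invalid_argument("channels-last dim order requires ndim == 4");
      return {0, 2, 3, 1};                        // N, H, W, C
    }

    int main() {
      auto a = contiguous_dim_order(3);           // {0, 1, 2}
      auto b = channels_last_dim_order(4);        // {0, 2, 3, 1}
      return a.size() + b.size() == 7 ? 0 : 1;
    }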
|
/external/pytorch/aten/src/ATen/native/cuda/ |
D | SortImpl.cu |
    9    int64_t ndim = self.dim();    in infer_dense_strides_dim_last()  local
    14   std::vector<int64_t> original_dim(ndim);    in infer_dense_strides_dim_last()
    15   for (int64_t i = 0; i < ndim; i++) {    in infer_dense_strides_dim_last()
    19   thrust::host, strides.data(), strides.data() + ndim, original_dim.data(),    in infer_dense_strides_dim_last()
    23   std::vector<int64_t> new_strides(ndim);    in infer_dense_strides_dim_last()
    24   std::vector<int64_t> new_strides_unsort(ndim);    in infer_dense_strides_dim_last()
    26   for (int64_t i = 0; i < ndim; i++) {    in infer_dense_strides_dim_last()
    27   new_strides[ndim - 1 - i] = cumprod;    in infer_dense_strides_dim_last()
    28   cumprod *= self.sizes()[original_dim[ndim - 1 - i]];    in infer_dense_strides_dim_last()
    31   for (int64_t i = 0; i < ndim; i++) {    in infer_dense_strides_dim_last()
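infer_dense_strides_dim_last sorts the dimensions by their current stride (keeping the sort dimension last) and then assigns dense strides by a running product from the fastest dimension outward. The core of that stride assignment, sketched without the thrust sort (function name is illustrative):

    #include <cstdint>
    #include <vector>

    // Given sizes and a dimension order (original dim index per position, last position
    // varying fastest), produce strides that make the tensor dense in that order.
    std::vector<int64_t> dense_strides_for_order(const std::vector<int64_t>& sizes,
                                                 const std::vector<int64_t>& order) {
      const int64_t ndim = static_cast<int64_t>(sizes.size());
      std::vector<int64_t> strides(ndim);
      int64_t cumprod = 1;
      for (int64_t i = 0; i < ndim; ++i) {
        const int64_t dim = order[ndim - 1 - i];   // walk from fastest to slowest dim
        strides[dim] = cumprod;
        cumprod *= sizes[dim];
      }
      return strides;
    }

    int main() {
      // A 2x3x4 tensor laid out so that dim 1 varies fastest: order = {0, 2, 1}.
      auto s = dense_strides_for_order({2, 3, 4}, {0, 2, 1});
      // s == {12, 1, 3}: dim 1 has stride 1, dim 2 stride 3, dim 0 stride 12.
      return (s[0] == 12 && s[1] == 1 && s[2] == 3) ? 0 : 1;
    }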
|
/external/tensorflow/tensorflow/python/framework/ |
D | fast_tensor_util.pyx |
    10   tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray):    argument
    22   tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray):    argument
    30   tensor_proto, np.ndarray[np.float32_t, ndim=1] nparray):    argument
    38   tensor_proto, np.ndarray[np.float64_t, ndim=1] nparray):    argument
    46   tensor_proto, np.ndarray[np.int32_t, ndim=1] nparray):    argument
    53   tensor_proto, np.ndarray[np.uint32_t, ndim=1] nparray):    argument
    60   tensor_proto, np.ndarray[np.int64_t, ndim=1] nparray):    argument
    67   tensor_proto, np.ndarray[np.uint64_t, ndim=1] nparray):    argument
    74   tensor_proto, np.ndarray[np.uint8_t, ndim=1] nparray):    argument
    82   tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray):    argument
    [all …]
|
/external/executorch/kernels/portable/cpu/ |
D | op_tril.cpp |
    66   int64_t ndim = self.dim();    in tril_kernel()  local
    70   ndim < kTensorDimensionLimit,    in tril_kernel()
    73   "ndim %" PRId64 " >= %zu",    in tril_kernel()
    74   ndim,    in tril_kernel()
    80   for (size_t i = 0; i < ndim; ++i) {    in tril_kernel()
    85   IntArrayRef sizes_ref(sizes, ndim);    in tril_kernel()
    86   IntArrayRef strides_ref(strides, ndim);    in tril_kernel()
    88   int64_t num_rows = sizes_ref[ndim - 2];    in tril_kernel()
    89   int64_t num_cols = sizes_ref[ndim - 1];    in tril_kernel()
    95   int64_t batch_size = getLeadingDims(self, ndim - 2);    in tril_kernel()
    [all …]
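tril_kernel views the tensor as a batch of matrices: the last two sizes are rows and columns, and every leading dimension is folded into a batch count. A contiguous-only sketch of that loop structure (the real kernel also honors arbitrary strides and a diagonal offset):

    #include <cstdint>
    #include <vector>

    // Zero out the strictly upper triangle of every matrix in a contiguous, row-major
    // batch of shape (..., num_rows, num_cols).
    void tril_inplace(float* data, const std::vector<int64_t>& sizes) {
      const int64_t ndim = static_cast<int64_t>(sizes.size());
      if (ndim < 2) return;                        // nothing to do for scalars/vectors here
      const int64_t num_rows = sizes[ndim - 2];
      const int64_t num_cols = sizes[ndim - 1];
      int64_t batch_size = 1;                      // product of all leading dims
      for (int64_t i = 0; i < ndim - 2; ++i) batch_size *= sizes[i];

      for (int64_t b = 0; b < batch_size; ++b) {
        float* mat = data + b * num_rows * num_cols;
        for (int64_t r = 0; r < num_rows; ++r)
          for (int64_t c = r + 1; c < num_cols; ++c)   // strictly above the main diagonal
            mat[r * num_cols + c] = 0.0f;
      }
    }

    int main() {
      std::vector<float> x(2 * 3 * 3, 1.0f);       // two 3x3 matrices of ones
      tril_inplace(x.data(), {2, 3, 3});
      return x[0 * 9 + 0 * 3 + 1] == 0.0f ? 0 : 1; // element (0, 0, 1) is above the diagonal
    }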
|
D | op_constant_pad_nd.cpp |
    32   size_t ndim,    in apply_padding_to_dim()  argument
    43   if (dim >= ndim) {    in apply_padding_to_dim()
    47   size_t pad_i = ndim - 1 - dim;    in apply_padding_to_dim()
    80   ndim,    in apply_padding_to_dim()
    112  size_t ndim = self.dim();    in constant_pad_nd_out_impl()  local
    114  if (ndim == 0) {    in constant_pad_nd_out_impl()
    127  for (size_t i = 0; i < ndim; ++i) {    in constant_pad_nd_out_impl()
    133  size_t pad_i = ndim - 1 - i;    in constant_pad_nd_out_impl()
    141  IntArrayRef self_sizes_ref(self_sizes, ndim);    in constant_pad_nd_out_impl()
    142  IntArrayRef self_strides_ref(self_strides, ndim);    in constant_pad_nd_out_impl()
    [all …]
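constant_pad_nd receives the pad list in last-dimension-first order: pad[0]/pad[1] apply to dim ndim-1, pad[2]/pad[3] to dim ndim-2, and so on, which is why the code indexes with pad_i = ndim - 1 - dim. A sketch that expands such a pad list into per-dimension (before, after) amounts (helper name is ours):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Expand a PyTorch-style pad list (last dim first, two entries per padded dim)
    // into one (pad_before, pad_after) pair per tensor dimension.
    std::vector<std::pair<int64_t, int64_t>> expand_pad(const std::vector<int64_t>& pad,
                                                        std::size_t ndim) {
      std::vector<std::pair<int64_t, int64_t>> out(ndim, {0, 0});
      const std::size_t padded_dims = pad.size() / 2;
      for (std::size_t i = 0; i < padded_dims && i < ndim; ++i) {
        const std::size_t dim = ndim - 1 - i;     // pad entries count backwards from the last dim
        out[dim] = {pad[2 * i], pad[2 * i + 1]};
      }
      return out;
    }

    int main() {
      // pad = {1, 1, 2, 2} on a 4-D tensor: W gets (1, 1), H gets (2, 2), N and C stay unpadded.
      auto p = expand_pad({1, 1, 2, 2}, 4);
      return (p[3].first == 1 && p[2].second == 2 && p[0].first == 0) ? 0 : 1;
    }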
|
/external/pytorch/aten/src/ATen/ |
D | ExpandUtils.cpp |
    21   auto ndim = dimsA > dimsB ? dimsA : dimsB;    in infer_size_impl()  local
    22   Container expandedSizes(ndim);    in infer_size_impl()
    24   for (ptrdiff_t i = ndim - 1; i >= 0; --i) {    in infer_size_impl()
    25   ptrdiff_t offset = ndim - 1 - i;    in infer_size_impl()
    66   int64_t ndim = static_cast<int64_t>(sizes.size());    in inferExpandGeometryImpl()  local
    70   return InferExpandGeometryResult<Container>(sizes, ndim);    in inferExpandGeometryImpl()
    73   InferExpandGeometryResult<Container> result(ndim);    in inferExpandGeometryImpl()
    78   for (int64_t i = ndim - 1; i >= 0; --i) {    in inferExpandGeometryImpl()
    79   int64_t offset = ndim - 1 - i;    in inferExpandGeometryImpl()
    153  size_t ndim = tensor_sizes.size();    in infer_dense_strides()  local
    [all …]
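infer_size_impl applies the broadcasting rule: align both shapes on the right, treat missing dimensions as 1, and for each position require the extents to match unless one of them is 1. A standalone sketch of that rule:

    #include <algorithm>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    std::vector<int64_t> infer_broadcast_size(const std::vector<int64_t>& a,
                                              const std::vector<int64_t>& b) {
      const int64_t dimsA = static_cast<int64_t>(a.size());
      const int64_t dimsB = static_cast<int64_t>(b.size());
      const int64_t ndim = std::max(dimsA, dimsB);
      std::vector<int64_t> expanded(ndim);
      for (int64_t i = ndim - 1; i >= 0; --i) {
        const int64_t offset = ndim - 1 - i;                             // distance from the last dim
        const int64_t sizeA = offset < dimsA ? a[dimsA - 1 - offset] : 1;  // missing dims act as 1
        const int64_t sizeB = offset < dimsB ? b[dimsB - 1 - offset] : 1;
        if (sizeA != sizeB && sizeA != 1 && sizeB != 1)
          throw std::invalid_argument("shapes are not broadcastable");
        expanded[i] = sizeA == 1 ? sizeB : sizeA;
      }
      return expanded;
    }

    int main() {
      auto s = infer_broadcast_size({8, 1, 6, 1}, {7, 1, 5});  // -> {8, 7, 6, 5}
      return (s == std::vector<int64_t>{8, 7, 6, 5}) ? 0 : 1;
    }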
|
/external/tensorflow/tensorflow/core/util/sparse/ |
D | sparse_tensor_test.cc |
    36   GetSimpleIndexTensor(int N, const int NDIM) {    in GetSimpleIndexTensor()  argument
    37   Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex> ix(N, NDIM);    in GetSimpleIndexTensor()
    62   const int NDIM = 3;    in TEST()  local
    63   auto ix = GetSimpleIndexTensor(N, NDIM);    in TEST()
    64   TTypes<int64_t>::Matrix map(ix.data(), N, NDIM);    in TEST()
    95   const int NDIM = 3;    in TEST()  local
    96   Tensor ix(DT_INT32, TensorShape({N, NDIM}));    in TEST()
    108  const int NDIM = 3;    in TEST()  local
    109  Tensor ix(DT_INT64, TensorShape({N, NDIM, 1}));    in TEST()
    121  const int NDIM = 3;    in TEST()  local
    [all …]
|
/external/pytorch/torch/_numpy/ |
D | _util.py |
    47   def normalize_axis_index(ax, ndim, argname=None):    argument
    48   if not (-ndim <= ax < ndim):
    49   raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}")
    51   ax += ndim
    56   def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):    argument
    71   ndim : int
    83   The normalized axis index, such that `0 <= normalized_axis < ndim`
    92   axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
    120  def apply_keepdims(tensor, axis, ndim):    argument
    123  shape = (1,) * ndim
    [all …]
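normalize_axis_index accepts axes in [-ndim, ndim) and maps negative values to their positive equivalents, raising otherwise. The same check transliterated to C++ (the Python version raises AxisError; this sketch throws std::out_of_range):

    #include <stdexcept>
    #include <string>

    int normalize_axis_index(int ax, int ndim) {
      if (!(-ndim <= ax && ax < ndim))
        throw std::out_of_range("axis " + std::to_string(ax) +
                                " is out of bounds for array of dimension " + std::to_string(ndim));
      if (ax < 0) ax += ndim;   // e.g. ax = -1, ndim = 3 -> 2
      return ax;
    }

    int main() {
      return normalize_axis_index(-1, 3) == 2 ? 0 : 1;
    }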
|
/external/pytorch/aten/src/ATen/native/mkldnn/xpu/detail/ |
D | Conv.cpp |
    19   int64_t ndim,    in conv_dst_size()  argument
    27   dnnl::memory::dims dst_size(ndim);    in conv_dst_size()
    30   for (int d = 2; d < ndim; ++d) {    in conv_dst_size()
    52   const int64_t ndim,    in conv_src_fmt()  argument
    55   return (ndim == 3)    in conv_src_fmt()
    57   : ((ndim == 4) ? dnnl::memory::format_tag::nchw    in conv_src_fmt()
    58   : ((ndim == 5) ? dnnl::memory::format_tag::ncdhw    in conv_src_fmt()
    61   return (ndim == 3)    in conv_src_fmt()
    63   : ((ndim == 4) ? dnnl::memory::format_tag::nhwc    in conv_src_fmt()
    64   : ((ndim == 5) ? dnnl::memory::format_tag::ndhwc    in conv_src_fmt()
    [all …]
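conv_dst_size carries the batch and output-channel extents through unchanged and computes each spatial dimension (d >= 2) with the standard convolution size formula. A sketch under the usual assumptions (symmetric padding, NCHW-style layout; names are illustrative, not the oneDNN API):

    #include <cstdint>
    #include <vector>

    // Compute the full destination dims for a convolution: batch and output channels
    // are copied, spatial dims (d >= 2) use the standard size formula.
    std::vector<int64_t> conv_dst_size(const std::vector<int64_t>& src,     // {N, C_in, *spatial}
                                       const std::vector<int64_t>& weight,  // {C_out, C_in, *kernel}
                                       const std::vector<int64_t>& padding,
                                       const std::vector<int64_t>& stride,
                                       const std::vector<int64_t>& dilation) {
      const int64_t ndim = static_cast<int64_t>(src.size());
      std::vector<int64_t> dst(ndim);
      dst[0] = src[0];       // batch
      dst[1] = weight[0];    // output channels
      for (int64_t d = 2; d < ndim; ++d) {
        const int64_t k = dilation[d - 2] * (weight[d] - 1) + 1;   // effective kernel extent
        dst[d] = (src[d] + 2 * padding[d - 2] - k) / stride[d - 2] + 1;
      }
      return dst;
    }

    int main() {
      // N=1, C_in=3, 224x224 input; 64 3x3 filters, stride 2, padding 1 -> {1, 64, 112, 112}.
      auto dst = conv_dst_size({1, 3, 224, 224}, {64, 3, 3, 3}, {1, 1}, {2, 2}, {1, 1});
      return (dst[2] == 112 && dst[3] == 112) ? 0 : 1;
    }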
|
/external/python/cpython3/Modules/ |
D | _testbuffer.c |
    52   #define ND_SCALAR 0x008 /* scalar: ndim = 0 */
    151  base->ndim = 1;    in ndbuf_new()
    264  if (ndbuf->base.ndim == 0)    in init_flags()
    470  copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,    in copy_rec()  argument
    477  assert(ndim >= 1);    in copy_rec()
    479  if (ndim == 1) {    in copy_rec()
    503  copy_rec(shape+1, ndim-1, itemsize,    in copy_rec()
    517  dest->ndim != src->ndim)    in cmp_structure()
    520  for (i = 0; i < dest->ndim; i++) {    in cmp_structure()
    531  ndim and shape. Copying is atomic, the function never fails with
    [all …]
|
/external/pytorch/aten/src/ATen/cuda/ |
D | CUDASparseDescriptors.cpp |
    63   auto ndim = input.dim();    in createRawDnMatDescriptor()  local
    64   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2);    in createRawDnMatDescriptor()
    65   auto rows = input_sizes[ndim - 2];    in createRawDnMatDescriptor()
    66   auto cols = input_sizes[ndim - 1];    in createRawDnMatDescriptor()
    76   is_row_major ? input_strides[ndim - 2] : input_strides[ndim - 1];    in createRawDnMatDescriptor()
    85   auto batch_stride = ndim > 2 && batch_offset >= 0 ? input_strides[ndim - 3] : 0;    in createRawDnMatDescriptor()
    106  if (ndim >= 3 && batch_offset == -1) {    in createRawDnMatDescriptor()
    110  raw_descriptor, batch_count, input_strides[ndim - 3]));    in createRawDnMatDescriptor()
    146  auto ndim = input.dim();    in CuSparseSpMatCsrDescriptor()  local
    147  auto rows = input_sizes[ndim - 2];    in CuSparseSpMatCsrDescriptor()
    [all …]
|
/external/python/cpython3/Objects/ |
D | memoryobject.c |
    239  /* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
    259  with the same logical structure: format, itemsize, ndim and shape
    260  are identical, with ndim > 0.
    266  /* Assumptions: ndim >= 1. The macro tests for a corner case that should
    269  (view->suboffsets && view->suboffsets[view->ndim-1] >= 0)
    274  assert(dest->ndim > 0 && src->ndim > 0);    in last_dim_is_contiguous()
    277  dest->strides[dest->ndim-1] == dest->itemsize &&    in last_dim_is_contiguous()
    278  src->strides[src->ndim-1] == src->itemsize);    in last_dim_is_contiguous()
    312  if (dest->ndim != src->ndim)    in equiv_shape()
    315  for (i = 0; i < dest->ndim; i++) {    in equiv_shape()
    [all …]
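last_dim_is_contiguous only needs the weak property that the final dimension of both views is packed (stride equals itemsize); full C-contiguity further requires every stride to be the running product of the trailing extents. A sketch of both checks on a single Py_buffer-like description (the original compares a source and a destination view; types and names here are illustrative):

    #include <cstddef>
    #include <vector>

    struct BufferView {
      std::size_t itemsize;
      std::vector<std::ptrdiff_t> shape;
      std::vector<std::ptrdiff_t> strides;   // in bytes, as in the buffer protocol
    };

    // The cheap test used before row-wise memcpy: is the last dimension packed?
    bool last_dim_is_contiguous(const BufferView& v) {
      return !v.shape.empty() &&
             v.strides.back() == static_cast<std::ptrdiff_t>(v.itemsize);
    }

    // Full C-contiguity: strides are the running product of the trailing extents
    // (size-1 dimensions may carry arbitrary strides).
    bool is_c_contiguous(const BufferView& v) {
      std::ptrdiff_t expected = static_cast<std::ptrdiff_t>(v.itemsize);
      for (std::size_t d = v.shape.size(); d-- > 0;) {
        if (v.shape[d] > 1 && v.strides[d] != expected) return false;
        expected *= v.shape[d];
      }
      return true;
    }

    int main() {
      BufferView v{4, {2, 3}, {12, 4}};       // 2x3 array of 4-byte items, row-major
      return (last_dim_is_contiguous(v) && is_c_contiguous(v)) ? 0 : 1;
    }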
|